the-stack_106_19648
|
import yaml, logging, datetime, time
from . import DataModel
from .data import *
from .endpoints import Endpoint
from ..helpers.helpers import ConsulTemplate
class Fqdns(DataCasting, object):
def __init__(self):
super().__init__('fqdn')
self.fqdns = []
for f in self.list():
self.fqdns.append(Fqdn(f))
@property
def key(self):
return self.cast
@property
def value(self):
return self.fqdns
def __repr__(self):
return str(list(self.list()))
def __iter__(self):
self.iter = 0
return self
def __next__(self):
if self.iter < len(self.fqdns):
self.iter = self.iter + 1
return str(self.fqdns[self.iter - 1])
raise StopIteration()
def add(self, fqdn, value=None):
if value is None:
value = {}
value.update({'fqdn': fqdn})
result = Fqdn.create(value)
if result:
return result
return False
# noinspection PyBroadException
def load_yaml(self, file):
try:
            with open(file, 'r') as f:
                fqdn_list = yaml.load(f, Loader=yaml.SafeLoader)
except Exception:
fqdn_list = {}
logging.info(" load file content {}".format(str(fqdn_list)))
for fqdn in fqdn_list:
value = fqdn_list[fqdn]
value['backend'] = '\n'.join(value['backend'])
if self.exists(fqdn):
Fqdn(fqdn).update(value)
else:
self.add(fqdn, value)
return fqdn_list
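    # Illustrative note (not part of the original source): load_yaml() expects a
    # YAML mapping of fqdn -> properties, where 'backend' is given as a list of
    # lines that is joined with newlines before storage (see the '\n'.join above).
    # A hypothetical input could look like the snippet below; every property name
    # other than 'backend' and 'mode' follows the project's DataModel and is an
    # assumption here:
    #
    #   www.example.com:
    #     mode: http
    #     backend:
    #       - "server srv1 10.0.0.1:8080 check"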
def json(self, owner=None):
results = []
for fqdn in super().list():
if Fqdn(fqdn).owner == owner or owner is None:
results.append(Fqdn(fqdn).json())
return results
def publish(self, owner=None):
results = []
for fqdn in super().list():
if Fqdn(fqdn).owner == owner or owner is None:
if Fqdn(fqdn).publish():
results.append(Fqdn(fqdn).json())
logging.info(str(results))
return results
def unpublish(self, owner=None):
results = []
for fqdn in super().list():
if Fqdn(fqdn).owner == owner or owner is None:
if Fqdn(fqdn).unpublish():
results.append(Fqdn(fqdn).json())
logging.info(str(results))
return results
class Fqdn(DataCasting, object):
def __init__(self, fqdn=None):
super().__init__('fqdn')
self.fqdn = fqdn
self.props = list(DataModel['haproxy']['casting']['fqdns']['fqdn'])
self.props.remove('fqdn')
if fqdn is not None:
self.load()
@property
def key(self):
return self.fqdn
@property
def value(self):
return self.dict()
@property
def backend_name(self):
return self.fqdn + '-' + self.owner
@property
def front_type(self):
if self.extended == "true":
return "frontex"
else:
return "front"
@property
def frontend_content(self):
if self.extended == "true":
self.frontex = ""
if self.mode == "tcp":
self.frontex = "use_backend {backend}-tcp if {{ req_ssl_sni -i {subdom} {fqdn} }}"
if self.mode == "http":
self.frontex = "acl {backend} hdr_end(host) -i {subdom} {fqdn}\n"
self.frontex += "use_backend {backend}-http if {backend}"
if self.buggyclient == "true":
if self.mode == "tcp":
self.frontex += "\nuse_backend {backend}-tcp if {{ req_ssl_sni -i {subdom} {fqdn}:443 }}"
if self.mode == "http":
self.frontex += "\nacl {backend}-buggyclient hdr_end(host) -i {subdom} {fqdn}:443\n"
self.frontex += "use_backend {backend}-http if {backend}-buggyclient"
subdomainoption = ""
if self.subdomains == "true":
subdomainoption = "-m end"
return self.frontex.format(subdom=subdomainoption, backend=self.backend_name, fqdn=self.fqdn)
else:
return self.backend_name
def exists(self, key=""):
""" check if an fqdn of any mode exists """
mode = self.mode
self.mode = 'tcp'
if super().exists():
self.mode = mode
return True
self.mode = 'http'
if super().exists():
self.mode = mode
return True
return False
@classmethod
def create(cls, payload):
result = cls(payload['fqdn'])
if 'subdomains' in payload and payload['subdomains'] == "true":
result.extended = "true"
if 'buggyclient' in payload and payload['buggyclient'] == "true":
result.extended = "true"
result.update(payload)
result.save()
return result
def update(self, value):
super().update(value)
self.save()
if self.is_publish():
self.publish()
return self
@staticmethod
def timestamp():
return datetime.datetime.now().timestamp()
# noinspection PyBroadException
@staticmethod
def timeout(timepoint, delta=10):
if datetime.datetime.now().timestamp() > (timepoint + delta):
return True
return False
def spiidgen(self):
maxwaittime = 10
maxstucktime = 5 * 60
now = self.timestamp()
# while something is tested
if not Endpoint('test', '', '', '').is_empty() and not self.timeout(now, maxwaittime):
logging.warning(" hoho, it's not empty. Waiting concurrency to terminate.")
while not Endpoint('test', '', '', '').is_empty() and not self.timeout(now, maxwaittime):
time.sleep(0.3)
# some entries are stuck ! so clean it
if not Endpoint('test', '', '', '').is_empty():
logging.warning(" hoho, it wasn't empty")
for test in Endpoint('test', '', '', '').list():
if now - float(test.split('-')[-1]) > maxstucktime:
logging.warning(" hoho, something is stuck :{}".format(test))
Endpoint('test', test, '', '').delete(recurse=True)
self.spiid = "{}-{}".format(self.backend_name, str(self.timestamp()))
return self.spiid
def safe(self):
self.message = ""
if self.state not in self.states:
logging.info(' bad state {} for {}'.format(self.state, self.key))
self.message = "state: {} unknown; cleaned to 'unpublish'.\n".format(self.state)
self.state = "unpublish"
self.save()
if self.mode not in ['http', 'tcp']:
logging.info(' bad mode {} for {}'.format(self.mode, self.key))
self.message = self.message + "mode: {} unknown; cleaned to 'http'.\n".format(self.mode)
self.mode = "http"
self.save()
return self
def __repr__(self):
return self.fqdn
def getPath(self):
return super().getPath() + self.fqdn
def json(self):
result = self.dict()
logging.info(' self.dict : {}'.format(str(result)))
result['fqdn'] = result.pop('key')
result['backend'] = result['backend'].split('\n')
if result['message'] == "":
result['message'] = []
else:
result['message'] = result['message'].split('\n')
return result
def destroy(self):
f = self
if not self.is_unpublish():
self.unpublish()
if self.delete():
f.state = "deleted"
return f
f.state = "not_deleted"
return f
def is_publish(self):
return self.states[self.state] == 'publish'
def is_publish_fail(self):
return self.state == 'publish_failed'
def is_unpublish(self):
return self.states[self.state] == 'unpublish'
def unpublish(self):
if self.is_unpublish():
return self
if (not Endpoint('publ', self.front_type, self.mode, self.fqdn).exists() and
not Endpoint('publ', 'back', self.mode, self.backend_name).exists()):
self.state = 'unpublish'
return self
Endpoint('publ', self.front_type, self.mode, self.fqdn).delete()
Endpoint('publ', 'back', self.mode, self.backend_name).delete()
if not Endpoint('publ', self.front_type, self.mode, self.fqdn).exists():
self.state = 'unpublish'
self.save()
return self
@property
def testpoint_backend(self):
return Endpoint('test', 'back', self.mode, self.backend_name, spiid=self.spiid)
@property
def testpoint_frontend(self):
return Endpoint('test', self.front_type, self.mode, self.fqdn, spiid=self.spiid)
@property
def failpoint_backend(self):
return Endpoint('fail', 'back', self.mode, self.backend_name, spiid=self.spiid)
@property
def failpoint_frontend(self):
return Endpoint('fail', self.front_type, self.mode, self.fqdn, spiid=self.spiid)
@property
def publpoint_backend(self):
return Endpoint('publ', 'back', self.mode, self.backend_name)
@property
def publpoint_frontend(self):
return Endpoint('publ', self.front_type, self.mode, self.fqdn)
def publish(self):
logging.info(' fqdn publish start')
# made @ update method
# if self.is_publish():
# self.unpublish()
self.spiidgen()
logging.info(str(self.dict()))
# push backend first
# cleanup failing
if self.failpoint_backend.exists():
self.failpoint_backend.delete(recurse=True)
logging.info(' haprestio publish : delete logfail backend {}'.format(self.backend_name))
logging.info(' haprestio publish : test push backend {}'.format(self.backend_name))
self.testpoint_backend.update(self.backend)
validate = ConsulTemplate(self.spiid)
if not validate.evaluation():
self.testpoint_backend.delete()
self.message = validate.returnerr
self.failpoint_backend.update(validate.returnerr)
logging.info(" fail publish backend {} : {}".format(self.backend_name, self.message))
self.state = "publish_failed"
self.save()
return self
# push then frontend
# cleanup failing
if self.failpoint_frontend.exists():
self.failpoint_frontend.delete(recurse=True)
logging.info(' haprestio publish : delete logfail frontend {}'.format(self.backend_name))
logging.info(' haprestio publish : test push frontend {}'.format(self.backend_name))
self.testpoint_frontend.update(self.frontend_content)
validate = ConsulTemplate(self.spiid)
if not validate.evaluation():
self.testpoint_frontend.delete()
self.message = validate.returnerr
self.failpoint_frontend.update(validate.returnerr)
logging.info(" fail publish backend {} : {}".format(self.backend_name, self.message))
self.state = "publish_failed"
self.save()
return self
self.testpoint_backend.delete()
self.testpoint_frontend.delete()
self.publpoint_backend.update(self.backend)
self.publpoint_frontend.update(self.frontend_content)
self.message = ""
self.state = "published"
self.save()
return self
|
the-stack_106_19649
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import subprocess
import socket
try:
import urllib.parse
url_parser = urllib.parse.urlparse
except:
try:
import urlparse
url_parser = urlparse.urlparse
except:
print('urllib or urlparse is needed')
sys.exit(1)
import framework.rpc
import framework.daemon
import framework.wallet
USAGE = 'usage: python -i console.py [[[scheme]<host>:]<port> [[[scheme]<host>:]<port>...]]'
daemons = []
wallets = []
rpcs = []
for n in range(1, len(sys.argv)):
scheme='http'
host='127.0.0.1'
port=None
try:
try:
port = int(sys.argv[n])
except:
t = url_parser(sys.argv[n], allow_fragments = False)
scheme = t.scheme or scheme
host = t.hostname or host
port = t.port or port
if scheme != 'http' and scheme != 'https':
raise Exception(USAGE)
        if port is None or port <= 0 or port > 65535:
            raise Exception(USAGE)
except Exception as e:
print('Error: ' + str(e))
raise Exception(USAGE)
# check for open port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
if s.connect_ex((host, port)) != 0:
raise Exception('No wallet or daemon RPC on port ' + str(port))
s.close()
# both wallet and daemon have a get_version JSON RPC
rpc = framework.rpc.JSONRPC('{protocol}://{host}:{port}'.format(protocol=scheme, host=host, port=port))
get_version = {
'method': 'get_version',
'jsonrpc': '2.0',
'id': '0'
}
try:
res = rpc.send_json_rpc_request(get_version)
except Exception as e:
raise Exception('Failed to call version RPC: ' + str(e))
if 'version' not in res:
raise Exception('Server is not a MKEcoin process')
if 'status' in res:
daemons.append(framework.daemon.Daemon(port=port))
rpcs.append(daemons[-1])
else:
wallets.append(framework.wallet.Wallet(port=port))
rpcs.append(wallets[-1])
# add tab completion if we can: https://stackoverflow.com/questions/246725
try:
import readline
except:
pass
else:
import rlcompleter
readline.parse_and_bind('tab: complete')
if len(daemons) == 1:
daemon = daemons[0]
if len(wallets) == 1:
wallet = wallets[0]
didx = 0
widx = 0
for rpc in rpcs:
if type(rpc) == framework.daemon.Daemon:
var = "daemon" if len(daemons) == 1 else "daemons[" + str(didx) + "]"
didx += 1
else:
var = "wallet" if len(wallets) == 1 else "wallets[" + str(widx) + "]"
widx += 1
    print('Variable \'%s\' connected to %s RPC on %s:%u' % (var, 'daemon' if type(rpc) == framework.daemon.Daemon else 'wallet', rpc.host, rpc.port))
|
the-stack_106_19650
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: DO NOT SUBMIT without one-line documentation for dataset.
"""
import os
import numpy as np
from typing import List
from dataclasses import dataclass
@dataclass
class MaskingCurve:
masking_frequency: float
masking_level: int
probe_level: int
probe_frequencies: List[float]
decibel_masking: List[float]
probe_frequencies_bark: List[float]
bel_masking: List[float]
class MaskingDataset(object):
def __init__(self):
self._curves = {}
self.critical_bands = [
20, 100, 200, 300, 400, 505, 630, 770, 915, 1080, 1265, 1475, 1720,
1990, 2310, 2690, 3125, 3675, 4350, 5250, 6350, 7650, 9400, 11750,
15250, 20000
]
def add_curve(self, curve: MaskingCurve):
if curve.masking_frequency not in self._curves:
self._curves[curve.masking_frequency] = {}
if curve.probe_level not in self._curves[curve.masking_frequency]:
self._curves[curve.masking_frequency][curve.probe_level] = {}
self._curves[curve.masking_frequency][curve.probe_level][
curve.masking_level] = curve
@staticmethod
def binary_search(arr, item):
"""Finds closest index to the left of an item in arr."""
low = 0
high = len(arr) - 1
mid = 0
while low <= high:
mid = (high + low) // 2
# Check if item is present at mid
if arr[mid] < item:
low = mid
# If item is greater, ignore left half
elif arr[mid] > item:
high = mid
# If item is smaller, ignore right half
else:
return mid
if arr[high] <= item:
return high
if arr[low] <= item < arr[low + 1]:
return low
return mid
def frequency_to_cb(self, frequency: float) -> int:
return self.binary_search(self.critical_bands, frequency)
def read_data(self, directory: str, input_file: str):
metadata = input_file.strip(".txt").split("_")
masking_frequency = metadata[1]
probe_level = metadata[3]
with open(os.path.join(directory, input_file), "r") as infile:
for line in infile:
split_line = line.split(":")
masking_level = split_line[0].split()[1]
data_points = split_line[1].split(";")
probe_frequencies = []
probe_frequencies_bark = []
decibel_masking = []
bel_masking = []
for point in data_points:
split_point = point.split(",")
probe_frequency = float(split_point[0])
probe_cb = self.frequency_to_cb(probe_frequency)
probe_frequencies.append(probe_frequency)
decibel_masking.append(float(split_point[1]))
probe_frequencies_bark.append(probe_cb)
bel_masking.append(float(split_point[1]) / 10)
masking_curve = MaskingCurve(float(masking_frequency),
int(masking_level),
int(probe_level),
probe_frequencies,
decibel_masking,
probe_frequencies_bark,
bel_masking)
self.add_curve(masking_curve)
return
def get_curve_data(self, masking_frequency: float, probe_level: int,
masking_level: int):
if masking_frequency not in self._curves:
raise ValueError(
"No curve for masking frequency {}".format(masking_frequency))
if probe_level not in self._curves[masking_frequency]:
raise ValueError("No curve for probe level {}".format(probe_level))
if masking_level not in self._curves[masking_frequency][probe_level]:
raise ValueError("No curve for masking level {}".format(masking_level))
curve = self._curves[masking_frequency][probe_level][masking_level]
data = list(zip(curve.probe_frequencies_bark, curve.bel_masking))
return data
def get_all_data(self):
for masking_frequency, probe_masker_curves in self._curves.items():
for probe_level, masker_curves in probe_masker_curves.items():
for masker_level, curve_data in masker_curves.items():
for (probe_frequency_bark, bel_masking, probe_frequency) in zip(curve_data.probe_frequencies_bark,
curve_data.bel_masking,
curve_data.probe_frequencies):
yield {"masker_frequency": masking_frequency,
"probe_level": probe_level,
"probe_frequency": probe_frequency,
"masker_level": masker_level,
"probe_frequency_bark": probe_frequency_bark,
"target_bel_masking": bel_masking}
|
the-stack_106_19651
|
# Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
#
# This program and the accompanying materials are made available under
# the terms of the under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from setuptools import setup
from setuptools.command.test import test as TestCommand
import io
import os
import sys
import ephemerol
here = os.path.abspath(os.path.dirname(__file__))
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename) as f:
buf.append(f.read())
return sep.join(buf)
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(
name='ephemerol',
version=ephemerol.__version__,
url='https://github.com/Pivotal-Field-Engineering/ephemerol',
license='None',
author='Chris DeLashmutt',
tests_require=['pytest', 'mock'],
install_requires=['terminaltables'],
cmdclass={'test': PyTest},
author_email='[email protected]',
description='A Cloud Native readiness scanner',
packages=['ephemerol'],
include_package_data=True,
platforms='any',
extras_require={
'testing': ['pytest'],
}
)
|
the-stack_106_19654
|
lr_mult = 4
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=2500,
warmup_ratio=0.001,
step=[55*lr_mult, 68*lr_mult])
total_epochs = 80*lr_mult
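# With lr_mult = 4 the LR schedule decays at epochs 220 and 272 (55*4 and 68*4)
# out of a 320-epoch (80*4) run.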
checkpoint_config = dict(interval=total_epochs)
log_config = dict(interval=100, hooks=[dict(type='TextLoggerHook')])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
dataset_type = 'RetinaFaceDataset'
data_root = 'data/crowdhuman/'
train_root = 'data/crowdhuman/train/'
val_root = 'data/crowdhuman/val/'
img_norm_cfg = dict(
mean=[127.5, 127.5, 127.5], std=[128.0, 128.0, 128.0], to_rgb=True)
data = dict(
samples_per_gpu=64,
workers_per_gpu=8,
train=dict(
type='RetinaFaceDataset',
ann_file='data/crowdhuman/train/label_fullbody.txt',
img_prefix='data/crowdhuman/train/images/',
pipeline=[
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, with_keypoints=True),
dict(
type='RandomSquareCrop',
crop_choice=[
0.3, 0.45, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0
],
bbox_clip_border=False),
dict(
type='Resize',
img_scale=(640, 640),
keep_ratio=False,
bbox_clip_border=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Normalize',
mean=[127.5, 127.5, 127.5],
std=[128.0, 128.0, 128.0],
to_rgb=True),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=[
'img', 'gt_bboxes', 'gt_labels', 'gt_bboxes_ignore',
'gt_keypointss'
])
]),
val=dict(
type='RetinaFaceDataset',
ann_file='data/crowdhuman/val/label_fullbody.txt',
img_prefix='data/crowdhuman/val/images/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(640, 640),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
#dict(type='RandomFlip', flip_ratio=0.0),
dict(
type='Normalize',
mean=[127.5, 127.5, 127.5],
std=[128.0, 128.0, 128.0],
to_rgb=True),
dict(type='Pad', size=(640, 640), pad_val=0),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]),
test=dict(
type='RetinaFaceDataset',
ann_file='data/crowdhuman/val/label_fullbody.txt',
img_prefix='data/crowdhuman/val/images/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(640, 640),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
#dict(type='RandomFlip', flip_ratio=0.0),
dict(
type='Normalize',
mean=[127.5, 127.5, 127.5],
std=[128.0, 128.0, 128.0],
to_rgb=True),
dict(type='Pad', size=(640, 640), pad_val=0),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]))
model = dict(
type='SCRFD',
backbone=dict(
type='ResNetV1e',
depth=0,
block_cfg=dict(
block='BasicBlock',
stage_blocks=(3, 5, 3, 2),
stage_planes=[24, 48, 48, 80]),
base_channels=24,
num_stages=4,
out_indices=(0, 1, 2, 3),
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=False,
style='pytorch'),
neck=dict(
type='PAFPN',
in_channels=[24, 48, 48, 80],
out_channels=24,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='SCRFDHead',
num_classes=1,
in_channels=24,
stacked_convs=2,
feat_channels=64,
norm_cfg=dict(type='BN', requires_grad=True),
#norm_cfg=dict(type='GN', num_groups=16, requires_grad=True),
cls_reg_share=True,
strides_share=False,
scale_mode=2,
#anchor_generator=dict(
# type='AnchorGenerator',
# ratios=[1.0],
# scales = [1,2],
# base_sizes = [16, 64, 256],
# strides=[8, 16, 32]),
anchor_generator=dict(
type='AnchorGenerator',
ratios=[2.0],
scales = [3],
base_sizes = [8, 16, 32, 64, 128],
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_dfl=False,
reg_max=8,
loss_bbox=dict(type='DIoULoss', loss_weight=2.0),
use_kps=True,
loss_kps=dict(
type='SmoothL1Loss', beta=0.1111111111111111, loss_weight=0.1),
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=-1,
min_bbox_size=0,
score_thr=0.02,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=-1)))
train_cfg = dict(
assigner=dict(type='ATSSAssigner', topk=9, mode=0),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=-1,
min_bbox_size=0,
score_thr=0.02,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=-1)
evaluation = dict(interval=40, metric='mAP')
|
the-stack_106_19658
|
# Copyright 2018 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import logging
from azure.mgmt.resource.resources.models import GenericResource, ResourceGroupPatchable
from c7n_azure.utils import is_resource_group
class TagHelper:
log = logging.getLogger('custodian.azure.utils.TagHelper')
@staticmethod
def update_resource_tags(tag_action, resource, tags):
client = tag_action.session.client('azure.mgmt.resource.ResourceManagementClient')
# resource group type
if is_resource_group(resource):
params_patch = ResourceGroupPatchable(
tags=tags
)
client.resource_groups.update(
resource['name'],
params_patch,
)
# other Azure resources
else:
# deserialize the original object
az_resource = GenericResource.deserialize(resource)
if not tag_action.manager.tag_operation_enabled(az_resource.type):
raise NotImplementedError('Cannot tag resource with type {0}'
.format(az_resource.type))
api_version = tag_action.session.resource_api_version(resource['id'])
# create a PATCH object with only updates to tags
tags_patch = GenericResource(tags=tags)
client.resources.update_by_id(resource['id'], api_version, tags_patch)
@staticmethod
def remove_tags(tag_action, resource, tags_to_delete):
# get existing tags
tags = resource.get('tags', {})
# only determine if any tags_to_delete exist on the resource
tags_exist = False
for tag in tags_to_delete:
if tag in tags:
tags_exist = True
break
# only call the resource update if there are tags to delete tags
if tags_exist:
resource_tags = {key: tags[key] for key in tags if key not in tags_to_delete}
TagHelper.update_resource_tags(tag_action, resource, resource_tags)
@staticmethod
def add_tags(tag_action, resource, tags_to_add):
new_or_updated_tags = False
# get existing tags
tags = resource.get('tags', {})
# add or update tags
for key in tags_to_add:
# nothing to do if the tag and value already exists on the resource
if key in tags:
if tags[key] != tags_to_add[key]:
new_or_updated_tags = True
else:
# the tag doesn't exist or the value was updated
new_or_updated_tags = True
tags[key] = tags_to_add[key]
# call the arm resource update method if there are new or updated tags
if new_or_updated_tags:
TagHelper.update_resource_tags(tag_action, resource, tags)
@staticmethod
def get_tag_value(resource, tag, utf_8=False):
"""Get the resource's tag value."""
tags = {k.lower(): v for k, v in resource.get('tags', {}).items()}
value = tags.get(tag, False)
if value is not False:
if utf_8:
value = value.encode('utf8').decode('utf8')
return value
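# Illustrative usage sketch (not part of the original module; it only runs if
# the c7n_azure / azure SDK imports at the top of this file resolve).
# get_tag_value() just needs a dict with a 'tags' mapping, so the resource
# below is a made-up stand-in rather than a real Azure resource.
if __name__ == "__main__":
    fake_resource = {'tags': {'Environment': 'dev', 'Owner': 'team-a'}}
    print(TagHelper.get_tag_value(fake_resource, 'environment'))  # 'dev' (lookup keys are lowercased)
    print(TagHelper.get_tag_value(fake_resource, 'missing'))      # False when the tag is absent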
|
the-stack_106_19659
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.base.payload import Payload
from pants.build_graph.target import Target
from pants.contrib.go.targets.go_local_source import GoLocalSource
from pants.contrib.go.targets.go_target import GoTarget
class GoThriftLibrary(Target):
"""A Go library generated from Thrift IDL files."""
def __init__(self,
address=None,
payload=None,
sources=None,
**kwargs):
"""
:param sources: thrift source files
:type sources: :class:`pants.source.wrapped_globs.FilesetWithSpec` or list of strings. Paths
are relative to the BUILD file's directory.
:param import_path: Deprecated: unused.
"""
payload = payload or Payload()
payload.add_field('sources',
self.create_sources_field(sources, address.spec_path, key_arg='sources'))
super().__init__(payload=payload, address=address, **kwargs)
@classmethod
def alias(cls):
return "go_thrift_library"
class GoThriftGenLibrary(GoTarget):
def __init__(self, sources=None, address=None, payload=None, **kwargs):
payload = payload or Payload()
payload.add_fields({
'sources': self.create_sources_field(sources=sources,
sources_rel_path=address.spec_path,
key_arg='sources'),
})
super().__init__(address=address, payload=payload, **kwargs)
@property
def import_path(self):
"""The import path as used in import statements in `.go` source files."""
return GoLocalSource.local_import_path(self.target_base, self.address)
|
the-stack_106_19660
|
# -*- coding: utf-8 -*-
#
# multimeter_file.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Multimeter to file example
--------------------------
This file demonstrates recording from an `iaf_cond_alpha` neuron using a
multimeter and writing data to file.
'''
'''
First, the necessary modules for simulation and plotting are imported.
The simulation kernel is put back to its initial state using `ResetKernel`.
'''
import nest
import numpy
import pylab
nest.ResetKernel()
'''
With `SetKernelStatus`, global properties of the simulation kernel can be
specified. The following properties are related to writing to file:
* `overwrite_files` is set to True to permit overwriting of an existing file.
* `data_path` is the path to which all data is written. It is given relative to
the current working directory.
* `data_prefix` allows specifying a common prefix for all data files.
'''
nest.SetKernelStatus({"overwrite_files": True,
"data_path": "",
"data_prefix": ""})
'''
For illustration, the recordables of the `iaf_cond_alpha` neuron model are
displayed. This model is an implementation of a spiking neuron using
integrate-and-fire dynamics with conductance-based synapses. Incoming spike
events induce a post-synaptic change of conductance modelled by an alpha
function.
'''
print("iaf_cond_alpha recordables: {0}".format(
nest.GetDefaults("iaf_cond_alpha")["recordables"]))
'''
A neuron, a multimeter as recording device and two spike generators for
excitatory and inhibitory stimulation are instantiated. The command `Create`
expects a model type and, optionally, the desired number of nodes and a
dictionary of parameters to overwrite the default values of the model.
* For the neuron, the rise time of the excitatory synaptic alpha function
in ms `tau_syn_ex` and the reset potential of the membrane in mV `V_reset`
are specified.
* For the multimeter, the time interval for recording in ms `interval` and a
selection of measures to record (the membrane voltage in mV `V_m` and the
  excitatory `g_ex` and inhibitory `g_in` synaptic conductances in nS) are set.
In addition, more parameters can be modified for writing to file:
- `withgid` is set to True to record the global id of the observed node(s).
(default: False).
- `to_file` indicates whether to write the recordings to file and is set
to True.
- `label` specifies an arbitrary label for the device. It is used instead of
the name of the model in the output file name.
* For the spike generators, the spike times in ms `spike_times` are given
explicitly.
'''
n = nest.Create("iaf_cond_alpha",
params={"tau_syn_ex": 1.0, "V_reset": -70.0})
m = nest.Create("multimeter",
params={"interval": 0.1,
"record_from": ["V_m", "g_ex", "g_in"],
"withgid": True,
"to_file": True,
"label": "my_multimeter"})
s_ex = nest.Create("spike_generator",
params={"spike_times": numpy.array([10.0, 20.0, 50.0])})
s_in = nest.Create("spike_generator",
params={"spike_times": numpy.array([15.0, 25.0, 55.0])})
'''
Next, the spike generators are connected to the neuron with `Connect`. Synapse
specifications can be provided in a dictionary. In this example of a
conductance-based neuron, the synaptic weight `weight` is given in nS.
Note that it is positive for excitatory and negative for inhibitory
connections.
'''
nest.Connect(s_ex, n, syn_spec={"weight": 40.0})
nest.Connect(s_in, n, syn_spec={"weight": -20.0})
nest.Connect(m, n)
'''
A network simulation with a duration of 100 ms is started with `Simulate`.
'''
nest.Simulate(100.)
'''
After the simulation, the recordings are obtained from the multimeter via the
key `events` of the status dictionary accessed by `GetStatus`. `times`
indicates the recording times stored for each data point. They are recorded
if the parameter `withtime` of the multimeter is set to True which is the
default case.
'''
events = nest.GetStatus(m)[0]["events"]
t = events["times"]
'''
Finally, the time courses of the membrane voltage and the synaptic
conductance are displayed.
'''
pylab.clf()
pylab.subplot(211)
pylab.plot(t, events["V_m"])
pylab.axis([0, 100, -75, -53])
pylab.ylabel("membrane potential (mV)")
pylab.subplot(212)
pylab.plot(t, events["g_ex"], t, events["g_in"])
pylab.axis([0, 100, 0, 45])
pylab.xlabel("time (ms)")
pylab.ylabel("synaptic conductance (nS)")
pylab.legend(("g_exc", "g_inh"))
|
the-stack_106_19661
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019-2021 Ramon van der Winkel.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.http import Http404
from django.urls import reverse
from django.views.generic import TemplateView
from django.contrib.auth.mixins import UserPassesTestMixin
from Functie.rol import Rollen, rol_get_huidige, rol_get_huidige_functie
from Plein.menu import menu_dynamics
from Competitie.models import (LAAG_RK, DeelCompetitie,
DeelcompetitieKlasseLimiet, KampioenschapSchutterBoog)
TEMPLATE_VERENIGING_LIJST_RK = 'vereniging/lijst-rk-selectie.dtl'
class VerenigingLijstRkSelectieView(UserPassesTestMixin, TemplateView):
""" Deze view laat de kandidaat-schutters van en RK zien van de vereniging van de HWL,
met mogelijkheid voor de HWL om deze te bevestigen.
"""
# class variables shared by all instances
template_name = TEMPLATE_VERENIGING_LIJST_RK
    raise_exception = True      # raise PermissionDenied when test_func returns False
toon_alles = False
def test_func(self):
""" called by the UserPassesTestMixin to verify the user has permissions to use this view """
rol_nu = rol_get_huidige(self.request)
return rol_nu in (Rollen.ROL_HWL, Rollen.ROL_WL)
def get_context_data(self, **kwargs):
""" called by the template system to get the context data for the template """
context = super().get_context_data(**kwargs)
        # there are 2 situations:
        # 1) the regional competitions have not yet been closed --> refers to the rayon interim-standings page
        # 2) the participants for the RK have been established --> show the list
try:
            rk_deelcomp_pk = int(kwargs['rk_deelcomp_pk'][:6])  # truncating provides protection
rk_deelcomp_rk = (DeelCompetitie
.objects
.select_related('competitie', 'nhb_rayon')
.get(pk=rk_deelcomp_pk, laag=LAAG_RK))
except (ValueError, DeelCompetitie.DoesNotExist):
raise Http404('Geen valide competitie')
if not rk_deelcomp_rk.heeft_deelnemerslijst:
raise Http404('Geen deelnemerslijst beschikbaar')
context['deelcomp_rk'] = rk_deelcomp_rk
rol_nu, functie_nu = rol_get_huidige_functie(self.request)
deelnemers = (KampioenschapSchutterBoog
.objects
.select_related('deelcompetitie',
'klasse__indiv',
'sporterboog__sporter',
'bij_vereniging')
.filter(deelcompetitie=rk_deelcomp_rk,
                              volgorde__lte=48)              # show at most 48 shooters per class
                      .order_by('klasse__indiv__volgorde',   # group per class
                                'volgorde',                  # ascending by volgorde (duplicates possible)
                                '-gemiddelde'))              # descending by average
if not self.toon_alles:
deelnemers = deelnemers.filter(bij_vereniging=functie_nu.nhb_ver)
        wkl2limiet = dict()     # [pk] = count
for limiet in (DeelcompetitieKlasseLimiet
.objects
.select_related('klasse')
.filter(deelcompetitie=rk_deelcomp_rk)):
wkl2limiet[limiet.klasse.pk] = limiet.limiet
# for
context['kan_wijzigen'] = kan_wijzigen = (rol_nu == Rollen.ROL_HWL)
aantal_klassen = 0
keep = list()
groepje = list()
behoud_groepje = False
klasse = -1
for deelnemer in deelnemers:
deelnemer.break_klasse = (klasse != deelnemer.klasse.indiv.volgorde)
if deelnemer.break_klasse:
if len(groepje) and behoud_groepje:
aantal_klassen += 1
keep.extend(groepje)
groepje = list()
behoud_groepje = False
deelnemer.klasse_str = deelnemer.klasse.indiv.beschrijving
klasse = deelnemer.klasse.indiv.volgorde
try:
limiet = wkl2limiet[deelnemer.klasse.pk]
except KeyError:
limiet = 24
sporter = deelnemer.sporterboog.sporter
deelnemer.naam_str = "[%s] %s" % (sporter.lid_nr, sporter.volledige_naam())
if deelnemer.bij_vereniging == functie_nu.nhb_ver:
behoud_groepje = True
deelnemer.mijn_vereniging = True
if kan_wijzigen:
deelnemer.url_wijzig = reverse('Competitie:wijzig-status-rk-deelnemer',
kwargs={'deelnemer_pk': deelnemer.pk})
if deelnemer.rank > limiet:
deelnemer.is_reserve = True
groepje.append(deelnemer)
# for
if len(groepje) and behoud_groepje:
aantal_klassen += 1
keep.extend(groepje)
context['deelnemers'] = keep
context['aantal_klassen'] = aantal_klassen
if self.toon_alles:
context['url_filtered'] = reverse('Vereniging:lijst-rk',
kwargs={'rk_deelcomp_pk': rk_deelcomp_rk.pk})
else:
context['url_alles'] = reverse('Vereniging:lijst-rk-alles',
kwargs={'rk_deelcomp_pk': rk_deelcomp_rk.pk})
menu_dynamics(self.request, context, actief='vereniging')
return context
class VerenigingLijstRkSelectieAllesView(VerenigingLijstRkSelectieView):
""" Deze view laat alle kandidaat-schutters van en RK zien,
met mogelijkheid voor de HWL om deze te bevestigen.
"""
toon_alles = True
# end of file
|
the-stack_106_19664
|
from django.urls import path
from django_filters.views import FilterView
from autobuyfast.cars.views import ( # CarLikeFunc,
AllSearchView,
CarCreateView,
CarDeleteView,
CarSold,
CarUpdateView,
CompareCreateView,
CompareView,
car_detail_view,
cars_list_view,
filter_car_search_view,
filter_home_car_search_view,
save_search,
unsave_search,
unwatch_car,
watch_car,
)
app_name = "cars"
urlpatterns = [
path("", view=cars_list_view, name="list"),
path("create/", view=CarCreateView.as_view(), name="create"),
path("compare-cars/<slug>/update", view=CompareCreateView.as_view(), name="compare"),
path("compare-cars/<slug>/", view=CompareView.as_view(), name="compare_detail"),
path("detail/<slug>/update", view=CarUpdateView.as_view(), name="update"),
path("detail/<slug>/delete", view=CarDeleteView, name="delete"),
path("detail/<slug>/sold", view=CarSold, name="sold"),
path("detail/<slug>/", view=car_detail_view, name="detail"),
path("search/", view=filter_car_search_view, name="search"),
path("search/alt/", view=filter_home_car_search_view, name="home_search"),
path("watch/<slug>/", view=watch_car, name="watch_add"),
path("unwatch/<slug>/", view=unwatch_car, name="watch_remove"),
path("save/search/", view=save_search, name="save_search"),
path("save/search/<int:pk>/", view=unsave_search, name="unsave_search"),
path("search/list/", view=AllSearchView.as_view(), name="search_list"),
]
|
the-stack_106_19665
|
# Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import falcon
from monasca_common.rest import utils as rest_utils
from monasca_log_api.app.base.validation import validate_authorization
from monasca_log_api.app.controller.api import healthcheck_api
from monasca_log_api.healthcheck import kafka_check
class HealthChecks(healthcheck_api.HealthChecksApi):
# response configuration
CACHE_CONTROL = ['must-revalidate', 'no-cache', 'no-store']
# response codes
HEALTHY_CODE_GET = falcon.HTTP_OK
HEALTHY_CODE_HEAD = falcon.HTTP_NO_CONTENT
NOT_HEALTHY_CODE = falcon.HTTP_SERVICE_UNAVAILABLE
def __init__(self):
self._kafka_check = kafka_check.KafkaHealthCheck()
super(HealthChecks, self).__init__()
def on_head(self, req, res):
validate_authorization(req, ['log_api:healthcheck:head'])
res.status = self.HEALTHY_CODE_HEAD
res.cache_control = self.CACHE_CONTROL
def on_get(self, req, res):
# at this point we know API is alive, so
# keep up good work and verify kafka status
validate_authorization(req, ['log_api:healthcheck:get'])
kafka_result = self._kafka_check.healthcheck()
# in case it'd be unhealthy,
# message will contain error string
status_data = {
'kafka': kafka_result.message
}
# Really simple approach, ideally that should be
# part of monasca-common with some sort of registration of
# healthchecks concept
res.status = (self.HEALTHY_CODE_GET
if kafka_result.healthy else self.NOT_HEALTHY_CODE)
res.cache_control = self.CACHE_CONTROL
res.body = rest_utils.as_json(status_data)
|
the-stack_106_19667
|
import numpy as np
import pandas as pd
import math
from copy import deepcopy
from abc import abstractmethod, ABCMeta
from scipy.interpolate import interp1d
from bids.utils import listify
from itertools import chain
from six import add_metaclass
from bids.utils import matches_entities
@add_metaclass(ABCMeta)
class BIDSVariable(object):
''' Base representation of a column in a BIDS project. '''
# Columns that define special properties (e.g., onset, duration). These
# will be stored separately from the main data object, and are accessible
# as properties on the BIDSVariable instance.
_property_columns = set()
def __init__(self, name, values, source):
self.name = name
self.values = values
self.source = source
self._index_entities()
def clone(self, data=None, **kwargs):
''' Clone (deep copy) the current column, optionally replacing its
data and/or any other attributes.
Args:
data (DataFrame, ndarray): Optional new data to substitute into
the cloned column. Must have same dimensionality as the
original.
kwargs (dict): Optional keyword arguments containing new attribute
values to set in the copy. E.g., passing `name='my_name'`
would set the `.name` attribute on the cloned instance to the
passed value.
'''
result = deepcopy(self)
if data is not None:
if data.shape != self.values.shape:
raise ValueError("Replacement data has shape %s; must have "
"same shape as existing data %s." %
(data.shape, self.values.shape))
result.values = pd.DataFrame(data)
if kwargs:
for k, v in kwargs.items():
setattr(result, k, v)
# Need to update name on Series as well
# result.values.name = kwargs.get('name', self.name)
return result
def filter(self, filters=None, query=None, strict=False, inplace=False):
''' Returns a copy of the current Variable with only rows that match
the filters retained.
Args:
filters (dict): Dictionary of filters to apply. Keys can be either
'amplitude' or any named entity. Values must be single values
or lists.
query (str): Optional query string to pass to df.query(). Will not
be validated in any way, so must have valid column names. Takes
precedence over filters in the event that both are passed.
strict (bool): By default, keys in 'filters' that cannot be found
in the Variable will be silently ignored. If strict=True, None
will be returned in such cases.
inplace (bool): If True, filtering is performed in place. If False,
a filtered copy of the Variable is returned.
Returns:
A BIDSVariable, or None if no rows are left after filtering.
'''
if filters is None and query is None:
raise ValueError("Either the 'filters' or the 'query' argument "
"must be provided!")
if filters is not None and query is None:
query = []
for name, val in filters.items():
if name != 'amplitude' and name not in self.index.columns:
if strict:
return None
continue
oper = 'in' if isinstance(val, (list, tuple)) else '=='
q = '{name} {oper} {val}'.format(name=name, oper=oper,
val=repr(val))
query.append(q)
query = ' and '.join(query)
var = self if inplace else self.clone()
if query:
inds = self.to_df().query(query).index
var.values = var.values.loc[inds]
var.index = var.index.loc[inds]
if hasattr(self, '_build_entity_index'):
var._build_entity_index()
if not inplace:
return var
@classmethod
def merge(cls, variables, name=None, **kwargs):
''' Merge/concatenate a list of variables along the row axis.
Args:
variables (list): A list of Variables to merge.
name (str): Optional name to assign to the output Variable. By
default, uses the same name as the input variables.
kwargs: Optional keyword arguments to pass onto the class-specific
merge() call. See merge_variables docstring for details.
Returns:
A single BIDSVariable of the same class as the input variables.
Notes: see merge_variables docstring for additional details.
'''
variables = listify(variables)
if len(variables) == 1:
return variables[0]
var_names = set([v.name for v in variables])
if len(var_names) > 1:
raise ValueError("Columns with different names cannot be merged. "
"Column names provided: %s" % var_names)
if name is None:
name = variables[0].name
return cls._merge(variables, name, **kwargs)
@classmethod
@abstractmethod
def _merge(cls, variables, name, **kwargs):
pass
def get_grouper(self, groupby='run'):
''' Return a pandas Grouper object suitable for use in groupby calls.
Args:
groupby (str, list): Name(s) of column(s) defining the grouper
object. Anything that would be valid inside a .groupby() call
on a pandas structure.
Returns:
A pandas Grouper object constructed from the specified columns
of the current index.
'''
return pd.core.groupby._get_grouper(self.index, groupby)[0]
def apply(self, func, groupby='run', *args, **kwargs):
''' Applies the passed function to the groups defined by the groupby
argument. Works identically to the standard pandas df.groupby() call.
Args:
func (callable): The function to apply to each group.
groupby (str, list): Name(s) of column(s) defining the grouping.
args, kwargs: Optional positional and keyword arguments to pass
onto the function call.
'''
grouper = self.get_grouper(groupby)
return self.values.groupby(grouper).apply(func, *args, **kwargs)
def to_df(self, condition=True, entities=True, **kwargs):
''' Convert to a DataFrame, with columns for name and entities.
Args:
condition (bool): If True, adds a column for condition name, and
names the amplitude column 'amplitude'. If False, returns just
onset, duration, and amplitude, and gives the amplitude column
the current column name.
entities (bool): If True, adds extra columns for all entities.
'''
amp = 'amplitude' if condition else self.name
data = pd.DataFrame({amp: self.values.values.ravel()})
for sc in self._property_columns:
data[sc] = getattr(self, sc)
if condition:
data['condition'] = self.name
if entities:
ent_data = self.index.reset_index(drop=True)
data = pd.concat([data, ent_data], axis=1)
return data.reset_index(drop=True)
def matches_entities(self, entities, strict=False):
''' Checks whether current Variable's entities match the input. '''
return matches_entities(self, entities, strict)
def _index_entities(self):
''' Returns a dict of entities for the current Variable.
Note: Only entity key/value pairs common to all rows in the Variable
are returned. E.g., if a Variable contains events extracted from
runs 1, 2 and 3 from subject '01', the returned dict will be
{'subject': '01'}; the runs will be excluded as they vary across
the Variable contents.
'''
constant = self.index.apply(lambda x: x.nunique() == 1)
if constant.empty:
self.entities = {}
else:
keep = self.index.columns[constant]
self.entities = {k: self.index[k].iloc[0] for k in keep}
class SimpleVariable(BIDSVariable):
''' Represents a simple design matrix column that has no timing
information.
Args:
name (str): Name of the column.
data (DataFrame): A pandas DataFrame minimally containing a column
named 'amplitude' as well as any identifying entities.
source (str): The type of BIDS variable file the data were extracted
from. Must be one of: 'events', 'physio', 'stim', 'confounds',
'scans', 'sessions', 'participants', or 'beh'.
kwargs: Optional keyword arguments passed onto superclass.
'''
_entity_columns = {'condition', 'amplitude'}
def __init__(self, name, data, source, **kwargs):
ent_cols = list(set(data.columns) - self._entity_columns)
self.index = data.loc[:, ent_cols]
values = data['amplitude'].reset_index(drop=True)
values.name = name
super(SimpleVariable, self).__init__(name, values, source)
def split(self, grouper):
''' Split the current SparseRunVariable into multiple columns.
Args:
grouper (iterable): list to groupby, where each unique value will
be taken as the name of the resulting column.
Returns:
A list of SparseRunVariables, one per unique value in the
grouper.
'''
data = self.to_df(condition=True, entities=True)
data = data.drop('condition', axis=1)
subsets = []
for i, (name, g) in enumerate(data.groupby(grouper)):
name = '%s.%s' % (self.name, name)
args = [name, g, self.source]
if hasattr(self, 'run_info'):
args.append(self.run_info)
col = self.__class__(*args)
subsets.append(col)
return subsets
@classmethod
def _merge(cls, variables, name, **kwargs):
dfs = [v.to_df() for v in variables]
data = pd.concat(dfs, axis=0).reset_index(drop=True)
data = data.rename(columns={name: 'amplitude'})
return cls(name, data, source=variables[0].source, **kwargs)
class SparseRunVariable(SimpleVariable):
''' A sparse representation of a single column of events.
Args:
name (str): Name of the column.
data (DataFrame): A pandas DataFrame minimally containing the columns
'onset', 'duration', and 'amplitude'.
run_info (list): A list of RunInfo objects carrying information about
all runs represented in the Variable.
source (str): The type of BIDS variable file the data were extracted
from. Must be one of: 'events', 'physio', 'stim', 'confounds',
'scans', 'sessions', 'participants', or 'beh'.
kwargs: Optional keyword arguments passed onto superclass.
'''
_property_columns = {'onset', 'duration'}
def __init__(self, name, data, run_info, source, **kwargs):
if hasattr(run_info, 'duration'):
run_info = [run_info]
self.run_info = run_info
for sc in self._property_columns:
setattr(self, sc, data.pop(sc).values)
super(SparseRunVariable, self).__init__(name, data, source, **kwargs)
def get_duration(self):
''' Return the total duration of the Variable's run(s). '''
return sum([r.duration for r in self.run_info])
def to_dense(self, sampling_rate):
''' Convert the current sparse column to a dense representation.
Returns: A DenseRunVariable.
Args:
sampling_rate (int, str): Sampling rate (in Hz) to use when
constructing the DenseRunVariable.
Returns:
A DenseRunVariable.
'''
duration = int(math.ceil(sampling_rate * self.get_duration()))
ts = np.zeros(duration, dtype=self.values.dtype)
onsets = np.round(self.onset * sampling_rate).astype(int)
durations = np.round(self.duration * sampling_rate).astype(int)
run_i, start, last_ind = 0, 0, 0
for i, val in enumerate(self.values.values):
if onsets[i] < last_ind:
start += self.run_info[run_i].duration * sampling_rate
run_i += 1
_onset = start + onsets[i]
_offset = _onset + durations[i]
ts[_onset:_offset] = val
last_ind = onsets[i]
run_info = list(self.run_info)
return DenseRunVariable(self.name, ts, run_info, self.source,
sampling_rate)
@classmethod
def _merge(cls, variables, name, **kwargs):
run_info = list(chain(*[v.run_info for v in variables]))
return super(SparseRunVariable, cls)._merge(variables, name,
run_info=run_info,
**kwargs)
class DenseRunVariable(BIDSVariable):
''' A dense representation of a single column.
Args:
name (str): The name of the column.
values (NDArray): The values/amplitudes to store.
run_info (list): A list of RunInfo objects carrying information about
all runs represented in the Variable.
source (str): The type of BIDS variable file the data were extracted
from. Must be one of: 'events', 'physio', 'stim', 'confounds',
'scans', 'sessions', 'participants', or 'beh'.
sampling_rate (float): Optional sampling rate (in Hz) to use. Must
match the sampling rate used to generate the values. If None,
the collection's sampling rate will be used.
'''
def __init__(self, name, values, run_info, source, sampling_rate):
values = pd.DataFrame(values)
if hasattr(run_info, 'duration'):
run_info = [run_info]
self.run_info = run_info
self.sampling_rate = sampling_rate
self.index = self._build_entity_index(run_info, sampling_rate)
super(DenseRunVariable, self).__init__(name, values, source)
def split(self, grouper):
''' Split the current DenseRunVariable into multiple columns.
Args:
grouper (DataFrame): binary DF specifying the design matrix to
use for splitting. Number of rows must match current
DenseRunVariable; a new DenseRunVariable will be generated
for each column in the grouper.
Returns:
A list of DenseRunVariables, one per unique value in the grouper.
'''
values = grouper.values * self.values.values
df = pd.DataFrame(values, columns=grouper.columns)
return [DenseRunVariable('%s.%s' % (self.name, name), df[name].values,
self.run_info, self.source,
self.sampling_rate)
for i, name in enumerate(df.columns)]
def _build_entity_index(self, run_info, sampling_rate):
''' Build the entity index from run information. '''
index = []
sr = int(round(1000. / sampling_rate))
_timestamps = []
for run in run_info:
reps = int(math.ceil(run.duration * sampling_rate))
ent_vals = list(run.entities.values())
data = np.broadcast_to(ent_vals, (reps, len(ent_vals)))
df = pd.DataFrame(data, columns=list(run.entities.keys()))
ts = pd.date_range(0, periods=len(df), freq='%sms' % sr)
_timestamps.append(ts.to_series())
index.append(df)
self.timestamps = pd.concat(_timestamps, axis=0)
return pd.concat(index, axis=0).reset_index(drop=True)
def resample(self, sampling_rate, inplace=False, kind='linear'):
''' Resample the Variable to the specified sampling rate.
Args:
sampling_rate (int, float): Target sampling rate (in Hz)
inplace (bool): If True, performs resampling in-place. If False,
returns a resampled copy of the current Variable.
kind (str): Argument to pass to scipy's interp1d; indicates the
kind of interpolation approach to use. See interp1d docs for
valid values.
'''
if not inplace:
var = self.clone()
var.resample(sampling_rate, True, kind)
return var
if sampling_rate == self.sampling_rate:
return
old_sr = self.sampling_rate
n = len(self.index)
self.index = self._build_entity_index(self.run_info, sampling_rate)
x = np.arange(n)
num = int(np.ceil(n * sampling_rate / old_sr))
f = interp1d(x, self.values.values.ravel(), kind=kind)
x_new = np.linspace(0, n - 1, num=num)
self.values = pd.DataFrame(f(x_new))
self.sampling_rate = sampling_rate
def to_df(self, condition=True, entities=True, timing=True):
''' Convert to a DataFrame, with columns for name and entities.
Args:
condition (bool): If True, adds a column for condition name, and
names the amplitude column 'amplitude'. If False, returns just
onset, duration, and amplitude, and gives the amplitude column
the current column name.
entities (bool): If True, adds extra columns for all entities.
timing (bool): If True, includes onset and duration columns (even
though events are sampled uniformly). If False, omits them.
'''
df = super(DenseRunVariable, self).to_df(condition, entities)
if timing:
df['onset'] = self.timestamps.values.astype(float) / 1e+9
df['duration'] = 1. / self.sampling_rate
return df
@classmethod
def _merge(cls, variables, name, sampling_rate=None, **kwargs):
if not isinstance(sampling_rate, int):
rates = set([v.sampling_rate for v in variables])
if len(rates) == 1:
sampling_rate = list(rates)[0]
else:
                if sampling_rate == 'auto':
sampling_rate = max(rates)
else:
msg = ("Cannot merge DenseRunVariables (%s) with different"
" sampling rates (%s). Either specify an integer "
"sampling rate to use for all variables, or set "
"sampling_rate='auto' to use the highest sampling "
"rate found." % (name, rates))
raise ValueError(msg)
variables = [v.resample(sampling_rate) for v in variables]
values = pd.concat([v.values for v in variables], axis=0)
run_info = list(chain(*[v.run_info for v in variables]))
source = variables[0].source
return DenseRunVariable(name, values, run_info, source, sampling_rate)
def merge_variables(variables, name=None, **kwargs):
''' Merge/concatenate a list of variables along the row axis.
Args:
variables (list): A list of Variables to merge.
name (str): Optional name to assign to the output Variable. By
default, uses the same name as the input variables.
kwargs: Optional keyword arguments to pass onto the class-specific
merge() call. Possible args:
- sampling_rate (int, str): The sampling rate to use if resampling
of DenseRunVariables is necessary for harmonization. If 'auto',
the highest sampling rate found will be used. This argument is
only used when passing DenseRunVariables in the variables list.
Returns:
A single BIDSVariable of the same class as the input variables.
Notes:
* Currently, this function only support homogenously-typed lists. In
future, it may be extended to support implicit conversion.
* Variables in the list must all share the same name (i.e., it is not
possible to merge two different variables into a single variable.)
'''
classes = set([v.__class__ for v in variables])
if len(classes) > 1:
raise ValueError("Variables of different classes cannot be merged. "
"Variables passed are of classes: %s" % classes)
sources = set([v.source for v in variables])
if len(sources) > 1:
raise ValueError("Variables extracted from different types of files "
"cannot be merged. Sources found: %s" % sources)
return list(classes)[0].merge(variables, **kwargs)
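# Illustrative usage sketch (not part of the original module). It builds two
# SimpleVariables with hypothetical entity columns ('subject', 'run') and
# merges them along the row axis with merge_variables().
if __name__ == '__main__':
    _run1 = pd.DataFrame({'amplitude': [1.0, 2.0], 'subject': ['01', '01'], 'run': [1, 1]})
    _run2 = pd.DataFrame({'amplitude': [3.0, 4.0], 'subject': ['01', '01'], 'run': [2, 2]})
    _v1 = SimpleVariable('rt', _run1, source='events')
    _v2 = SimpleVariable('rt', _run2, source='events')
    _merged = merge_variables([_v1, _v2])
    print(_merged.to_df())  # four rows spanning both runs, plus the entity columns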
|
the-stack_106_19668
|
#***************************************************************************************************
#
# File Name: gen_support.py
# Application Version: v0.1
# Application Developer: Anastasiia Butko (LBNL)
#
# Software: Task assIGnment mappER (TIGER)
# Author: Anastasiia Butko (LBNL)
# Website: https://github.com/lbnlcomputerarch/tiger
#
# The copyright information of this software can be found in the file COPYRIGHT.
#
#*************************************************************************************************
from qubo_gen import *
from qmasm_gen import *
from sol_gen import *
from gen_support import *
import random
import math
import os
import sys
import ast
import re
import networkx as nx
import numpy as np
import collections
from random import randint
from itertools import izip_longest, ifilter
def longest_path_length(G, src, dst):
paths = nx.all_simple_paths(G, src, dst)
check = list(nx.all_simple_paths(G, src, dst))
if check:
return len(max(paths, key = len))-1
else:
return 0
def level_groups(G, root, size):
levels = collections.OrderedDict()
levels.update({0: root})
for taskID in G:
if taskID not in root:
levelID = 0
for rootIdx in root:
level = longest_path_length(G, rootIdx, taskID)
levelID = level if level > levelID else levelID
if (levelID in levels):
levels[levelID].append(taskID)
else:
levels.update({levelID: [taskID]})
return levels
def adjust_levels(G, levels, procNum):
levelShift = 0
adjustedLevels = collections.OrderedDict()
print("\nAdjusting levels ...")
for il, lvl in enumerate(levels):
if len(levels[lvl]) > procNum:
subLevelsNum = len(levels[lvl])/procNum
remainderTasks = len(levels[lvl])%procNum
print("Num of sublevels: " + str(subLevelsNum) + " RemainderTasks: " + str(remainderTasks))
for idx in range(0, subLevelsNum):
adjustedLevels.update({(lvl+levelShift): levels[lvl][(idx*procNum):(idx*procNum + procNum)]})
levelShift += 1
# if remainderTasks > 0:
# adjustedLevels.update({(lvl+levelShift): levels[lvl][(subLevelsNum*procNum):(subLevelsNum*procNum + procNum)]})
if remainderTasks > 0:
if (il < len(levels)-1):
if len(levels[lvl+1]) < procNum:
bufA = []
bufB = []
freeSpace = procNum - len(levels[lvl+1])
for subTask in range(0, remainderTasks):
depFlag = 0
for nextLevelTask in range(0, len(levels[lvl+1])):
if (G.has_edge(levels[lvl][subLevelsNum*procNum+subTask], levels[lvl+1][nextLevelTask])):
depFlag = 1
break
if((depFlag != 1) and (len(bufA) < freeSpace)):
bufA.append(levels[lvl][subLevelsNum*procNum+subTask])
else:
bufB.append(levels[lvl][subLevelsNum*procNum+subTask])
for taskID in bufA:
levels[lvl+1].append(taskID)
if bufB:
adjustedLevels.update({(lvl+levelShift): bufB})
else:
levelShift -= 1
else:
adjustedLevels.update({(lvl+levelShift): levels[lvl][(subLevelsNum*procNum):(subLevelsNum*procNum + procNum)]})
else:
adjustedLevels.update({(lvl+levelShift): levels[lvl][(subLevelsNum*procNum):(subLevelsNum*procNum + procNum)]})
else:
levelShift -= 1
else:
adjustedLevels.update({(lvl+levelShift): levels[lvl]})
print("\n")
return adjustedLevels
def vertical_couplings(taskID, penalty, file, ProcNum):
number = 0
for i in range(0, ProcNum):
for j in range(i, ProcNum):
if i!=j:
A = taskID*ProcNum + i
B = taskID*ProcNum + j
file.write(str(A) + " " + str(B) + " " + str(penalty) + "\n")
number += 1
return number
def horizontal_couplings(taskID_0, taskID_1, penalty, file, ProcNum):
number = 0
for p in range(0, ProcNum):
A = taskID_0 + p
B = taskID_1 + p
file.write(str(A) + " " + str(B) + " " + str(penalty) + "\n")
number += 1
return number
def task_to_qubit(SG, ProcNum):
TaskQubitDict = collections.OrderedDict()
for i, task in enumerate(sorted(SG.nodes())):
TaskQubitDict.update({task: (i*ProcNum)})
return TaskQubitDict
def devide_graph(TCG, LpSG, adjustedLevels):
SG = []
SL = []
chunks = [adjustedLevels.iteritems()]*LpSG
g = (dict(ifilter(None, v)) for v in izip_longest(*chunks))
SL = list(g)
for sub_level in SL:
SGlevels = []
for key, value in sub_level.iteritems():
SGlevels.extend(value)
SG.append(TCG.subgraph(SGlevels))
return SG, SL
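# Hedged usage sketch (not part of the original TIGER sources): builds a tiny
# task DAG and prints its level grouping, assuming the module's own imports
# resolve (the file targets Python 2, e.g. izip_longest/ifilter).
if __name__ == '__main__':
    demo_graph = nx.DiGraph([(0, 2), (1, 2), (2, 3), (1, 3)])
    demo_levels = level_groups(demo_graph, [0, 1], demo_graph.number_of_nodes())
    print("levels: " + str(demo_levels))
    print("adjusted: " + str(adjust_levels(demo_graph, demo_levels, 2)))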
|
the-stack_106_19670
|
# -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Testing connection.CMySQLConnection class using the C Extension
"""
import tests
from mysql.connector import errors
from mysql.connector.constants import ClientFlag, flag_is_set
from mysql.connector.connection import MySQLConnection
from mysql.connector.connection_cext import CMySQLConnection
class CMySQLConnectionTests(tests.MySQLConnectorTests):
def setUp(self):
config = tests.get_mysql_config()
self.cnx = CMySQLConnection(**config)
self.pcnx = MySQLConnection(**config)
def test__info_query(self):
query = "SELECT 1, 'a', 2, 'b'"
exp = (1, 'a', 2, 'b')
self.assertEqual(exp, self.cnx.info_query(query))
self.assertRaises(errors.InterfaceError, self.cnx.info_query,
"SHOW VARIABLES LIKE '%char%'")
def test_client_flags(self):
defaults = ClientFlag.default
set_flags = self.cnx._cmysql.st_client_flag()
for flag in defaults:
self.assertTrue(flag_is_set(flag, set_flags))
def test_get_rows(self):
self.assertRaises(errors.InternalError, self.cnx.get_rows)
query = "SHOW STATUS LIKE 'Aborted_c%'"
self.cnx.cmd_query(query)
self.assertRaises(AttributeError, self.cnx.get_rows, 0)
self.assertRaises(AttributeError, self.cnx.get_rows, -10)
self.assertEqual(2, len(self.cnx.get_rows()))
self.cnx.free_result()
self.cnx.cmd_query(query)
self.assertEqual(1, len(self.cnx.get_rows(count=1)))
self.assertEqual(1, len(self.cnx.get_rows(count=1)))
self.assertEqual([], self.cnx.get_rows(count=1))
self.cnx.free_result()
def test_cmd_init_db(self):
query = "SELECT DATABASE()"
self.cnx.cmd_init_db('mysql')
self.assertEqual('mysql', self.cnx.info_query(query)[0])
self.cnx.cmd_init_db('myconnpy')
self.assertEqual('myconnpy', self.cnx.info_query(query)[0])
def test_cmd_query(self):
query = "SHOW STATUS LIKE 'Aborted_c%'"
info = self.cnx.cmd_query(query)
exp = {
'eof': {'status_flag': 32, 'warning_count': 0},
'columns': [
('Variable_name', 253, None, None, None, None, 0, 1),
('Value', 253, None, None, None, None, 1, 0)]
}
self.assertEqual(exp, info)
rows = self.cnx.get_rows()
vars = [ row[0] for row in rows ]
self.assertEqual(2, len(rows))
vars.sort()
exp = ['Aborted_clients', 'Aborted_connects']
self.assertEqual(exp, vars)
exp = ['Value', 'Variable_name']
fields = [fld[0] for fld in info['columns']]
fields.sort()
self.assertEqual(exp, fields)
self.cnx.free_result()
info = self.cnx.cmd_query("SET @a = 1")
exp = {
'warning_count': 0, 'insert_id': 0, 'affected_rows': 0,
'server_status': 0, 'field_count': 0
}
self.assertEqual(exp, info)
|
the-stack_106_19672
|
import os, sys
import autograd.numpy as np
from autograd import value_and_grad
from scipy.optimize import minimize
from util import get_median_inter_mnist, Kernel, load_data, ROOT_PATH, jitchol, _sqdist, \
remove_outliers, nystrom_decomp, chol_inv, bundle_az_aw, visualise_ATEs
from joblib import Parallel, delayed
import time
import matplotlib.pyplot as plt
from datetime import date
Nfeval = 1
seed = 527
np.random.seed(seed)
JITTER = 1e-7
nystr_M = 300
EYE_nystr = np.eye(nystr_M)
opt_params = None
prev_norm = None
opt_test_err = None
def experiment(sname, seed, datasize, nystr=False):
def LMO_err(params, M=10):
al, bl = np.exp(params)
L = bl * bl * np.exp(-L0 / al / al / 2) + 1e-6 * EYEN
if nystr:
tmp_mat = L @ eig_vec_K
C = L - tmp_mat @ np.linalg.inv(eig_vec_K.T @ tmp_mat / N2 + inv_eig_val_K) @ tmp_mat.T / N2
c = C @ W_nystr_Y * N2
else:
LWL_inv = chol_inv(L @ W @ L + L / N2 + JITTER * EYEN)
C = L @ LWL_inv @ L / N2
c = C @ W @ Y * N2
c_y = c - Y
lmo_err = 0
N = 0
for ii in range(1):
permutation = np.random.permutation(X.shape[0])
for i in range(0, X.shape[0], M):
indices = permutation[i:i + M]
K_i = W[np.ix_(indices, indices)] * N2
C_i = C[np.ix_(indices, indices)]
c_y_i = c_y[indices]
b_y = np.linalg.inv(np.eye(M) - C_i @ K_i) @ c_y_i
lmo_err += b_y.T @ K_i @ b_y
N += 1
return lmo_err[0, 0] / N / M ** 2
def callback0(params, timer=None):
global Nfeval, prev_norm, opt_params, opt_test_err
if Nfeval % 1 == 0:
al, bl = params
L = bl * bl * np.exp(-L0 / al / al / 2) + 1e-6 * EYEN
if nystr:
alpha = EYEN - eig_vec_K @ np.linalg.inv(
eig_vec_K.T @ L @ eig_vec_K / N2 + np.diag(1 / eig_val_K / N2)) @ eig_vec_K.T @ L / N2
alpha = alpha @ W_nystr @ Y * N2
else:
LWL_inv = chol_inv(L @ W @ L + L / N2 + JITTER * EYEN)
alpha = LWL_inv @ L @ W @ Y
# L_W_inv = chol_inv(W*N2+L_inv)
test_L = bl * bl * np.exp(-test_L0 / al / al / 2)
pred_mean = test_L @ alpha
if timer:
return
test_err = ((pred_mean - test_Y) ** 2).mean() # ((pred_mean-test_Y)**2/np.diag(pred_cov)).mean()+(np.log(np.diag(pred_cov))).mean()
norm = alpha.T @ L @ alpha
Nfeval += 1
if prev_norm is not None:
if norm[0, 0] / prev_norm >= 3:
if opt_params is None:
opt_test_err = test_err
opt_params = params
print(True, opt_params, opt_test_err, prev_norm)
raise Exception
if prev_norm is None or norm[0, 0] <= prev_norm:
prev_norm = norm[0, 0]
opt_test_err = test_err
opt_params = params
print('params,test_err, norm: ', opt_params, opt_test_err, prev_norm)
def get_causal_effect(params, do_A, w):
"to be called within experiment function."
al, bl = params
L = bl * bl * np.exp(-L0 / al / al / 2) + 1e-6 * EYEN
if nystr:
alpha = EYEN - eig_vec_K @ np.linalg.inv(
eig_vec_K.T @ L @ eig_vec_K / N2 + np.diag(1 / eig_val_K / N2)) @ eig_vec_K.T @ L / N2
alpha = alpha @ W_nystr @ Y * N2
else:
LWL_inv = chol_inv(L @ W @ L + L / N2 + JITTER * EYEN)
alpha = LWL_inv @ L @ W @ Y
# L_W_inv = chol_inv(W*N2+L_inv)
EYhat_do_A = []
for a in do_A:
a = np.repeat(a, [w.shape[0]]).reshape(-1, 1)
w = w.reshape(-1, 1)
aw = np.concatenate([a, w], axis=-1)
ate_L0 = _sqdist(aw, X)
ate_L = bl * bl * np.exp(-ate_L0 / al / al / 2)
h_out = ate_L @ alpha
mean_h = np.mean(h_out).reshape(-1, 1)
EYhat_do_A.append(mean_h)
print('a = {}, beta_a = {}'.format(np.mean(a), mean_h))
return np.concatenate(EYhat_do_A)
# train,dev,test = load_data(ROOT_PATH+'/data/zoo/{}_{}.npz'.format(sname,datasize))
# X = np.vstack((train.x,dev.x))
# Y = np.vstack((train.y,dev.y))
# Z = np.vstack((train.z,dev.z))
# test_X = test.x
# test_Y = test.g
train, dev, test = load_data(ROOT_PATH + "/data/zoo/" + sname + '/main_orig.npz')
Y = np.concatenate((train.y, dev.y), axis=0).reshape(-1, 1)
# test_Y = test.y
AZ_train, AW_train = bundle_az_aw(train.a, train.z, train.w)
AZ_test, AW_test = bundle_az_aw(test.a, test.z, test.w)
    AZ_dev, AW_dev = bundle_az_aw(dev.a, dev.z, dev.w)
X, Z = np.concatenate((AW_train, AW_dev), axis=0), np.concatenate((AZ_train, AZ_dev), axis=0)
test_X, test_Y = AW_test, test.y.reshape(-1, 1) # TODO: is test.g just test.y?
t0 = time.time()
EYEN = np.eye(X.shape[0])
ak0, ak1 = get_median_inter_mnist(Z[:, 0:1]), get_median_inter_mnist(Z[:, 1:2])
N2 = X.shape[0] ** 2
W0 = _sqdist(Z, None)
W = (np.exp(-W0 / ak0 / ak0 / 2) + np.exp(-W0 / ak0 / ak0 / 200) + np.exp(
-W0 / ak0 / ak0 * 50)) / 3 / N2 # TODO: recompute W for my case
del W0
L0, test_L0 = _sqdist(X, None), _sqdist(test_X, X)
# measure time
# callback0(np.random.randn(2)/10,True)
# np.save(ROOT_PATH + "/MMR_IVs/results/zoo/" + sname + '/LMO_errs_{}_nystr_{}_time.npy'.format(seed,train.x.shape[0]),time.time()-t0)
# return
params0 = np.random.randn(2) / 10
bounds = None # [[0.01,10],[0.01,5]]
if nystr:
for _ in range(seed + 1):
random_indices = np.sort(np.random.choice(range(W.shape[0]), nystr_M, replace=False))
eig_val_K, eig_vec_K = nystrom_decomp(W * N2, random_indices)
inv_eig_val_K = np.diag(1 / eig_val_K / N2)
W_nystr = eig_vec_K @ np.diag(eig_val_K) @ eig_vec_K.T / N2
W_nystr_Y = W_nystr @ Y
obj_grad = value_and_grad(lambda params: LMO_err(params))
# try:
res = minimize(obj_grad, x0=params0, bounds=bounds, method='L-BFGS-B', jac=True, options={'maxiter': 5000},
callback=callback0)
# res stands for results (not residuals!).
# except Exception as e:
# print(e)
    PATH = ROOT_PATH + "/MMR_IVs/results/zoo/" + sname + "/"
    os.makedirs(os.path.join(PATH, str(date.today())), exist_ok=True)
assert opt_params is not None
params = opt_params
do_A = np.load(ROOT_PATH + "/data/zoo/" + sname + '/do_A_orig.npz')['do_A']
EY_do_A_gt = np.load(ROOT_PATH + "/data/zoo/" + sname + '/do_A_orig.npz')['gt_EY_do_A']
w_sample = train.w
EYhat_do_A = get_causal_effect(params=params, do_A=do_A, w=w_sample)
plt.figure()
plt.plot([i + 1 for i in range(20)], EYhat_do_A)
plt.xlabel('A')
plt.ylabel('EYdoA-est')
plt.savefig(
os.path.join(PATH, str(date.today()), 'causal_effect_estimates_nystr_prodkern_{}'.format(AW_train.shape[0]) + '.png'))
plt.close()
print('ground truth ate: ', EY_do_A_gt)
visualise_ATEs(EY_do_A_gt, EYhat_do_A,
x_name='E[Y|do(A)] - gt',
y_name='beta_A',
save_loc=os.path.join(PATH, str(date.today())) + '/',
save_name='ate_{}_nystr_prodkern.png'.format(AW_train.shape[0]))
causal_effect_mean_abs_err = np.mean(np.abs(EY_do_A_gt - EYhat_do_A))
causal_effect_mae_file = open(os.path.join(PATH, str(date.today()), "ate_mae_{}_nystrom_prodkern.txt".format(AW_train.shape[0])),
"a")
causal_effect_mae_file.write("mae_: {}\n".format(causal_effect_mean_abs_err))
causal_effect_mae_file.close()
os.makedirs(PATH, exist_ok=True)
np.save(os.path.join(PATH, str(date.today()), 'LMO_errs_{}_nystr_prodkern_{}.npy'.format(seed, AW_train.shape[0])), [opt_params, prev_norm, opt_test_err])
# TODO: where is alpha? and how is it making a prediction? alpha is defined in the callback function. how is it reached?
def summarize_res(sname, datasize):
print(sname)
res = []
times = []
for i in range(100):
PATH = ROOT_PATH + "/MMR_IVs/results/zoo/" + sname + "/"
filename = os.path.join(PATH, str(date.today()), 'LMO_errs_{}_nystr_prodkern_{}.npy'.format(i, datasize))
if os.path.exists(filename):
tmp_res = np.load(filename, allow_pickle=True)
if tmp_res[-1] is not None:
res += [tmp_res[-1]]
        time_path = os.path.join(PATH, str(date.today()), 'LMO_errs_{}_nystr_prodkern_{}_time.npy'.format(i, datasize))
if os.path.exists(time_path):
t = np.load(time_path)
times += [t]
res = np.array(res)
times = np.array(times)
res = remove_outliers(res)
times = np.sort(times)[:80]
print(times)
print('mean, std: ', np.mean(res), np.std(res))
print('time: ', np.mean(times), np.std(times))
if __name__ == '__main__':
# snames = ['step','sin','abs','linear']
snames = ["sim_1d_no_x"]
for datasize in [5000]:
for sname in snames:
for seed in range(100):
experiment(sname, seed, datasize, False if datasize < 1000 else True)
summarize_res(sname, datasize)
|
the-stack_106_19673
|
# coding: utf-8
"""
CLOUD API
An enterprise-grade Infrastructure is provided as a Service (IaaS) solution that can be managed through a browser-based \"Data Center Designer\" (DCD) tool or via an easy to use API. The API allows you to perform a variety of management tasks such as spinning up additional servers, adding volumes, adjusting networking, and so forth. It is designed to allow users to leverage the same power and flexibility found within the DCD visual tool. Both tools are consistent with their concepts and lend well to making the experience smooth and intuitive. # noqa: E501
The version of the OpenAPI document: 5.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ionoscloud.configuration import Configuration
class KubernetesClusterPropertiesForPost(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'k8s_version': 'str',
'maintenance_window': 'KubernetesMaintenanceWindow',
'api_subnet_allow_list': 'list[str]',
's3_buckets': 'list[S3Bucket]',
}
attribute_map = {
'name': 'name',
'k8s_version': 'k8sVersion',
'maintenance_window': 'maintenanceWindow',
'api_subnet_allow_list': 'apiSubnetAllowList',
's3_buckets': 's3Buckets',
}
def __init__(self, name=None, k8s_version=None, maintenance_window=None, api_subnet_allow_list=None, s3_buckets=None, local_vars_configuration=None): # noqa: E501
"""KubernetesClusterPropertiesForPost - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._k8s_version = None
self._maintenance_window = None
self._api_subnet_allow_list = None
self._s3_buckets = None
self.discriminator = None
self.name = name
if k8s_version is not None:
self.k8s_version = k8s_version
if maintenance_window is not None:
self.maintenance_window = maintenance_window
if api_subnet_allow_list is not None:
self.api_subnet_allow_list = api_subnet_allow_list
if s3_buckets is not None:
self.s3_buckets = s3_buckets
@property
def name(self):
"""Gets the name of this KubernetesClusterPropertiesForPost. # noqa: E501
A Kubernetes Cluster Name. Valid Kubernetes Cluster name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. # noqa: E501
:return: The name of this KubernetesClusterPropertiesForPost. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this KubernetesClusterPropertiesForPost.
A Kubernetes Cluster Name. Valid Kubernetes Cluster name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. # noqa: E501
:param name: The name of this KubernetesClusterPropertiesForPost. # noqa: E501
:type name: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def k8s_version(self):
"""Gets the k8s_version of this KubernetesClusterPropertiesForPost. # noqa: E501
The kubernetes version in which a cluster is running. This imposes restrictions on what kubernetes versions can be run in a cluster's nodepools. Additionally, not all kubernetes versions are viable upgrade targets for all prior versions. # noqa: E501
:return: The k8s_version of this KubernetesClusterPropertiesForPost. # noqa: E501
:rtype: str
"""
return self._k8s_version
@k8s_version.setter
def k8s_version(self, k8s_version):
"""Sets the k8s_version of this KubernetesClusterPropertiesForPost.
The kubernetes version in which a cluster is running. This imposes restrictions on what kubernetes versions can be run in a cluster's nodepools. Additionally, not all kubernetes versions are viable upgrade targets for all prior versions. # noqa: E501
:param k8s_version: The k8s_version of this KubernetesClusterPropertiesForPost. # noqa: E501
:type k8s_version: str
"""
self._k8s_version = k8s_version
@property
def maintenance_window(self):
"""Gets the maintenance_window of this KubernetesClusterPropertiesForPost. # noqa: E501
:return: The maintenance_window of this KubernetesClusterPropertiesForPost. # noqa: E501
:rtype: KubernetesMaintenanceWindow
"""
return self._maintenance_window
@maintenance_window.setter
def maintenance_window(self, maintenance_window):
"""Sets the maintenance_window of this KubernetesClusterPropertiesForPost.
:param maintenance_window: The maintenance_window of this KubernetesClusterPropertiesForPost. # noqa: E501
:type maintenance_window: KubernetesMaintenanceWindow
"""
self._maintenance_window = maintenance_window
@property
def api_subnet_allow_list(self):
"""Gets the api_subnet_allow_list of this KubernetesClusterPropertiesForPost. # noqa: E501
Access to the K8s API server is restricted to these CIDRs. Cluster-internal traffic is not affected by this restriction. If no allowlist is specified, access is not restricted. If an IP without subnet mask is provided, the default value will be used: 32 for IPv4 and 128 for IPv6. # noqa: E501
:return: The api_subnet_allow_list of this KubernetesClusterPropertiesForPost. # noqa: E501
:rtype: list[str]
"""
return self._api_subnet_allow_list
@api_subnet_allow_list.setter
def api_subnet_allow_list(self, api_subnet_allow_list):
"""Sets the api_subnet_allow_list of this KubernetesClusterPropertiesForPost.
Access to the K8s API server is restricted to these CIDRs. Cluster-internal traffic is not affected by this restriction. If no allowlist is specified, access is not restricted. If an IP without subnet mask is provided, the default value will be used: 32 for IPv4 and 128 for IPv6. # noqa: E501
:param api_subnet_allow_list: The api_subnet_allow_list of this KubernetesClusterPropertiesForPost. # noqa: E501
:type api_subnet_allow_list: list[str]
"""
self._api_subnet_allow_list = api_subnet_allow_list
@property
def s3_buckets(self):
"""Gets the s3_buckets of this KubernetesClusterPropertiesForPost. # noqa: E501
List of S3 bucket configured for K8s usage. For now it contains only one S3 bucket used to store K8s API audit logs # noqa: E501
:return: The s3_buckets of this KubernetesClusterPropertiesForPost. # noqa: E501
:rtype: list[S3Bucket]
"""
return self._s3_buckets
@s3_buckets.setter
def s3_buckets(self, s3_buckets):
"""Sets the s3_buckets of this KubernetesClusterPropertiesForPost.
List of S3 bucket configured for K8s usage. For now it contains only one S3 bucket used to store K8s API audit logs # noqa: E501
:param s3_buckets: The s3_buckets of this KubernetesClusterPropertiesForPost. # noqa: E501
:type s3_buckets: list[S3Bucket]
"""
self._s3_buckets = s3_buckets
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, KubernetesClusterPropertiesForPost):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, KubernetesClusterPropertiesForPost):
return True
return self.to_dict() != other.to_dict()
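# Hedged usage sketch (not part of the generated module): the values below are
# made up and only illustrate constructing and serializing the model.
if __name__ == '__main__':
    example = KubernetesClusterPropertiesForPost(
        name='example-cluster',
        k8s_version='1.18.5',
        api_subnet_allow_list=['203.0.113.0/24'],
    )
    print(example.to_dict())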
|
the-stack_106_19674
|
# -*- coding: UTF-8 -*-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import time
import tensorflow as tf
import tensorflow_text as tf_text
import paddle
import paddlenlp
from paddlenlp.transformers import BertTokenizer
from paddlenlp.experimental import FasterTokenizer
from paddlenlp.experimental import to_tensor
from transformers import AutoTokenizer
parser = argparse.ArgumentParser()
# yapf: disable
parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization.")
parser.add_argument("--batch_size", default=32, type=int, help="Batch size for tokenization.")
parser.add_argument("--epochs", default=10, type=int, help="Total number of tokenization epochs to perform.")
parser.add_argument("--num_samples", default=100, type=int, help="The number of samples to be tokenized")
# yapf: enable
args = parser.parse_args()
max_seq_length = args.max_seq_length
batch_size = args.batch_size
epochs = args.epochs
num_samples = args.num_samples
total_tokens = epochs * num_samples * max_seq_length
text = '在世界几大古代文明中,中华文明源远流长、从未中断,至今仍充满蓬勃生机与旺盛生命力,这在人类历史上是了不起的奇迹。' \
'本固根深、一脉相承的历史文化是铸就这一奇迹的重要基础。先秦时期是中华文化的创生期,奠定了此后几千年中华文化发展的' \
'基础。考古发现证实,早期中华文明的形成经历了从“满天星斗”到“月明星稀”再到“多元一体”的过程。在这个过程中,不同地域、' \
'不同人群的文化交流交融,中华民族最早的大家庭逐渐成形,国家由此诞生,“大同”社会理想和“天下为公,选贤与能,讲信修睦”' \
'的价值追求逐渐深入人心。在早期国家形成过程中,我们的先人积累了初步的国家治理经验,包括经济、政治、军事、法律、文化' \
'等各个方面,最终以典章、思想的形式进行总结和传承。流传至今的夏商西周国家治理经验、春秋战国诸子百家思想,是先秦时期' \
'历史文化的集中反映。秦汉至宋元时期是中华文化的发展期,中华传统文化在这个时期走向成熟并迈向新的高峰。中央集权制度的' \
'形成、郡县制度的推广、官僚制度的健全,推动中国传统社会形成国家治理的基本形态,为中国传统社会的长期延续和发展提供了' \
'坚实的制度和文化支撑,贯穿其中的价值主线是对“大一统”的坚定追求。与此同时,民为邦本的民本思想、以文化人的文治主张、' \
'协和万邦的天下观等,也在实践中得到丰富和完善。在追求“大一统”的历史中,民族精神世代相传,民族英雄史不绝书。'
data = [text[:max_seq_length]] * num_samples
# BERT Tokenizer using PaddleNLP FasterTokenizer
pp_tokenizer = FasterTokenizer.from_pretrained("bert-base-chinese")
batches = [
to_tensor(data[idx:idx + batch_size])
for idx in range(0, len(data), batch_size)
]
for batch_data in batches:
input_ids, token_type_ids = pp_tokenizer(
text=batch_data, max_seq_len=max_seq_length)
start = time.time()
for _ in range(epochs):
for batch_data in batches:
input_ids, token_type_ids = pp_tokenizer(
batch_data, max_seq_len=max_seq_length)
end = time.time()
print("The throughput of paddle FasterTokenizer: {:,.2f} tokens/s".format((
total_tokens / (end - start))))
# BERT Tokenizer using HuggingFace fast AutoTokenizer
hf_tokenizer = AutoTokenizer.from_pretrained("bert-base-chinese", use_fast=True)
batches = [
    data[idx:idx + batch_size] for idx in range(0, len(data), batch_size)
]
for batch_data in batches:
    encoded_inputs = hf_tokenizer(batch_data)
start = time.time()
for _ in range(epochs):
for batch_data in batches:
encoded_inputs = hf_tokenizer(
batch_data) #, padding=True, truncation=True)
end = time.time()
print("The throughput of huggingface FasterTokenizer: {:,.2f} tokens/s".format((
total_tokens / (end - start))))
# BERT Tokenizer using PaddleNLP BertTokenizer
py_tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
for batch_data in batches:
encoded_inputs = py_tokenizer(batch_data)
start = time.time()
for _ in range(epochs):
for batch_data in batches:
encoded_inputs = py_tokenizer(batch_data)
end = time.time()
print("The throughput of paddle BertTokenizer: {:,.2f} tokens/s".format((
total_tokens / (end - start))))
# BERT Tokenizer using the HuggingFace slow (pure Python) AutoTokenizer
hf_tokenizer = AutoTokenizer.from_pretrained(
"bert-base-chinese", use_fast=False)
for batch_data in batches:
encoded_inputs = hf_tokenizer(batch_data)
start = time.time()
for _ in range(epochs):
for batch_data in batches:
encoded_inputs = hf_tokenizer(
batch_data) #, padding=True, truncation=True)
end = time.time()
print("The throughput of huggingface python tokenizer: {:,.2f} tokens/s".format(
(total_tokens / (end - start))))
# BERT Tokenizer using TensorFlow Text
vocab_list = list(py_tokenizer.vocab.token_to_idx.keys())
lookup_table = tf.lookup.StaticVocabularyTable(
tf.lookup.KeyValueTensorInitializer(
keys=vocab_list,
key_dtype=tf.string,
values=tf.range(
tf.size(
vocab_list, out_type=tf.int64), dtype=tf.int64),
value_dtype=tf.int64),
num_oov_buckets=1)
tf_tokenizer = tf_text.BertTokenizer(lookup_table)
for batch_data in batches:
input_ids = tf_tokenizer.tokenize(batch_data)
start = time.time()
for _ in range(epochs):
for batch_data in batches:
input_ids = tf_tokenizer.tokenize(batch_data)
end = time.time()
print("The throughput of TensorFlow Text BertTokenizer: {:,.2f} tokens/s".
format((total_tokens / (end - start))))
|
the-stack_106_19675
|
"""Module with abstract interface for sparsifiers."""
from abc import ABC, abstractmethod
import copy
import numpy as np
import torch
import torch.nn as nn
from torch.distributions.multinomial import Multinomial
class BaseSparsifier(ABC, nn.Module):
"""The basic interface for a sparsifier.
A sparsifier sparsifies the weights incoming to neurons in a given layer.
"""
def __init__(self, pruner):
"""Initialize the sparsifier from the pruner of the same layer."""
super().__init__()
# Cache tensor and sensitivity of each parameter of tensor
self._tensor = pruner.tensor
self._probability = pruner.probability
self._probability_div = pruner.probability_div
@abstractmethod
def sparsify(self, num_samples):
"""Sparsify the edges of the associated neurons with num_samples."""
raise NotImplementedError
def forward(self, x):
"""It's a nn.Module, so strictly speaking it needs a forward func."""
class SimpleSparsifier(BaseSparsifier):
"""The interface for a simple sparsifier without sensitivity."""
@property
@abstractmethod
def _do_reweighing(self):
raise NotImplementedError
@abstractmethod
def _reweigh(self, counts, num_samples, probs_div):
raise NotImplementedError(
"The base class does not implement this " "method."
)
class RandSparsifier(SimpleSparsifier):
"""The partial implementation for the random sparsification."""
@property
def _do_reweighing(self):
return True
def _reweigh(self, counts, num_samples, probs_div):
gammas = counts.float() / num_samples.float() / probs_div
return gammas
def _generate_counts(self, num_samples, probs):
distribution = Multinomial(num_samples.item(), probs.view(-1))
counts = distribution.sample()
return counts.view(probs.shape)
class DetSparsifier(SimpleSparsifier):
"""The partial implementation for the deterministic sparsification."""
@property
def _do_reweighing(self):
return False
def _reweigh(self, counts, num_samples, probs_div):
sens_sum = max(0, 1 - torch.sum(probs_div[counts]).item())
kappa = 0
if sens_sum < 1:
kappa = sens_sum / (1 - sens_sum)
# Under the i.i.d. assumption this works, otherwise no.
gammas = (1 + kappa) * counts.float()
return gammas
def _generate_counts(self, num_samples, probs):
mask = torch.zeros_like(probs, dtype=torch.bool)
numel = probs.numel()
num_samples = int(np.clip(1, int(num_samples), numel))
idx_top = np.argpartition(probs.view(-1).cpu().numpy(), -num_samples)[
-num_samples:
]
mask.view(-1)[idx_top] = True
return mask
class FeatureSparsifier(SimpleSparsifier):
"""The partial implementation for the feature-wise sparsifier."""
def __init__(self, pruner):
"""Initialize the sparsifier from the pruner of the same layer."""
super().__init__(pruner)
@abstractmethod
def _generate_counts(self, num_samples_f, probs_f):
raise NotImplementedError(
"The base class does not implement this " "method."
)
def sparsify(self, num_samples):
"""Sparsify the edges of the associated feature with num_samples."""
# short notation
weight_original = self._tensor
probs = self._probability
# pre-allocate gammas
gammas = torch.ones_like(probs)
idx = (num_samples).nonzero()
gammas[(num_samples < 1).nonzero(), :] = 0.0
# loop through all filters from which we should sample from
for idx_f in idx:
# generate counts for this filter
counts = self._generate_counts(num_samples[idx_f], probs[idx_f])
# only use approximation when it effectively reduces size
less = (
counts.nonzero().shape[0]
< self._tensor[idx_f].nonzero().shape[0]
)
if less:
# if it does, reweigh appropriately
if self._do_reweighing:
gammas[idx_f] = self._reweigh(
counts,
num_samples[idx_f],
self._probability_div[idx_f],
)
else:
gammas[idx_f] = (counts > 0).float()
# return approximation
return gammas * weight_original
class RandFeatureSparsifier(RandSparsifier, FeatureSparsifier):
"""A sparsifier for random weight sparsification per feature."""
class DetFeatureSparsifier(DetSparsifier, FeatureSparsifier):
"""The sparsifier for deterministic weight sparsification per feature."""
class FilterSparsifier(SimpleSparsifier):
"""The implementation for the fake sparsifier for filter pruning."""
@property
def _do_reweighing(self):
return False
def _reweigh(self, counts, num_samples, probs_div):
gammas = counts.float() / num_samples.float() / probs_div
return gammas
def __init__(self, pruner, out_mode):
"""Initialize the sparsifier from the pruner of the same layer."""
super().__init__(pruner)
self._out_mode = out_mode
def sparsify(self, num_samples):
"""Fake-sparsify the edges (we don't do sparsification for filters)."""
# short notation
weight_original = self._tensor
# pre-allocate gammas
gammas = copy.deepcopy(num_samples).float()
# check for reweighing
if self._do_reweighing and num_samples.sum() > 0:
gammas = self._reweigh(
gammas, num_samples.sum(), self._probability_div
)
else:
gammas = (gammas > 0).float()
# make gammas compatible with Woriginal
gammas = gammas.unsqueeze(int(self._out_mode)).unsqueeze(-1)
# make Woriginal compatible with gammas and return
weight_hat = (
gammas
* weight_original.view(
weight_original.shape[0], weight_original.shape[1], -1
)
).view_as(weight_original)
return weight_hat
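# Hedged usage sketch (not part of the original module): `_ToyPruner` is a
# made-up stand-in exposing the attributes BaseSparsifier expects, used here to
# exercise DetFeatureSparsifier on random weights.
if __name__ == "__main__":

    class _ToyPruner:
        def __init__(self, weight):
            self.tensor = weight
            probs = weight.abs() / weight.abs().sum(dim=1, keepdim=True)
            self.probability = probs
            self.probability_div = probs

    torch.manual_seed(0)
    toy_weight = torch.randn(4, 10)
    sparsifier = DetFeatureSparsifier(_ToyPruner(toy_weight))
    # keep the 3 highest-probability weights for features 0, 1, 3; drop feature 2
    kept = sparsifier.sparsify(torch.tensor([3, 3, 0, 3]))
    print("non-zero weights kept:", int((kept != 0).sum()))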
|
the-stack_106_19676
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import os
import sys
from setuptools import setup, find_packages, Extension
from setuptools.dist import Distribution
from setuptools.command.install import install
PACKAGE_NAME = "turicreate"
VERSION = "6.4.1" # {{VERSION_STRING}}
# pkgs not needed for minimal pkg
NON_MINIMAL_LIST = [
"coremltools",
"pandas",
"resampy",
"scipy",
"tensorflow",
]
# Prevent distutils from thinking we are a pure python package
class BinaryDistribution(Distribution):
def is_pure(self):
return False
class InstallEngine(install):
"""Helper class to hook the python setup.py install path to download
client libraries and engine
"""
user_options = install.user_options + [
("minimal", None, "control minimal installation"), # a 'flag' option
]
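    # Hedged usage note (not in the original file): with the flag above, a command
    # like `python setup.py install --minimal` is expected to skip the optional
    # heavy dependencies listed in NON_MINIMAL_LIST (see run() below).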
def initialize_options(self):
install.initialize_options(self)
self.minimal = None
def run(self):
if self.minimal is not None:
def do_not_install(require):
require = require.strip()
for name in NON_MINIMAL_LIST:
if require.startswith(name):
return False
return True
install_requires_minimal = list(
filter(do_not_install, self.distribution.install_requires)
)
orig_install_requires = self.distribution.install_requires
self.distribution.install_requires = install_requires_minimal
print(" minimal install: ", install_requires_minimal)
print("original install: ", orig_install_requires)
import platform
# start by running base class implementation of run
install.run(self)
# Check correct version of architecture (64-bit only)
arch = platform.architecture()[0]
if arch != "64bit":
msg = (
"Turi Create currently supports only 64-bit operating systems, and only recent Linux/OSX "
+ "architectures. Please install using a supported version. Your architecture is currently: %s"
% arch
)
sys.stderr.write(msg)
sys.exit(1)
# if OSX, verify >= 10.8
from distutils.util import get_platform
from pkg_resources import parse_version
cur_platform = get_platform()
if cur_platform.startswith("macosx"):
mac_ver = platform.mac_ver()[0]
if parse_version(mac_ver) < parse_version("10.8.0"):
msg = (
"Turi Create currently does not support versions of OSX prior to 10.8. Please upgrade your Mac OSX "
"installation to a supported version. Your current OSX version is: %s"
% mac_ver
)
sys.stderr.write(msg)
sys.exit(1)
elif cur_platform.startswith("linux"):
pass
elif cur_platform.startswith("win"):
win_ver = platform.version()
# Verify this is Vista or above
if parse_version(win_ver) < parse_version("6.0"):
msg = (
"Turi Create currently does not support versions of Windows"
" prior to Vista, or versions of Windows Server prior to 2008."
"Your current version of Windows is: %s" % platform.release()
)
sys.stderr.write(msg)
sys.exit(1)
else:
msg = (
"Unsupported Platform: '%s'. Turi Create is only supported on Windows, Mac OSX, and Linux."
% cur_platform
)
sys.stderr.write(msg)
sys.exit(1)
if __name__ == "__main__":
from distutils.util import get_platform
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Financial and Insurance Industry",
"Intended Audience :: Information Technology",
"Intended Audience :: Other Audience",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Information Analysis",
]
cur_platform = get_platform()
if cur_platform.startswith("macosx"):
classifiers.append("Operating System :: MacOS :: MacOS X")
elif cur_platform.startswith("linux"):
classifiers += [
"Operating System :: POSIX :: Linux",
"Operating System :: POSIX :: BSD",
"Operating System :: Unix",
]
elif cur_platform.startswith("win"):
classifiers += ["Operating System :: Microsoft :: Windows"]
else:
msg = (
"Unsupported Platform: '%s'. Turi Create is only supported on Windows, Mac OSX, and Linux."
% cur_platform
)
sys.stderr.write(msg)
sys.exit(1)
with open(os.path.join(os.path.dirname(__file__), "README.rst"), "rb") as f:
long_description = f.read().decode("utf-8")
install_requires = [
"decorator >= 4.0.9",
"numpy",
"pandas >= 0.23.2",
"pillow >= 5.2.0",
"prettytable == 0.7.2",
"resampy == 0.2.1",
"requests >= 2.9.1",
"scipy >= 1.1.0",
"six >= 1.10.0",
"coremltools==4.0b4",
]
if sys.version_info[0] == 2 or (
sys.version_info[0] == 3 and sys.version_info[1] == 5
):
install_requires.append("llvmlite == 0.31.0")
if sys.platform == "darwin":
install_requires.append("tensorflow >= 2.0.0")
else:
# ST, OD, AC and DC segfault on Linux with TensorFlow 2.1.0 and 2.1.1
# See: https://github.com/apple/turicreate/issues/3003
# SC errors out on Linux with TensorFlow 2.2 and 2.3
# See: https://github.com/apple/turicreate/issues/3303
if sys.version_info[0] != 3 or sys.version_info[1] != 8:
install_requires.append("tensorflow >= 2.0.0,<2.1.0")
else:
# Only TensorFlow >= 2.2 supports Python 3.8
install_requires.append("tensorflow >= 2.0.0")
# numba 0.51 started using "manylinux2014" rather than "manylinux2010".
# This breaks a lot of Linux installs.
install_requires.append("numba < 0.51.0")
setup(
name="turicreate",
version=VERSION,
# This distribution contains platform-specific C++ libraries, but they are not
# built with distutils. So we must create a dummy Extension object so when we
# create a binary file it knows to make it platform-specific.
ext_modules=[Extension("turicreate.__dummy", sources=["dummy.c"])],
author="Apple Inc.",
author_email="[email protected]",
cmdclass=dict(install=InstallEngine),
distclass=BinaryDistribution,
package_data={
"turicreate": [
"_cython/*.so",
"_cython/*.pyd",
"*.so",
"*.dylib",
"toolkits/*.so",
# macOS visualization
"Turi Create Visualization.app/Contents/*",
"Turi Create Visualization.app/Contents/_CodeSignature/*",
"Turi Create Visualization.app/Contents/MacOS/*",
"Turi Create Visualization.app/Contents/Resources/*",
"Turi Create Visualization.app/Contents/Resources/Base.lproj/*",
"Turi Create Visualization.app/Contents/Resources/Base.lproj/Main.storyboardc/*",
"Turi Create Visualization.app/Contents/Resources/build/*",
"Turi Create Visualization.app/Contents/Resources/build/static/*",
"Turi Create Visualization.app/Contents/Resources/build/static/css/*",
"Turi Create Visualization.app/Contents/Resources/build/static/js/*",
"Turi Create Visualization.app/Contents/Resources/build/static/media/*",
"Turi Create Visualization.app/Contents/Frameworks/*",
# Linux visualization
"Turi Create Visualization/*.*",
"Turi Create Visualization/visualization_client",
"Turi Create Visualization/swiftshader/*",
"Turi Create Visualization/locales/*",
"Turi Create Visualization/html/*.*",
"Turi Create Visualization/html/static/js/*",
"Turi Create Visualization/html/static/css/*",
# Plot.save dependencies
"visualization/vega_3.2.1.js",
"visualization/vg2png",
"visualization/vg2svg",
]
},
packages=find_packages(exclude=["test"]),
url="https://github.com/apple/turicreate",
license="LICENSE.txt",
description="Turi Create simplifies the development of custom machine learning models.",
long_description=long_description,
classifiers=classifiers,
install_requires=install_requires,
)
|
the-stack_106_19677
|
import logging; _L = logging.getLogger('openaddr.ci.collect')
from argparse import ArgumentParser
from urllib.parse import urlparse
from datetime import date
from time import sleep
from os import environ
from .objects import read_latest_set, read_completed_runs_to_date
from . import db_connect, db_cursor, setup_logger, render_index_maps, log_function_errors, dashboard_stats
from .. import S3, util
parser = ArgumentParser(description='Run some source files.')
parser.add_argument('-o', '--owner', default='openaddresses',
help='Github repository owner. Defaults to "openaddresses".')
parser.add_argument('-r', '--repository', default='openaddresses',
help='Github repository name. Defaults to "openaddresses".')
parser.add_argument('-b', '--bucket', default=environ.get('AWS_S3_BUCKET', None),
help='S3 bucket name. Defaults to value of AWS_S3_BUCKET environment variable.')
parser.add_argument('-d', '--database-url', default=environ.get('DATABASE_URL', None),
help='Optional connection string for database. Defaults to value of DATABASE_URL environment variable.')
parser.add_argument('--sns-arn', default=environ.get('AWS_SNS_ARN', None),
help='Optional AWS Simple Notification Service (SNS) resource. Defaults to value of AWS_SNS_ARN environment variable.')
parser.add_argument('--hourly', default=False, action='store_true',
help='Run hourly forever instead of just once.')
parser.add_argument('-v', '--verbose', help='Turn on verbose logging',
action='store_const', dest='loglevel',
const=logging.DEBUG, default=logging.INFO)
parser.add_argument('-q', '--quiet', help='Turn off most logging',
action='store_const', dest='loglevel',
const=logging.WARNING, default=logging.INFO)
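# Example invocation (hypothetical bucket name; assumes the package is installed):
#   python -m openaddr.ci.collect -o openaddresses -r openaddresses -b my-extracts-bucket --hourly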
@log_function_errors
def main():
    ''' Single-threaded worker that renders index maps and uploads dashboard
        stats for the latest completed set.
    '''
args = parser.parse_args()
setup_logger(args.sns_arn, None, log_level=args.loglevel)
s3 = S3(None, None, args.bucket)
db_args = util.prepare_db_kwargs(args.database_url)
while True:
with db_connect(**db_args) as conn:
with db_cursor(conn) as db:
set = read_latest_set(db, args.owner, args.repository)
runs = read_completed_runs_to_date(db, set.id)
stats = dashboard_stats.make_stats(db)
render_index_maps(s3, runs)
dashboard_stats.upload_stats(s3, stats)
if args.hourly:
_L.info('Sleeping for one hour')
sleep(3600)
else:
return
if __name__ == '__main__':
exit(main())
|
the-stack_106_19679
|
from bitarray import bitarray
def show(a):
_ptr, size, _endian, _unused, alloc = a.buffer_info()
print('%d %d' % (size, alloc))
a = bitarray()
prev = -1
while len(a) < 2000:
alloc = a.buffer_info()[4]
if prev != alloc:
show(a)
prev = alloc
a.append(1)
for i in 800_000, 400_000, 399_992, 0, 0, 80_000:
if len(a) < i:
a.extend(bitarray(i - len(a)))
else:
del a[i:]
assert len(a) == i
show(a)
|
the-stack_106_19681
|
#!/usr/bin/env python
import asyncio
import logging
from typing import (
Optional
)
from hummingbot.core.data_type.user_stream_tracker_data_source import UserStreamTrackerDataSource
from hummingbot.logger import HummingbotLogger
from hummingbot.core.data_type.user_stream_tracker import UserStreamTracker
from hummingbot.core.utils.async_utils import (
safe_ensure_future,
safe_gather,
)
from hummingbot.connector.derivative.leverj_perpetual.leverj_perpetual_api_order_book_data_source import LeverjPerpetualAPIOrderBookDataSource
from hummingbot.connector.derivative.leverj_perpetual.leverj_perpetual_user_stream_data_source import LeverjPerpetualUserStreamDataSource
from hummingbot.connector.derivative.leverj_perpetual.leverj_perpetual_auth import LeverjPerpetualAuth
class LeverjPerpetualUserStreamTracker(UserStreamTracker):
_krust_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._krust_logger is None:
cls._krust_logger = logging.getLogger(__name__)
return cls._krust_logger
def __init__(self,
orderbook_tracker_data_source: LeverjPerpetualAPIOrderBookDataSource,
leverj_auth: LeverjPerpetualAuth,
domain: str = "kovan"):
super().__init__()
self._ev_loop: asyncio.events.AbstractEventLoop = asyncio.get_event_loop()
self._data_source: Optional[UserStreamTrackerDataSource] = None
self._user_stream_tracking_task: Optional[asyncio.Task] = None
self._orderbook_tracker_data_source = orderbook_tracker_data_source
self._leverj_auth: LeverjPerpetualAuth = leverj_auth
self._domain = domain
@property
def data_source(self) -> UserStreamTrackerDataSource:
if not self._data_source:
self._data_source = LeverjPerpetualUserStreamDataSource(orderbook_tracker_data_source=self._orderbook_tracker_data_source,
leverj_auth=self._leverj_auth,
domain=self._domain)
return self._data_source
@property
def exchange_name(self) -> str:
return "leverj_perpetual"
async def start(self):
self._user_stream_tracking_task = safe_ensure_future(
self.data_source.listen_for_user_stream(self._ev_loop, self._user_stream)
)
await safe_gather(self._user_stream_tracking_task)
|
the-stack_106_19682
|
#!/usr/bin/env python3
import argparse, requests, json, pickle, os, sys, subprocess, time, random, http.client, httplib2, datetime
from moviepy.editor import VideoFileClip, concatenate_videoclips
from google_auth_oauthlib.flow import Flow, InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload
from googleapiclient.errors import HttpError
from google.auth.transport.requests import Request
TWITCH_CS_FILENAME = 'twitch_client_secret.json'
YOUTUBE_CS_FILENAME = 'yt_client_secret.json'
GAME_IDS_FILENAME = 'game_ids.json'
def run(args=None):
game = args.game
days_ago = args.days_ago
num_clips = args.num_clips
oauth = get_twitch_oauth()
game_id = get_game_id(game, oauth)
clips = []
slugs = []
if(num_clips is None):
clips, slugs = manual_get_clips(game_id, oauth, days_ago)
else:
clips, slugs = auto_get_clips(game_id, oauth, num_clips, days_ago)
videos = download_clips(clips)
timestamps = concatenate_clips(videos)
upload_video(game_id, timestamps, slugs)
delete_mp4s(videos)
def get_twitch_oauth():
global TWITCH_CS
TWITCH_CS = read_json(TWITCH_CS_FILENAME)
response = requests.post('https://id.twitch.tv/oauth2/token?', TWITCH_CS).text
print("Twitch OAuth received.")
return json.loads(response)["access_token"]
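# Expected shape of twitch_client_secret.json (an assumption inferred from the
# token request above; values are placeholders):
#   {"client_id": "...", "client_secret": "...", "grant_type": "client_credentials"}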
def read_json(filename):
try:
with open(filename, "r") as f:
return json.loads(f.read())
except json.decoder.JSONDecodeError:
write_json({}, filename)
return read_json(filename)
def write_json(json_dict, filename):
with open(filename, "wt") as f:
json.dump(json_dict, f)
def open_video(filename):
if sys.platform == "win32":
os.startfile(filename)
else:
opener = "open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, filename])
def get_game_id(game, oauth):
game_ids = read_json(GAME_IDS_FILENAME)
if game.lower() in game_ids:
print("Game ID retrieved.")
return game_ids[game.lower()]
url = 'https://api.twitch.tv/helix/games?name=' + game.title()
headers = {"Authorization":"Bearer " + oauth, "Client-Id":TWITCH_CS["client_id"]}
response = json.loads(requests.get(url, headers=headers).text)
if response["data"] == []:
official_name = input("Could not find "+game+". What is the official game name on Twitch? ")
id = get_game_id(official_name, oauth)
game_ids = read_json(GAME_IDS_FILENAME)
game_ids[game.lower()] = id
write_json(game_ids, GAME_IDS_FILENAME)
else:
id = response["data"][0]["id"]
game_ids[game.lower()] = id
write_json(game_ids, GAME_IDS_FILENAME)
print("Game ID retrieved.")
return game_ids[game.lower()]
def manual_get_clips(game_id, oauth, days_ago, cursor=None):
# Get date and time from days_ago days ago
today = datetime.date.today()
week_ago = (today - datetime.timedelta(days=days_ago)).strftime("%Y-%m-%d")
start_date = week_ago + "T00:00:00.00Z"
# Request clips from Twitch
print("Requesting clips...")
url = 'https://api.twitch.tv/helix/clips?'
    # Fetch a few clips per page ("first": "3") so each batch can be reviewed interactively
params = {"game_id":game_id, "first":"3", "started_at":start_date, "after":cursor}
headers = {"Authorization":"Bearer " + oauth, "Client-Id":TWITCH_CS["client_id"]}
response = json.loads(requests.get(url, params, headers=headers).text)
clips = []
slugs = []
temp_clips = []
vid_length = 0
for data in response["data"]:
# get download links
url = data["thumbnail_url"]
splice_index = url.index("-preview")
url = url[:splice_index] + ".mp4"
temp_clips.append(url)
video = download_clips(temp_clips)
open_video("0.mp4")
vfc = VideoFileClip("0.mp4")
clip_duration = vfc.duration
vfc.close()
print("Current length of video: "+str(datetime.timedelta(seconds=vid_length))+"; length of video with current clip included: "+str(datetime.timedelta(seconds=(vid_length+clip_duration))))
choice = input("Include this clip in the video? (y, yf, n, nf): ").lower()
while(choice != 'y' and choice != 'n' and choice != 'yf' and choice != 'nf'):
print("Invalid reponse")
choice = input("Include this clip in the video? (y, yf, n, nf): ").lower()
if('y' in choice):
vid_length += clip_duration
clips.append(url)
# get public clip links (i.e., slugs)
slug = data["url"]
slugs.append(slug)
if('f' in choice):
delete_mp4s(video)
print("Clips chosen.")
return clips, slugs
delete_mp4s(video)
temp_clips = []
# If we haven't finished ('f' in choice), make another request
cursor = response['pagination']['cursor']
new_clips, new_slugs = manual_get_clips(game_id, oauth, days_ago, cursor)
clips.extend(new_clips)
slugs.extend(new_slugs)
return clips, slugs
def auto_get_clips(game_id, oauth, num_clips, days_ago, cursor=None):
# Get date and time from days_ago days ago
today = datetime.date.today()
week_ago = (today - datetime.timedelta(days=days_ago)).strftime("%Y-%m-%d")
start_date = week_ago + "T00:00:00.00Z"
# Request clips from Twitch
print("Requesting clips...")
url = 'https://api.twitch.tv/helix/clips?'
params = {"game_id":game_id, "first":num_clips, "started_at":start_date, "after":cursor}
headers = {"Authorization":"Bearer " + oauth, "Client-Id":TWITCH_CS["client_id"]}
response = json.loads(requests.get(url, params, headers=headers).text)
clips = []
slugs = []
for data in response["data"]:
# get download links
url = data["thumbnail_url"]
splice_index = url.index("-preview")
clips.append(url[:splice_index] + ".mp4")
# get public clip links (i.e., slugs)
url = data["url"]
slugs.append(url)
# If response does not include all clips, request until all clips are returned
if len(clips) < int(num_clips):
cursor = response['pagination']['cursor']
new_clips, new_slugs = auto_get_clips(game_id, oauth, str(int(num_clips)-len(clips)), days_ago, cursor)
clips.extend(new_clips)
slugs.extend(new_slugs)
print("Clips and slugs received.")
return clips, slugs
def download_clips(clips):
print("Downloading clips...")
videos = []
for i in range(len(clips)):
r = requests.get(clips[i], stream=True)
name = str(i) + ".mp4"
with open(name, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
videos.append(name)
print("Clips downloaded.")
return videos
def delete_mp4s(videos):
for video in videos:
os.remove(video)
if os.path.exists('final.mp4'):
os.remove('final.mp4')
print("Videos deleted.")
def concatenate_clips(videos):
vfcs = []
timestamps = [0]
for video in videos:
vfc = VideoFileClip(video, target_resolution=(1080, 1920))
vfcs.append(vfc)
# No need for last clip's duration
if video is not videos[-1]:
# Add most recent timestamp to current clip's duration for next timestamp
timestamps.append(timestamps[-1] + vfc.duration)
final_clip = concatenate_videoclips(vfcs)
final_clip.write_videofile("final.mp4", temp_audiofile="temp-audio.m4a", remove_temp=True, audio_codec="aac")
print("Final video created.")
# Apparently these need to be closed like a file
for vfc in vfcs:
vfc.close()
return timestamps
def create_service(client_secret_file, api_name, api_version, *scopes):
CLIENT_SECRET_FILE = client_secret_file
API_SERVICE_NAME = api_name
API_VERSION = api_version
SCOPES = [scope for scope in scopes[0]]
cred = None
pickle_file = f'token_{API_SERVICE_NAME}_{API_VERSION}.pickle'
if os.path.exists(pickle_file):
with open(pickle_file, 'rb') as token:
cred = pickle.load(token)
if not cred or not cred.valid:
if cred and cred.expired and cred.refresh_token:
cred.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRET_FILE, SCOPES)
cred = flow.run_local_server()
with open(pickle_file, 'wb') as token:
pickle.dump(cred, token)
try:
service = build(API_SERVICE_NAME, API_VERSION, credentials=cred)
print(API_SERVICE_NAME.title(), 'service created successfully')
return service
except Exception as e:
print('Unable to connect.')
print(e)
return None
def generate_title(playlist_title, video_count):
return playlist_title + " #" + str(video_count+1) + " - Funny Moments, Fails, and Highlights"
def generate_description(timestamps, slugs):
description = "Join our Discord to submit clips! https://discord.gg/Th55ADV \n\n"
for i in range(len(timestamps)):
timestamp = str(datetime.timedelta(seconds=timestamps[i]))
description += timestamp + " - " + slugs[i] + "\n"
return description
def generate_tags(game_id):
tags = read_json("tags.json")
return tags[game_id]
def upload_video(game_id, timestamps, slugs):
API_NAME = 'youtube'
API_VERSION = 'v3'
SCOPES = ['https://www.googleapis.com/auth/youtube']
service = create_service(YOUTUBE_CS_FILENAME, API_NAME, API_VERSION, SCOPES)
# Get playlist ID, title, and video count
playlist_id, playlist_title, video_count = get_playlist(game_id, service)
upload_request_body = {
'snippet': {
'categoryId': 20,
'title': generate_title(playlist_title, video_count),
'description': generate_description(timestamps, slugs),
'tags': generate_tags(game_id)
},
'status': {
'privacyStatus': 'private',
'selfDeclaredMadeForKids': False
}
}
mediaFile = MediaFileUpload('final.mp4', chunksize=-1, resumable=True)
video_insert_request = service.videos().insert(
part="snippet,status",
body=upload_request_body,
media_body=mediaFile
)
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
# Maximum number of times to retry before giving up.
MAX_RETRIES = 10
# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, http.client.NotConnected,
http.client.IncompleteRead, http.client.ImproperConnectionState,
http.client.CannotSendRequest, http.client.CannotSendHeader,
http.client.ResponseNotReady, http.client.BadStatusLine)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
# Upload the video... finally.
response = None
error = None
retry = 0
video_id = ''
while response is None:
try:
print("Uploading video...")
status, response = video_insert_request.next_chunk()
if response is not None:
if 'id' in response:
video_id = response['id']
print("Video id '%s' was successfully uploaded." % response['id'])
else:
exit("The upload failed with an unexpected response: %s" % response)
except HttpError as e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,e.content)
else:
raise
except RETRIABLE_EXCEPTIONS as e:
error = "A retriable error occurred: %s" % e
if error is not None:
print(error)
retry += 1
if retry > MAX_RETRIES:
exit("No longer attempting to retry.")
max_sleep = 2 ** retry
sleep_seconds = random.random() * max_sleep
print("Sleeping %f seconds and then retrying..." % sleep_seconds)
time.sleep(sleep_seconds)
# Wait for YouTube to process the upload
time.sleep(60)
# Insert video into playlist and update local playlist info
insert_to_playlist(service, game_id, playlist_id, video_id)
def get_playlist(game_id, service, pToken=None, playlist=None):
# Check if playlist_id exists for game_id
playlist_ids = read_json("playlist_ids.json")
if game_id in playlist_ids: return playlist_ids[game_id]
# If not, get list of playlists on channel
playlist_list_request = service.playlists().list(
part="snippet,id,contentDetails",
mine=True,
pageToken=pToken
)
playlist_list_response = playlist_list_request.execute()
# Ask user for name of playlist
if playlist is None:
playlist = input("Game not yet attributed to a playlist. What is the full name of the playlist? ")
# Find the playlist that our video belongs in
playlist_id = '0'
video_count = 0
if playlist_list_response is None:
exit("The playlist request failed with an unexpected response: %s" % response)
for item in playlist_list_response['items']:
playlist_title = item['snippet']['title']
if playlist.lower() in playlist_title.lower():
playlist_id = item['id']
            video_count = item['contentDetails']['itemCount']
playlist_ids[game_id] = playlist_id, playlist_title, video_count
write_json(playlist_ids, "playlist_ids.json")
return playlist_id, playlist_title, video_count
if playlist_id == '0':
if 'nextPageToken' in playlist_list_response:
nextPageToken = playlist_list_response['nextPageToken']
return get_playlist(game_id, service, nextPageToken, playlist)
else:
exit("No playlist for the name given exists.")
def insert_to_playlist(service, game_id, playlist_id, video_id):
# Insert video into playlist
playlist_insert_request = service.playlistItems().insert(
part="snippet",
body={
"snippet": {
"playlistId": playlist_id,
"resourceId": {
"kind": "youtube#video",
"videoId": video_id
}
}
}
)
    try:
        playlist_insert_request.execute()
        print("Video added to playlist.")
    except HttpError as e:
        print("Failed to add video to playlist: %s" % e)
# Increment local playlist video count
playlist_ids = read_json("playlist_ids.json")
playlist_ids[game_id][2] += 1
write_json(playlist_ids, "playlist_ids.json")
def main():
parser=argparse.ArgumentParser(description="Download, concatenate, and upload Twitch clips")
parser.add_argument("-g",help="Game name",dest="game",type=str,required=True)
parser.add_argument("-n",help="Number of clips to download",dest="num_clips",type=str,default=None)
parser.add_argument("-d",help="Number of days ago that clips started",dest="days_ago",type=int,default=7)
parser.set_defaults(func=run)
args=parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
|
the-stack_106_19684
|
#!/usr/bin/env python3
#####################################################################
# This script presents how to read and use the sound buffer.
# This script stores a "basic_sounds.wav" file of recorded audio.
# Note: This requires scipy library
#####################################################################
import vizdoom as vzd
from random import choice
import numpy as np
from scipy.io import wavfile
from time import sleep
if __name__ == "__main__":
game = vzd.DoomGame()
# Load config of the basic scenario
game.load_config('../../scenarios/basic.cfg')
# Turns on the audio buffer. (turned off by default)
# If this is switched on, the audio will stop playing on device, even with game.set_sound_enabled(True)
# Setting game.set_sound_enabled(True) is not required for audio buffer to work.
AUDIO_BUFFER_ENABLED = True
game.set_audio_buffer_enabled(AUDIO_BUFFER_ENABLED)
# Set the sampling rate used in the observation window. Has to be one from:
# - vzd.SamplingRate.SR_44100 (default)
# - vzd.SamplingRate.SR_22050
# - vzd.SamplingRate.SR_11025
# Remember to also set audio saving code at the bottom to use same sampling rate!
game.set_audio_sampling_rate(vzd.SamplingRate.SR_22050)
# When using frameskip (`tics` parameter of the `make_actions` function),
# we would only get the latest "frame" of audio (1/35 seconds).
    # With this function you can set how many of the last "frames" of audio will be stored in the audio buffer.
    # Note that if you use a larger frameskip than the audio buffer size, you will lose some information about the audio.
    # If you use a frameskip smaller than the audio buffer size, some audio information will overlap.
frameskip = 4
game.set_audio_buffer_size(frameskip)
# This could fix "no audio in buffer" bug on Ubuntu 20.04.
#game.add_game_args("+snd_efx 0")
# Initialize the game. Further configuration won't take any effect from now on.
try:
game.init()
except Exception as e:
print(
"[ERROR] Could not launch ViZDoom. If you see an error above about BiquadFilter and gain,\n"
" try setting game.add_game_args('+snd_efx 0'). If that fails, see\n"
" https://github.com/mwydmuch/ViZDoom/pull/486"
)
exit(1)
actions = [[True, False, False], [False, True, False], [False, False, True]]
sleep_time = 1.0 / vzd.DEFAULT_TICRATE # = 0.028
episodes = 3
audio_slices = []
for i in range(episodes):
print("Episode #" + str(i + 1))
game.new_episode()
while not game.is_episode_finished():
# Gets the state
state = game.get_state()
audio_buffer = state.audio_buffer
audio_slices.append(audio_buffer)
            # Makes a random action and remembers the reward.
r = game.make_action(choice(actions), frameskip)
if not AUDIO_BUFFER_ENABLED:
sleep(sleep_time * frameskip)
game.close()
if AUDIO_BUFFER_ENABLED:
        # Check that we have audio (having no audio is a common bug, see
        # https://github.com/mwydmuch/ViZDoom/pull/486)
audio_data = np.concatenate(audio_slices, axis=0)
if audio_data.max() == 0:
            print(
                "[WARNING] Audio buffers were full of silence. This is a common bug on e.g. Ubuntu 20.04\n"
                "          See https://github.com/mwydmuch/ViZDoom/pull/486\n"
                "          Two possible fixes:\n"
                "          1) Try setting game.add_game_args('+snd_efx 0'). This may disable some audio effects\n"
                "          2) Try installing a newer version of OpenAL Soft library, see https://github.com/mwydmuch/ViZDoom/pull/486#issuecomment-889389185"
            )
# Save audio file
wavfile.write("basic_sounds.wav", 22050, np.concatenate(audio_slices, axis=0))
|
the-stack_106_19685
|
def kafkaSendFiles(
directories, files, basicmetadata, df_metadata, kafkaProducer, kafkaTopic
):
import hashlib
from PIL import Image
import os
from kafka import KafkaProducer
import requests
import json
blobstorage_dir = directories["blobstorage_dir"]
thumbnail_dir = directories["thumbnail_dir"]
ingest_dir = directories["ingest_dir"]
metadataYoloServer=directories["METADATA_YOLO"]
metadataClipServer=directories["METADATA_CLIP"]
j = -1
for filename in files:
j = j + 1
filenameOS = filename.replace("\\", "/")
with open(filenameOS, "rb") as f:
file_hash = hashlib.md5()
chunk = f.read(2 ** 13)
while chunk:
file_hash.update(chunk)
chunk = f.read(2 ** 13)
filenameHash = file_hash.hexdigest()
f.close()
        # Use context managers so the uploaded image handles are closed after each request
        with open(filenameOS, 'rb') as image_file:
            yolo_response = requests.post(metadataYoloServer, files={'file': image_file}).json()
        with open(filenameOS, 'rb') as image_file:
            clip_response = requests.post(metadataClipServer, files={'file': image_file}).json()
image = Image.open(filenameOS).convert("RGB")
width, height = image.size
max_w = 400
factor = max_w / width
newsize = (int(width * factor), int(height * factor))
# Write to Blob Storage
print("Save image into Blob Storage:" + filenameHash)
image.save(blobstorage_dir + "/" + filenameHash + ".png")
imageThumb = image.resize(newsize)
imageThumb.save(thumbnail_dir + "/" + filenameHash + ".jpg")
relPath = os.path.relpath(filenameOS, ingest_dir)
relPath = relPath.replace("\\", "/")
(basedir, name) = os.path.split(relPath)
newEntry = {
"yolov5": json.loads(yolo_response),
"clip": json.loads( json.dumps(clip_response), parse_float=lambda x: round(float(x), 4) ),
"datasetprovider": basicmetadata["datasetprovider"],
"datasetproviderURL": basicmetadata["datasetproviderURL"],
"datasetname": basicmetadata["datasetname"],
"datasetcontainer": basicmetadata["datasetcontainer"],
"imageFilename": name,
"imageRelativePath": relPath,
"imageHeight": height,
"imageWidth": width,
"filenameHash": filenameHash,
}
if "lat" in df_metadata:
newEntry["lat"] = df_metadata["lat"][j]
if "lon" in df_metadata:
newEntry["lon"] = df_metadata["lon"][j]
if "alt" in df_metadata:
newEntry["alt"] = df_metadata["alt"][j]
if "vf" in df_metadata:
newEntry["velocity_lon"] = df_metadata["vf"][j]
if "vl" in df_metadata:
newEntry["velocity_lat"] = df_metadata["vl"][j]
if "vu" in df_metadata:
newEntry["velocity_alt"] = df_metadata["vu"][j]
if "ax" in df_metadata:
newEntry["acceleration_lon"] = df_metadata["ax"][j]
if "ay" in df_metadata:
newEntry["acceleration_lat"] = df_metadata["ay"][j]
if "az" in df_metadata:
newEntry["acceleration_alt"] = df_metadata["az"][j]
if "ts" in df_metadata:
newEntry["timestamp"] = df_metadata["ts"][j]
print("Sending ", relPath)
# kafkaTopic='topic_test'
kafkaProducer.send(kafkaTopic, value=newEntry)
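# Illustrative usage sketch for kafkaSendFiles() above. This block is an assumption,
# not part of the original module: the broker address, directory paths, metadata
# values and topic name are placeholders, chosen only to show the argument shapes
# the function reads (directories keys, basicmetadata keys, per-file df_metadata lists).
if __name__ == "__main__":
    import json
    from kafka import KafkaProducer
    directories = {
        "blobstorage_dir": "/data/blobstorage",
        "thumbnail_dir": "/data/thumbnails",
        "ingest_dir": "/data/ingest",
        "METADATA_YOLO": "http://localhost:5000/yolo",  # placeholder inference endpoints
        "METADATA_CLIP": "http://localhost:5001/clip",
    }
    basicmetadata = {
        "datasetprovider": "example-provider",
        "datasetproviderURL": "https://example.org",
        "datasetname": "example-dataset",
        "datasetcontainer": "example-container",
    }
    # One entry per file; only the keys present here are copied into the Kafka message.
    df_metadata = {"lat": [52.01], "lon": [4.36], "ts": ["2021-01-01T00:00:00Z"]}
    files = ["/data/ingest/example.jpg"]
    producer = KafkaProducer(
        bootstrap_servers="localhost:9092",
        value_serializer=lambda v: json.dumps(v).encode("utf-8"),
    )
    kafkaSendFiles(directories, files, basicmetadata, df_metadata, producer, "ingest-topic")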
|
the-stack_106_19686
|
from functools import partial
from crispy_forms.helper import FormHelper
from django import forms
from .models import (
Distribution,
Individual,
Household,
)
from workflow.models import (
Office,
Program,
SiteProfile,
)
class DatePicker(forms.DateInput):
"""
Use in form to create a Jquery datepicker element
"""
template_name = 'datepicker.html'
DateInput = partial(forms.DateInput, {'class': 'datepicker'})
class DistributionForm(forms.ModelForm):
start_date = forms.DateField(widget=DatePicker.DateInput(), required=False)
end_date = forms.DateField(widget=DatePicker.DateInput(), required=False)
form_filled_date = forms.DateField(widget=DatePicker.DateInput(), required=False)
form_verified_date = forms.DateField(widget=DatePicker.DateInput(), required=False)
class Meta:
model = Distribution
exclude = ['create_date', 'edit_date']
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.request = kwargs.pop('request')
self.organization = kwargs.pop('organization')
self.helper.form_error_title = 'Form Errors'
self.helper.error_text_inline = True
self.helper.help_text_inline = True
self.helper.html5_required = True
super(DistributionForm, self).__init__(*args, **kwargs)
self.fields['program'].queryset = Program.objects.filter(
organization=self.request.user.activity_user.organization)
self.fields['office'].queryset = Office.objects.filter(
organization=self.request.user.activity_user.organization)
self.fields['name'].label = '{} name'.format(self.organization.distribution_label)
self.fields['implementer'].label = '{} implementer'.format(self.organization.distribution_label)
class IndividualForm(forms.ModelForm):
class Meta:
model = Individual
exclude = ('created_by', 'modified_by', 'label')
date_of_birth = forms.DateTimeField(widget=DatePicker.DateInput(), required=True)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
self.organization = kwargs.pop('organization')
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_error_title = 'Form Errors'
self.helper.error_text_inline = True
self.helper.help_text_inline = True
self.helper.html5_required = True
self.helper.form_tag = True
super(IndividualForm, self).__init__(*args, **kwargs)
organization = self.request.user.activity_user.organization
self.fields['program'].queryset = Program.objects.filter(
organization=organization)
self.fields['site'].queryset = SiteProfile.objects.filter(
organizations__id__contains=self.request.user.activity_user.organization.id)
class HouseholdForm(forms.ModelForm):
class Meta:
model = Household
fields = '__all__'
exclude = ['create_date', 'edit_date', 'created_by', 'label', 'organization', 'modified_by']
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.organization = kwargs.pop('organization')
self.request = kwargs.pop('request')
self.helper.form_method = 'post'
self.helper.form_error_title = 'Form Errors'
self.helper.error_text_inline = True
self.helper.help_text_inline = True
self.helper.html5_required = True
super(HouseholdForm, self).__init__(*args, **kwargs)
organization = self.request.user.activity_user.organization
self.fields['program'].queryset = Program.objects.filter(
organization=organization)
self.fields['name'].label = '{} name'.format(self.organization.household_label)
|
the-stack_106_19687
|
#!c:\users\yogeshwar\anaconda3\python.exe
from http.server import HTTPServer, BaseHTTPRequestHandler
import cgi
import logging
import pandas as pd
import json
from src.csv_to_db_package.csv_to_db import csv_to_db_func
from src.csv_to_db_package.crud_operations_db import view_db_data, delete_db_row, insert_db_row,\
update_db_row, select_db_row, upload_to_s3
from mysql.connector import connect, errors
import boto3
# from src.csv_to_db_package.csv_to_db import csv_to_db_func
api_endpoint = "https://w101gqjv56.execute-api.ap-south-1.amazonaws.com/test/s3-json-data"
logging.basicConfig(filename='server_info.log', level=logging.INFO,
format='%(asctime)s:%(levelname)s:%(message)s')
s3 = boto3.client('s3')
def put_object_to_s3(filename):
with open(filename, 'rb') as file:
result = s3.put_object(Bucket='yogesh-lambda-bucket', Key=filename, Body=file.read())
class RequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
try:
if self.path.endswith('/uploadCSV'):
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
output = ''
output += '<html><head><meta charset="utf-8"></head><body>'
output += '<h1>Welcome to csv upload!!</h1>'
output += '<h3><a href="/uploadCSV/new">Add new file</a></h3>'
output += '</body></html>'
self.wfile.write(output.encode())
if self.path.endswith('/new'):
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
output = ''
output += '<html><head><meta charset="utf-8"></head><body>'
output += '<h2>Add new file</h2>'
output += '<form method="POST" enctype="multipart/form-data" action="/uploadCSV/new">'
output += '<input name="task" type="file" placeholder="Add new file">'
output += '<input type="submit" value="Upload">'
output += '</form>'
output += '</body></html>'
self.wfile.write(output.encode())
if self.path.endswith('/viewtable'):
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
output = ''
output += '<html><head><meta charset="utf-8"></head><body>'
output += '<h2>File uploaded Successfully</h2>'
output += '<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>'
output += """<script>
function my_button_click_handler()
{
var rowId = event.target.parentNode.parentNode.id;
var data = document.getElementById(rowId).querySelectorAll(".row-data");
var objectID = data[0].innerHTML;
                        alert('Button Clicked with row_id = ' + rowId + ' objectID = ' + objectID);
$.ajax(
{
type:'POST',
                            contentType:'application/json;charset=utf-8',
dataType:'json',
url:'http://localhost:8000/delete_data?value='+objectID,
}
);
}
function my_update_data()
{
var rowId = event.target.parentNode.parentNode.id;
var data = document.getElementById(rowId).querySelectorAll(".row-data");
var objectID = data[0].innerHTML;
                        alert('Button Clicked with row_id = ' + rowId + ' objectID = ' + objectID);
window.location.href="/update_data/objectID="+objectID
}
</script>"""
output += """<script>
function redirect_to_create()
{
window.location.href="/add"
}
</script>"""
output += """<script>
function redirect_to_S3()
{
window.location.href="/to_S3"
}
</script>"""
output += view_db_data("https://w101gqjv56.execute-api.ap-south-1.amazonaws.com/test/s3-json-data")
output += '</body></html>'
self.wfile.write(output.encode())
if self.path.endswith('/to_S3'):
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
output = ''
output += '<html><head><meta charset="utf-8"></head><body>'
output += '<form method="POST" enctype="multipart/form-data" action="/to_S3">'
output += '<h2>Click to store data in S3</h2>'
output += '<input type="submit" value="Upload to S3">'
output += '</form>'
output += '</body></html>'
self.wfile.write(output.encode())
if self.path.endswith('/add'):
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
output = ''
output += '<html><head><meta charset="utf-8"></head><body>'
output += '<form method="POST" enctype="multipart/form-data" action="/add">'
output += 'objectID: <input name="objectID" type="text"><br><br>'
output += 'isHighlight: <input name="isHighlight" type="text"><br><br>'
output += 'accessionNumber: <input name="accessionNumber" type="text"><br><br>'
output += 'accessionYear: <input name="accessionYear" type="text"><br><br>'
output += 'isPublicDomain: <input name="isPublicDomain" type="text"><br><br>'
output += 'primaryImage: <input name="primaryImage" type="text"><br><br>'
output += 'primaryImageSmall: <input name="primaryImageSmall" type="text"><br><br>'
output += 'additionalImages: <input name="additionalImages" type="text"><br><br>'
output += 'department: <input name="department" type="text"><br><br>'
output += 'objectName: <input name="objectName" type="text"><br><br>'
output += 'title: <input name="title" type="text"><br><br>'
output += 'culture: <input name="culture" type="text"><br><br>'
output += 'period: <input name="period" type="text"><br><br>'
output += 'dynasty: <input name="dynasty" type="text"><br><br>'
output += 'reign: <input name="reign" type="text"><br><br>'
output += 'portfolio: <input name="portfolio" type="text"><br><br>'
output += 'artistRole: <input name="artistRole" type="text"><br><br>'
output += 'artistPrefix: <input name="artistPrefix" type="text"><br><br>'
output += 'artistDisplayName: <input name="artistDisplayName" type="text"><br><br>'
output += 'artistDisplayBio: <input name="artistDisplayBio" type="text"><br><br>'
output += 'artistSuffix: <input name="artistSuffix" type="text"><br><br>'
output += 'artistAlphaSort: <input name="artistAlphaSort" type="text"><br><br>'
output += 'artistNationality: <input name="artistNationality" type="text"><br><br>'
output += 'artistBeginDate: <input name="artistBeginDate" type="text"><br><br>'
output += 'artistEndDate: <input name="artistEndDate" type="text"><br><br>'
output += 'artistGender: <input name="artistGender" type="text"><br><br>'
output += 'artistWikidata_URL: <input name="artistWikidata_URL" type="text"><br><br>'
output += 'artistULAN_URL: <input name="artistULAN_URL" type="text"><br><br>'
output += 'objectDate: <input name="objectDate" type="text"><br><br>'
output += 'objectBeginDate: <input name="objectBeginDate" type="text"><br><br>'
output += 'objectEndDate: <input name="objectEndDate" type="text"><br><br>'
output += 'medium: <input name="medium" type="text"><br><br>'
output += 'dimensions: <input name="dimensions" type="text"><br><br>'
output += 'measurements: <input name="measurements" type="text"><br><br>'
output += 'creditLine: <input name="creditLine" type="text"><br><br>'
output += 'geographyType: <input name="geographyType" type="text"><br><br>'
output += 'city: <input name="city" type="text"><br><br>'
output += 'state: <input name="state" type="text"><br><br>'
output += 'county: <input name="county" type="text"><br><br>'
output += 'country: <input name="country" type="text"><br><br>'
output += 'region: <input name="region" type="text"><br><br>'
output += 'subregion: <input name="subregion" type="text"><br><br>'
output += 'locale: <input name="locale" type="text"><br><br>'
output += 'locus: <input name="locus" type="text"><br><br>'
output += 'excavation: <input name="excavation" type="text"><br><br>'
output += 'river: <input name="river" type="text"><br><br>'
output += 'classification: <input name="classification" type="text"><br><br>'
output += 'rightsAndReproduction: <input name="rightsAndReproduction" type="text"><br><br>'
output += 'linkResource: <input name="linkResource" type="text"><br><br>'
output += 'metadataDate: <input name="metadataDate" type="text"><br><br>'
output += 'repository: <input name="repository" type="text"><br><br>'
output += 'objectURL: <input name="objectURL" type="text"><br><br>'
output += 'tags: <input name="tags" type="text"><br><br>'
output += 'objectWikidata_URL: <input name="objectWikidata_URL" type="text"><br><br>'
output += 'isTimelineWork: <input name="isTimelineWork" type="text"><br><br>'
output += 'GalleryNumber: <input name="GalleryNumber" type="text"><br><br>'
output += 'constituentID: <input name="constituentID" type="text"><br><br>'
output += 'role: <input name="role" type="text"><br><br>'
output += 'name: <input name="name" type="text"><br><br>'
output += 'constituentULAN_URL: <input name="constituentULAN_URL" type="text"><br><br>'
output += 'constituentWikidata_URL: <input name="constituentWikidata_URL" type="text"><br><br>'
output += 'gender: <input name="gender" type="text"><br><br>'
output += '<input type="submit" value="Add">'
output += '</form>'
output += '</body></html>'
self.wfile.write(output.encode())
if self.path.startswith('/update_data'):
value = str(self.path[22:])[3:]
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
output = ''
output += '<html><body>'
output += '<form method="POST" enctype="multipart/form-data" action="/update_data">'
output += select_db_row("https://w101gqjv56.execute-api.ap-south-1.amazonaws.com/test/s3-json-data", int(value))
output += """<script>
function redirect_to_viewtable()
{
window.location.href="/viewtable"
}
</script>"""
output += '</form>'
output += '</body></html>'
self.wfile.write(output.encode())
except PermissionError as per_err:
logging.error('%s: %s', per_err.__class__.__name__, per_err)
except TypeError as type_err:
logging.error('%s: %s', type_err.__class__.__name__, type_err)
except Exception as err:
logging.error('%s: %s', err.__class__.__name__, err)
def do_POST(self):
try:
if self.path.endswith('/new'):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
content_len = int(self.headers.get('Content-length'))
pdict['CONTENT-LENGTH'] = content_len
if ctype == 'multipart/form-data':
fields = cgi.parse_multipart(self.rfile, pdict)
file = fields.get('task')[0]
file = file.decode("cp1252")
with open('file.csv', mode='w', encoding='utf-8') as f:
for data in file.split('\r\r'):
f.write(data)
df = pd.read_csv('file.csv')
df.to_json('jsondata.json', orient='records')
put_object_to_s3('jsondata.json')
# csv_to_db_func('file.csv')
# upload_to_s3('file.csv')
self.send_response(301)
self.send_header('content-type', 'text/html')
self.send_header('Location', '/viewtable')
self.end_headers()
self.wfile.write(file.encode())
if self.path.startswith('/delete_data'):
value_id = self.path[22:]
delete_db_row("https://w101gqjv56.execute-api.ap-south-1.amazonaws.com/test/s3-json-data", value_id)
# with open("jsondata.json", "r") as jsonFile:
# data = json.load(jsonFile)
# data = [i for i in data if not (i['objectID'] == int(value_id))]
# with open("jsondata.json", "w") as jsonFile:
# json.dump(data, jsonFile)
# put_object_to_s3('jsondata.json')
self.send_response(301)
self.send_header('content-type', 'text/html')
self.send_header('Location', '/viewtable')
self.end_headers()
if self.path.startswith('/to_S3'):
upload_to_s3()
self.send_response(301)
self.send_header('content-type', 'text/html')
self.send_header('Location', '/viewtable')
self.end_headers()
if self.path.endswith('/add'):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
saviour = pdict['boundary']
content_len = int(self.headers.get('Content-length'))
pdict['CONTENT-LENGTH'] = content_len
if ctype == 'multipart/form-data':
guess = self.rfile
fields = cgi.parse_multipart(self.rfile, pdict)
i=0
for key in fields:
if fields[key][0] == '':
i = i+1
if i != 0:
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
output = ''
output += '<html><head><meta charset="utf-8"></head><body>'
output += '<h2>All fields must be filled</h2>'
output += '<h3><a href="/viewtable">Back to viewtable</a></h3>'
output += '</body></html>'
self.wfile.write(output.encode())
else:
for key in fields:
fields[key] = fields[key][0]
print(fields)
insert_db_row("https://w101gqjv56.execute-api.ap-south-1.amazonaws.com/test/s3-json-data", fields)
# with open('jsondata.json') as json_file:
# obj_list = json.load(json_file)
# obj_list.append(fields)
# with open('jsondata.json', 'w') as json_file:
# json.dump(obj_list, json_file,
# indent=4,
# separators=(',', ': '))
# put_object_to_s3('jsondata.json')
self.send_response(301)
self.send_header('content-type', 'text/html')
self.send_header('Location', '/viewtable')
self.end_headers()
if self.path.startswith('/update_data'):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
content_len = int(self.headers.get('Content-length'))
pdict['CONTENT-LENGTH'] = content_len
if ctype == 'multipart/form-data':
fields = cgi.parse_multipart(self.rfile, pdict)
# conn = connect(host='localhost',
# database="csvfile_upload",
# user='root',
# password='yogesh1304')
#
# if conn.is_connected():
# cursor = conn.cursor()
# query = "SELECT objectId From csvfile_upload.csvfile_data"
# cursor.execute(query)
# primary_keys = cursor.fetchall()
# primary_key = fields['objectId']
# list1 = []
# for i in range(len(primary_keys)):
# list1.append(i)
# list2 = []
# for i in range(len(list1)):
# list2.append(str(primary_keys[i][0]))
# if primary_key[0] in list2:
for key in fields:
fields[key] = fields[key][0]
update_db_row("https://w101gqjv56.execute-api.ap-south-1.amazonaws.com/test/s3-json-data", fields, int(fields['objectID']))
# with open("jsondata.json", "r") as jsonFile:
# data = json.load(jsonFile)
# for i in data:
# if int(fields['objectId']) == i['objectID']:
# i.update(fields)
# with open("jsondata.json", "w") as jsonFile:
# json.dump(data, jsonFile)
# put_object_to_s3('jsondata.json')
# else:
# self.send_response(200)
# self.send_header('content-type', 'text/html')
# self.end_headers()
#
# output = ''
# output += '<html><head><meta charset="utf-8"></head><body>'
# output += '<h2>You cannot update primary key</h2>'
# output += '<h3><a href="/viewtable">Back to viewtable</a></h3>'
# output += '</body></html>'
#
# self.wfile.write(output.encode())
self.send_response(301)
self.send_header('content-type', 'text/html')
self.send_header('Location', '/viewtable')
self.end_headers()
except PermissionError as per_err:
logging.error('%s: %s', per_err.__class__.__name__, per_err)
except TypeError as type_err:
logging.error('%s: %s', type_err.__class__.__name__, type_err)
except Exception as err:
logging.error('%s: %s', err.__class__.__name__, err)
def main():
port = 8000
server = HTTPServer(('', port), RequestHandler)
print("Server started on localhost: ", port)
server.serve_forever()
if __name__ == "__main__":
main()
|
the-stack_106_19688
|
import json
import os
from typing import List, Mapping, Tuple, Union
import numpy as np
from skimage.io import imread
from slicedimage import ImageFormat
from starfish.experiment.builder import FetchedTile, TileFetcher, write_experiment_json
from starfish.types import Axes, Coordinates, Features, Number
from starfish.util import click
class ImagingMassCytometryTile(FetchedTile):
def __init__(self, file_path: str) -> None:
"""Initialize a TileFetcher for Imaging Mass Cytometry Data"""
self.file_path = file_path
self._tile_data = imread(self.file_path)
@property
def shape(self) -> Mapping[Axes, int]:
return {Axes.Y: self._tile_data.shape[0], Axes.X: self._tile_data.shape[1]}
@property
def coordinates(self) -> Mapping[Union[str, Coordinates], Union[Number, Tuple[Number, Number]]]:
# TODO ambrosejcarr: ask about what these coordinates should correspond to.
return {
Coordinates.X: (0.0, 0.0001),
Coordinates.Y: (0.0, 0.0001),
Coordinates.Z: (0.0, 0.0001),
}
def tile_data(self) -> np.ndarray:
return self._tile_data
class ImagingMassCytometryTileFetcher(TileFetcher):
def __init__(self, input_dir: str) -> None:
"""Implement a TileFetcher for an Imaging Mass Cytometry Experiment.
This Tile Fetcher constructs spaceTx format from IMC experiments with a specific directory
structure:
input_dir
└── <Fov_name>
└── <Fov_name>
├── <target_name1>.tiff
├── ...
└── <target_nameN>.tiff
Notes
-----
- In Imaging Mass Cytometry, each channel specifies a unique target, so channel == target
- Imaging Mass Cytometry experiments have only one imaging round, round is hard coded as 1
- The spatial organization of the fields of view are not known to the starfish developers,
so they are filled by dummy coordinates
"""
self.input_dir = input_dir
@property
def _ch_dict(self) -> Mapping[int, str]:
channels = [
"CD44(Gd160Di)",
"CD68(Nd146Di)",
"CarbonicAnhydraseIX(Er166Di)",
"Creb(La139Di)",
"Cytokeratin7(Dy164Di)",
"Cytokeratin8-18(Yb174Di)",
"E-cadherin(Er167Di)",
"EpCAM(Dy161Di)",
"Fibronectin(Dy163Di)",
"GATA3(Pr141Di)",
"Her2(Eu151Di)",
"HistoneH3(Yb176Di)",
"Ki-67(Er168Di)",
"PRAB(Gd158Di)",
"S6(Er170Di)",
"SMA(Nd148Di)",
"Twist(Nd145Di)",
"Vimentin(Dy162Di)",
"b-catenin(Ho165Di)",
]
mapping = dict(enumerate(channels))
return mapping
def ch_dict(self, ch: int) -> str:
return self._ch_dict[ch]
@property
def _fov_map(self) -> Mapping[int, str]:
fov_names: List[str] = [
d for d in os.listdir(self.input_dir) if os.path.isdir(os.path.join(self.input_dir, d))
]
mapping = dict(enumerate(fov_names))
return mapping
def fov_map(self, fov: int) -> str:
return self._fov_map[fov]
def get_tile(self, fov: int, r: int, ch: int, z: int) -> FetchedTile:
fov_name = self.fov_map(fov)
basename = f'{self.ch_dict(ch)}.tiff'
file_path = os.path.join(self.input_dir, fov_name, fov_name, basename)
return ImagingMassCytometryTile(file_path)
def generate_codebook(self) -> Mapping:
mappings = []
for idx, target in self._ch_dict.items():
mappings.append({
Features.CODEWORD: [{
Axes.ROUND.value: 0, Axes.CH.value: idx, Features.CODE_VALUE: 1
}],
Features.TARGET: target
})
return {
"version": "0.0.0",
"mappings": mappings
}
@click.command()
@click.option("--input_dir", type=str, help="input directory containing images")
@click.option("--output_dir", type=str, help="output directory for formatted data")
def cli(input_dir, output_dir):
"""CLI entrypoint for spaceTx format construction for Imaging Mass Cytometry
Raw data (input for this tool) for this experiment can be found at:
s3://spacetx.starfish.data.public/browse/raw/20181015/imaging_cytof/\
BodenmillerBreastCancerSamples/
Processed data (output of this tool) can be found at:
s3://spacetx.starfish.data.public/browse/formatted/20181023/imaging_cytof/\
BodenmillerBreastCancerSamples/
"""
os.makedirs(output_dir, exist_ok=True)
primary_tile_fetcher = ImagingMassCytometryTileFetcher(os.path.expanduser(input_dir))
primary_image_dimensions = {
Axes.ROUND: 1,
Axes.CH: len(primary_tile_fetcher._ch_dict),
Axes.ZPLANE: 1
}
def postprocess_func(experiment_json_doc):
experiment_json_doc["codebook"] = "codebook.json"
return experiment_json_doc
with open(os.path.join(output_dir, "codebook.json"), 'w') as f:
codebook = primary_tile_fetcher.generate_codebook()
json.dump(codebook, f)
write_experiment_json(
path=output_dir,
fov_count=len(primary_tile_fetcher._fov_map),
tile_format=ImageFormat.TIFF,
primary_image_dimensions=primary_image_dimensions,
aux_name_to_dimensions={},
primary_tile_fetcher=primary_tile_fetcher,
postprocess_func=postprocess_func,
)
if __name__ == "__main__":
cli()
|
the-stack_106_19690
|
from datetime import datetime
import mock
from farmos_ext import Farm
from farmos_ext.farmobj import FarmObj
@mock.patch("farmos_ext.Farm")
def test_farmobj_empty(mock_farm):
obj = FarmObj(mock_farm, {})
assert not obj.name
assert obj.farm == mock_farm
@mock.patch("farmos_ext.Farm")
def test_farmobj_not_empty(mock_farm):
obj = FarmObj(mock_farm, {
"name": "test",
})
assert obj.name == 'test'
assert obj.farm == mock_farm
|
the-stack_106_19693
|
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
"""
This file contains implementation of dependency tracker
for contrail config daemons
"""
from collections import OrderedDict
# This class tracks dependencies among different objects based on a reaction map.
# Objects could be derived from DBBase. Each object has an object_type and the
# mapping from object_type to the class is specified using object_class_map
class DependencyTracker(object):
def __init__(self, object_class_map, reaction_map):
self._reaction_map = reaction_map
self._object_class_map = object_class_map
self.resources = OrderedDict()
# end __init__
def _add_resource(self, obj_type, obj_key):
if obj_type in self.resources:
if obj_key in self.resources[obj_type]:
# already visited
return False
self.resources[obj_type].append(obj_key)
else:
self.resources[obj_type] = [obj_key]
return True
# end _add_resource
def evaluate(self, obj_type, obj, from_type='self'):
if obj_type not in self._reaction_map:
return
if not self._add_resource(obj_type, obj.get_key()):
return
for ref_type in self._reaction_map[obj_type][from_type]:
ref = getattr(obj, ref_type, None)
if ref is None:
refs = getattr(obj, ref_type+'s', [])
else:
refs = [ref]
ref_class = self._object_class_map[ref_type]
for ref in refs:
ref_obj = ref_class.get(ref)
if ref_obj is None:
return
self.evaluate(ref_type, ref_obj, obj_type)
# end evaluate
# end DependencyTracker
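# Illustrative usage sketch (an assumption, not part of the original module): the
# object types, reaction map and ExampleDB class below are made up to show the
# expected shapes. In contrail daemons the object_class_map points at DBBase-derived
# classes and the reaction map is far richer; only the calling convention is shown.
if __name__ == '__main__':
    class ExampleDB(object):
        _instances = {}
        def __init__(self, key, vmi_refs):
            self.key = key
            self.virtual_machine_interfaces = vmi_refs
            ExampleDB._instances[key] = self
        def get_key(self):
            return self.key
        @classmethod
        def get(cls, key):
            return cls._instances.get(key)
    reaction_map = {
        'virtual_machine': {'self': ['virtual_machine_interface']},
        'virtual_machine_interface': {'virtual_machine': []},
    }
    object_class_map = {
        'virtual_machine': ExampleDB,
        'virtual_machine_interface': ExampleDB,
    }
    ExampleDB('vmi1', [])
    vm = ExampleDB('vm1', ['vmi1'])
    tracker = DependencyTracker(object_class_map, reaction_map)
    tracker.evaluate('virtual_machine', vm)
    # Expected: both the VM and its interface end up in tracker.resources
    print(tracker.resources)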
|
the-stack_106_19696
|
def visualize_el_preds(data_and_predictions, output_fp='visualization.html'):
f = open(output_fp, 'w+')
for data in data_and_predictions:
inst_type = data['type']
ctx_left = data['context_left']
mention = data['mention']
ctx_right = data['context_right']
# Input
f.write(f'<span style="color:red">[{inst_type}]</span> {ctx_left} <b>{mention}</b> {ctx_right}</br></br>\n')
# Predictions
for p in data['top_entities']:
eid, e_title, e_url, e_text = p['id'], p['title'], p['url'], p['text']
f.write(f'[<a href="{e_url}">{eid}</a> <i>{e_title}</i>] ')
f.write(f'{e_text[:200]} ...')
f.write('</br></br>\n')
# Separators
f.write('</br><hr>\n')
f.close()
print(f'Generated a visualization file {output_fp}')
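# Illustrative usage sketch (an assumption, not part of the original module): the
# dictionary below only mirrors the fields visualize_el_preds() reads; the mention,
# context and candidate entity are placeholder values.
if __name__ == '__main__':
    sample_predictions = [{
        'type': 'mention',
        'context_left': 'The capital of France is',
        'mention': 'Paris',
        'context_right': ', a major European city.',
        'top_entities': [{
            'id': 'Q90',
            'title': 'Paris',
            'url': 'https://en.wikipedia.org/wiki/Paris',
            'text': 'Paris is the capital and most populous city of France.',
        }],
    }]
    visualize_el_preds(sample_predictions, output_fp='visualization.html')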
|
the-stack_106_19698
|
import inspect
import os
from unittest.mock import Mock
import pytest
from _pytest.monkeypatch import MonkeyPatch
import hypercorn.__main__
from hypercorn.config import Config
def test_load_config_none() -> None:
assert isinstance(hypercorn.__main__._load_config(None), Config)
def test_load_config_pyfile(monkeypatch: MonkeyPatch) -> None:
mock_config = Mock()
monkeypatch.setattr(hypercorn.__main__, "Config", mock_config)
hypercorn.__main__._load_config("file:assets/config.py")
mock_config.from_pyfile.assert_called()
def test_load_config_pymodule(monkeypatch: MonkeyPatch) -> None:
mock_config = Mock()
monkeypatch.setattr(hypercorn.__main__, "Config", mock_config)
hypercorn.__main__._load_config("python:assets.config")
mock_config.from_object.assert_called()
def test_load_config(monkeypatch: MonkeyPatch) -> None:
mock_config = Mock()
monkeypatch.setattr(hypercorn.__main__, "Config", mock_config)
hypercorn.__main__._load_config("assets/config")
mock_config.from_toml.assert_called()
@pytest.mark.parametrize(
"flag, set_value, config_key",
[
("--access-logformat", "jeff", "access_log_format"),
("--backlog", 5, "backlog"),
("--ca-certs", "/path", "ca_certs"),
("--certfile", "/path", "certfile"),
("--ciphers", "DHE-RSA-AES128-SHA", "ciphers"),
("--worker-class", "trio", "worker_class"),
("--keep-alive", 20, "keep_alive_timeout"),
("--keyfile", "/path", "keyfile"),
("--pid", "/path", "pid_path"),
("--root-path", "/path", "root_path"),
("--workers", 2, "workers"),
],
)
def test_main_cli_override(
flag: str, set_value: str, config_key: str, monkeypatch: MonkeyPatch
) -> None:
run_multiple = Mock()
monkeypatch.setattr(hypercorn.__main__, "run", run_multiple)
path = os.path.join(os.path.dirname(__file__), "assets/config_ssl.py")
raw_config = Config.from_pyfile(path)
hypercorn.__main__.main(["--config", f"file:{path}", flag, str(set_value), "asgi:App"])
run_multiple.assert_called()
config = run_multiple.call_args_list[0][0][0]
for name, value in inspect.getmembers(raw_config):
if (
not inspect.ismethod(value)
and not name.startswith("_")
and name not in {"log", config_key}
):
assert getattr(raw_config, name) == getattr(config, name)
assert getattr(config, config_key) == set_value
def test_verify_mode_conversion(monkeypatch: MonkeyPatch) -> None:
run_multiple = Mock()
monkeypatch.setattr(hypercorn.__main__, "run", run_multiple)
with pytest.raises(SystemExit):
hypercorn.__main__.main(["--verify-mode", "CERT_UNKNOWN", "asgi:App"])
hypercorn.__main__.main(["--verify-mode", "CERT_REQUIRED", "asgi:App"])
run_multiple.assert_called()
|
the-stack_106_19699
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel integration tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import tensorflow.compat.v2 as tf
from tensorflow.examples.saved_model.integration_tests import distribution_strategy_utils as ds_utils
from tensorflow.examples.saved_model.integration_tests import integration_scripts as scripts
from tensorflow.python.distribute import combinations
class SavedModelTest(scripts.TestCase, parameterized.TestCase):
def __init__(self, method_name="runTest", has_extra_deps=False):
super(SavedModelTest, self).__init__(method_name)
self.has_extra_deps = has_extra_deps
def skipIfMissingExtraDeps(self):
"""Skip test if it requires extra dependencies.
b/132234211: The extra dependencies are not available in all environments
that run the tests, e.g. "tensorflow_hub" is not available from tests
within "tensorflow" alone. Those tests are instead run by another
internal test target.
"""
if not self.has_extra_deps:
self.skipTest("Missing extra dependencies")
def test_text_rnn(self):
export_dir = self.get_temp_dir()
self.assertCommandSucceeded("export_text_rnn_model", export_dir=export_dir)
self.assertCommandSucceeded("use_text_rnn_model", model_dir=export_dir)
def test_rnn_cell(self):
export_dir = self.get_temp_dir()
self.assertCommandSucceeded("export_rnn_cell", export_dir=export_dir)
self.assertCommandSucceeded("use_rnn_cell", model_dir=export_dir)
def test_text_embedding_in_sequential_keras(self):
self.skipIfMissingExtraDeps()
export_dir = self.get_temp_dir()
self.assertCommandSucceeded(
"export_simple_text_embedding", export_dir=export_dir)
self.assertCommandSucceeded(
"use_model_in_sequential_keras", model_dir=export_dir)
def test_text_embedding_in_dataset(self):
if tf.test.is_gpu_available():
self.skipTest("b/132156097 - fails if there is a gpu available")
export_dir = self.get_temp_dir()
self.assertCommandSucceeded(
"export_simple_text_embedding", export_dir=export_dir)
self.assertCommandSucceeded(
"use_text_embedding_in_dataset", model_dir=export_dir)
@combinations.generate(
combinations.combine(
named_strategy=list(ds_utils.named_strategies.values()),
retrain_flag_value=["true", "false"],
regularization_loss_multiplier=[0, 2]),
test_combinations=[combinations.NamedGPUCombination()])
def test_mnist_cnn(self, named_strategy, retrain_flag_value,
regularization_loss_multiplier):
self.skipIfMissingExtraDeps()
fast_test_mode = True
temp_dir = self.get_temp_dir()
    feature_extractor_dir = os.path.join(temp_dir, "mnist_feature_extractor")
# TODO(b/135043074): remove this if-else.
if named_strategy is None:
full_model_dir = os.path.join(temp_dir, "full_model")
else:
full_model_dir = None
self.assertCommandSucceeded(
"export_mnist_cnn",
fast_test_mode=fast_test_mode,
        export_dir=feature_extractor_dir)
self.assertCommandSucceeded(
"use_mnist_cnn",
fast_test_mode=fast_test_mode,
        input_saved_model_dir=feature_extractor_dir,
output_saved_model_dir=full_model_dir,
strategy=str(named_strategy),
retrain=retrain_flag_value,
regularization_loss_multiplier=regularization_loss_multiplier)
if full_model_dir is not None:
self.assertCommandSucceeded(
"deploy_mnist_cnn",
fast_test_mode=fast_test_mode,
saved_model_dir=full_model_dir)
if __name__ == "__main__":
scripts.MaybeRunScriptInstead()
tf.test.main()
|
the-stack_106_19702
|
from flask import Blueprint, current_app
from flask import request, send_file, render_template, flash, redirect, url_for
from io import BytesIO
from wlan_api.activation import insert_vouchers_into_database
from wlan_api.generate import generate_vouchers
from wlan_api.pdf import VoucherPrint
from wlan_api.pdf.pdfjam import merge_final_pdf
vpg = Blueprint('vpg', __name__, static_folder='static', template_folder='templates')
@vpg.route('/', methods=['GET'])
def home():
return render_template('home.html')
@vpg.route('/pdf/step', methods=['POST'])
def pdf_step():
roll = int(request.form['roll'])
count = int(request.form['count'])
return render_template('pdf/step.html', roll=roll, count=count)
def create_pdf_buffer(vouchers):
voucher_buffer = BytesIO()
report = VoucherPrint(voucher_buffer, vouchers)
report.print_vouchers()
voucher_buffer.seek(0)
return voucher_buffer, len(vouchers)
@vpg.route('/pdf/generate', methods=['POST'])
def pdf_generate():
roll = int(request.form['roll'])
count = int(request.form['count'])
ads_file = request.files['ads_pdf']
if ads_file.filename == '':
flash("Error: Please provide an Ads file!")
return redirect(url_for('vpg.home'))
voucher_config = current_app.config['VOUCHER']
vouchers = generate_vouchers(roll, count, voucher_config['key'],
voucher_config['alphabet'],
voucher_config['length'])
voucher_buffer, voucher_count = create_pdf_buffer(vouchers)
if voucher_buffer is None:
flash("Error: Failed to generate pdf!")
        return redirect(url_for('vpg.home'))
final_pdf = merge_final_pdf(voucher_buffer, voucher_count, ads_file)
if final_pdf is None:
flash("Error: Failed to shuffle ads!")
        return redirect(url_for('vpg.home'))
return send_file(BytesIO(final_pdf),
mimetype='application/pdf',
as_attachment=True,
attachment_filename="vouchers_tatdf_roll%s.csv.pdf" % roll)
@vpg.route('/activation/step', methods=['POST'])
def activate_step():
roll = int(request.form['roll'])
count = int(request.form['count'])
voucher_config = current_app.config['VOUCHER']
vouchers = generate_vouchers(roll, count, voucher_config['key'],
voucher_config['alphabet'],
voucher_config['length'])
flash(insert_vouchers_into_database(vouchers, current_app.config['MYSQL']))
return render_template('activation/step.html')
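# Illustrative wiring sketch (an assumption, not part of the original module): one way
# the blueprint above could be mounted on a Flask app. The secret key and the VOUCHER /
# MYSQL config values are placeholders that only mirror the keys the views read.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    app.secret_key = 'change-me'  # required for flash() messages
    app.config['VOUCHER'] = {'key': 'secret-key', 'alphabet': 'ABCDEFGHJKLMNPQRSTUVWXYZ23456789', 'length': 8}
    app.config['MYSQL'] = {'host': 'localhost', 'user': 'root', 'password': '', 'database': 'vouchers'}
    app.register_blueprint(vpg)
    app.run(debug=True)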
|
the-stack_106_19703
|
"""Top-level package for FastCCD Support IOC."""
__author__ = """Ronald J Pandolfi"""
__email__ = '[email protected]'
__version__ = '0.1.0'
from . import utils
from caproto.server import PVGroup, get_pv_pair_wrapper
from caproto.server.autosave import autosaved, AutosaveHelper
pvproperty_with_rbv = get_pv_pair_wrapper(setpoint_suffix='',
readback_suffix='_RBV')
class FastAutosaveHelper(AutosaveHelper):
period = 1
def wrap_autosave(pvgroup: PVGroup):
pvgroup.readback = autosaved(pvgroup.readback)
pvgroup.setpoint = autosaved(pvgroup.setpoint)
return pvgroup
|
the-stack_106_19704
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Main entry-point to run timetests tests.
Default run:
$ pytest test_timetest.py
Options[*]:
--test_conf Path to test config
--exe Path to timetest binary to execute
--niter Number of times to run executable
[*] For more information see conftest.py
"""
from pathlib import Path
import logging
import os
import shutil
from scripts.run_timetest import run_timetest
from test_runner.utils import expand_env_vars
REFS_FACTOR = 1.2 # 120%
def test_timetest(instance, executable, niter, cl_cache_dir, model_cache_dir, test_info, temp_dir, validate_test_case,
prepare_db_info):
"""Parameterized test.
:param instance: test instance. Should not be changed during test run
:param executable: timetest executable to run
:param niter: number of times to run executable
:param cl_cache_dir: directory to store OpenCL cache
:param model_cache_dir: directory to store IE model cache
:param test_info: custom `test_info` field of built-in `request` pytest fixture
:param temp_dir: path to a temporary directory. Will be cleaned up after test run
:param validate_test_case: custom pytest fixture. Should be declared as test argument to be enabled
:param prepare_db_info: custom pytest fixture. Should be declared as test argument to be enabled
"""
# Prepare model to get model_path
model_path = instance["model"].get("path")
assert model_path, "Model path is empty"
model_path = Path(expand_env_vars(model_path))
# Copy model to a local temporary directory
model_dir = temp_dir / "model"
shutil.copytree(model_path.parent, model_dir)
model_path = model_dir / model_path.name
# Run executable
exe_args = {
"executable": Path(executable),
"model": Path(model_path),
"device": instance["device"]["name"],
"niter": niter
}
logging.info("Run timetest once to generate any cache")
retcode, msg, _, _ = run_timetest({**exe_args, "niter": 1}, log=logging)
assert retcode == 0, f"Run of executable for warm up failed: {msg}"
if cl_cache_dir:
assert os.listdir(cl_cache_dir), "cl_cache isn't generated"
if model_cache_dir:
assert os.listdir(model_cache_dir), "model_cache isn't generated"
retcode, msg, aggr_stats, raw_stats = run_timetest(exe_args, log=logging)
assert retcode == 0, f"Run of executable failed: {msg}"
# Add timetest results to submit to database and save in new test conf as references
test_info["results"] = aggr_stats
test_info["raw_results"] = raw_stats
|
the-stack_106_19705
|
#!/usr/bin/env python
from re import sub
from sys import argv,exit
from os import path,getenv
from glob import glob
import argparse
parser = argparse.ArgumentParser(description='make forest')
parser.add_argument('--region',metavar='region',type=str,default=None)
toProcess = parser.parse_args().region
argv=[]
import ROOT as root
from PandaCore.Tools.Misc import *
from PandaCore.Tools.Load import *
import PandaCore.Tools.Functions # kinematics
import PandaAnalysis.Monotop.CombinedSelection as sel
Load('PandaAnalysisFlat','LimitTreeBuilder')
baseDir = getenv('PANDA_FLATDIR')
lumi = 36560
factory = root.LimitTreeBuilder()
if toProcess:
factory.SetOutFile(baseDir+'/limits/limitForest_%s.root'%toProcess)
else:
factory.SetOutFile(baseDir+'/limits/limitForest_all.root')
def dataCut(basecut,trigger):
return tAND(trigger,basecut)
treelist = []
def getTree(fpath):
global treelist
fIn = root.TFile(baseDir+fpath+'.root')
tIn = fIn.Get('events')
treelist.append(tIn)
return tIn,fIn
def enable(regionName):
if toProcess:
return (toProcess==regionName)
else:
return True
def shiftBtags(label,tree,varmap,cut,baseweight):
ps = []
for shift in ['BUp','BDown','MUp','MDown']:
for cent in ['sf_btag','sf_sjbtag']:
shiftedlabel = '_'
if 'sj' in cent:
shiftedlabel += 'sj'
if 'B' in shift:
shiftedlabel += 'btag'
else:
shiftedlabel += 'mistag'
if 'Up' in shift:
shiftedlabel += 'Up'
else:
shiftedlabel += 'Down'
weight = sel.weights[baseweight+'_'+cent+shift]%lumi
shiftedProcess = root.Process(label,tree,varmap,cut,weight)
shiftedProcess.syst = shiftedlabel
ps.append(shiftedProcess)
return ps
# input
tZll,fZll = getTree('ZJets')
tZvv,fZvv = getTree('ZtoNuNu')
tWlv,fWlv = getTree('WJets')
tPho,fPho = getTree('GJets')
tTTbar,fTT = getTree('TTbar')
tVV,fVV = getTree('Diboson')
tQCD,fQCD = getTree('QCD')
tST,fST = getTree('SingleTop')
tMET,fMET = getTree('MET')
tSingleEle,fSEle = getTree('SingleElectron')
tSinglePho,fSPho = getTree('SinglePhoton')
#tSig,fSig = getTree('monotop-nr-v3-1700-100_med-1700_dm-100') # this is just a sample point
tAllSig = {}; fAllSig = {}
if enable('signal'):
signalFiles = glob(baseDir+'/Vector*root')
for f in signalFiles:
fname = f.split('/')[-1].replace('.root','')
signame = fname
replacements = {
'Vector_MonoTop_NLO_Mphi-':'',
'_gSM-0p25_gDM-1p0_13TeV-madgraph':'',
'_Mchi-':'_',
}
for k,v in replacements.iteritems():
signame = signame.replace(k,v)
tAllSig[signame],fAllSig[signame] = getTree(fname)
factory.cd()
regions = {}
processes = {}
vms = {}
for region_type,met_type,phi_type in [('signal','pfmet','pfmetphi'),
('w','pfUWmag','pfUWphi'),
('z','pfUZmag','pfUZphi'),
('a','pfUAmag','pfUAphi')]:
vms[region_type] = root.VariableMap()
vms[region_type].AddVar('met',met_type)
# vms[region_type].AddVar('metphi',phi_type)
vms[region_type].AddVar('genBosonPt','genBosonPt')
# vms[region_type].AddVar('genBosonPhi','genBosonPhi')
for x in ['fj1Tau32','top_ecf_bdt']:
vms[region_type].AddVar(x,x)
# test region
if enable('test'):
regions['test'] = root.Region('test')
cut = sel.cuts['signal']
weight = sel.weights['signal']%lumi
processes['test'] = [
root.Process('Data',tMET,vms['signal'],dataCut(cut,sel.triggers['met']),'1'),
root.Process('Diboson',tVV,vms['signal'],cut,weight),
]
btag_shifts = []
for p in processes['test']:
if p.name=='Data':
continue
        btag_shifts += shiftBtags(p.name,p.GetInput(),vms['signal'],cut,'signal')
processes['test'] += btag_shifts
for p in processes['test']:
regions['test'].AddProcess(p)
factory.AddRegion(regions['test'])
# signal region
if enable('signal'):
regions['signal'] = root.Region('signal')
cut = sel.cuts['signal']
weight = sel.weights['signal']%lumi
vm = vms['signal']
processes['signal'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
# root.Process('signal',tSig,vm,cut,weight),
]
for signame,tsig in tAllSig.iteritems():
processes['signal'].append( root.Process(signame,tsig,vm,cut,weight) )
btag_shifts = []
for p in processes['signal']:
if p.name=='Data':
continue
btag_shifts += shiftBtags(p.name,p.GetInput(),vm,cut,'signal')
#btag_shifts += shiftCSV(p.name,p.GetInput(),vm,cut,'signal')
processes['signal'] += btag_shifts
for p in processes['signal']:
regions['signal'].AddProcess(p)
factory.AddRegion(regions['signal'])
#singlemuonw
if enable('singlemuonw'):
regions['singlemuonw'] = root.Region('singlemuonw')
cut = sel.cuts['singlemuonw']
weight = sel.weights['singlemuonw']%lumi
vm = vms['w']
processes['singlemuonw'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
btag_shifts = []
for p in processes['singlemuonw']:
if p.name=='Data':
continue
btag_shifts += shiftBtags(p.name,p.GetInput(),vm,cut,'singlemuonw')
#btag_shifts += shiftCSV(p.name,p.GetInput(),vm,cut,'singlemuonw')
processes['singlemuonw'] += btag_shifts
for p in processes['singlemuonw']:
regions['singlemuonw'].AddProcess(p)
factory.AddRegion(regions['singlemuonw'])
#singleelectronw
if enable('singleelectronw'):
regions['singleelectronw'] = root.Region('singleelectronw')
cut = sel.cuts['singleelectronw']
weight = sel.weights['singleelectronw']%lumi
vm = vms['w']
processes['singleelectronw'] = [
root.Process('Data',tSingleEle,vm,dataCut(cut,sel.triggers['ele']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
btag_shifts = []
for p in processes['singleelectronw']:
if p.name=='Data':
continue
btag_shifts += shiftBtags(p.name,p.GetInput(),vm,cut,'singleelectronw')
#btag_shifts += shiftCSV(p.name,p.GetInput(),vm,cut,'singleelectronw')
processes['singleelectronw'] += btag_shifts
for p in processes['singleelectronw']:
regions['singleelectronw'].AddProcess(p)
factory.AddRegion(regions['singleelectronw'])
#singlemuontop
if enable('singlemuontop'):
regions['singlemuontop'] = root.Region('singlemuontop')
cut = sel.cuts['singlemuontop']
weight = sel.weights['singlemuontop']%lumi
vm = vms['w']
processes['singlemuontop'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
btag_shifts = []
for p in processes['singlemuontop']:
if p.name=='Data':
continue
btag_shifts += shiftBtags(p.name,p.GetInput(),vm,cut,'singlemuontop')
#btag_shifts += shiftCSV(p.name,p.GetInput(),vm,cut,'singlemuontop')
processes['singlemuontop'] += btag_shifts
for p in processes['singlemuontop']:
regions['singlemuontop'].AddProcess(p)
factory.AddRegion(regions['singlemuontop'])
#singleelectrontop
if enable('singleelectrontop'):
regions['singleelectrontop'] = root.Region('singleelectrontop')
cut = sel.cuts['singleelectrontop']
weight = sel.weights['singleelectrontop']%lumi
vm = vms['w']
processes['singleelectrontop'] = [
root.Process('Data',tSingleEle,vm,dataCut(cut,sel.triggers['ele']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
btag_shifts = []
for p in processes['singleelectrontop']:
if p.name=='Data':
continue
btag_shifts += shiftBtags(p.name,p.GetInput(),vm,cut,'singleelectrontop')
#btag_shifts += shiftCSV(p.name,p.GetInput(),vm,cut,'singleelectrontop')
processes['singleelectrontop'] += btag_shifts
for p in processes['singleelectrontop']:
regions['singleelectrontop'].AddProcess(p)
factory.AddRegion(regions['singleelectrontop'])
#dimuon
if enable('dimuon'):
regions['dimuon'] = root.Region('dimuon')
cut = sel.cuts['dimuon']
weight = sel.weights['dimuon']%lumi
vm = vms['z']
processes['dimuon'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
for p in processes['dimuon']:
regions['dimuon'].AddProcess(p)
factory.AddRegion(regions['dimuon'])
#dielectron
if enable('dielectron'):
regions['dielectron'] = root.Region('dielectron')
cut = sel.cuts['dielectron']
weight = sel.weights['dielectron']%lumi
vm = vms['z']
processes['dielectron'] = [
root.Process('Data',tSingleEle,vm,dataCut(cut,sel.triggers['ele']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
for p in processes['dielectron']:
regions['dielectron'].AddProcess(p)
factory.AddRegion(regions['dielectron'])
#photon
if enable('photon'):
regions['photon'] = root.Region('photon')
cut = sel.cuts['photon']
weight = sel.weights['photon']%lumi
vm = vms['a']
processes['photon'] = [
root.Process('Data',tSinglePho,vm,dataCut(cut,sel.triggers['pho']),'1'),
root.Process('Pho',tPho,vm,cut,weight),
root.Process('QCD',tSinglePho,vm,dataCut(cut,sel.triggers['pho']),'sf_phoPurity'),
]
for p in processes['photon']:
regions['photon'].AddProcess(p)
factory.AddRegion(regions['photon'])
PInfo('makeLimitForest','Starting '+str(toProcess))
factory.Run()
PInfo('makeLimitForest','Finishing '+str(toProcess))
factory.Output()
PInfo('makeLimitForest','Outputted '+str(toProcess))
|
the-stack_106_19706
|
# A handler to manage the data which needs to end up in the ISPyB xml out
# file.
import os
import time
from xia2.Handlers.Files import FileHandler
from xia2.Handlers.Phil import PhilIndex
def sanitize(path):
"""Replace double path separators with single ones."""
double = os.sep * 2
return path.replace(double, os.sep)
class ISPyBXmlHandler:
def __init__(self, project):
self._crystals = {}
self._per_crystal_data = {}
self._project = project
self._name_map = {
"High resolution limit": "resolutionLimitHigh",
"Low resolution limit": "resolutionLimitLow",
"Completeness": "completeness",
"Multiplicity": "multiplicity",
"CC half": "ccHalf",
"Anomalous completeness": "anomalousCompleteness",
"Anomalous correlation": "ccAnomalous",
"Anomalous multiplicity": "anomalousMultiplicity",
"Total observations": "nTotalObservations",
"Total unique": "nTotalUniqueObservations",
"Rmerge(I+/-)": "rMerge",
"Rmeas(I)": "rMeasAllIPlusIMinus",
"Rmeas(I+/-)": "rMeasWithinIPlusIMinus",
"Rpim(I)": "rPimAllIPlusIMinus",
"Rpim(I+/-)": "rPimWithinIPlusIMinus",
"Partial Bias": "fractionalPartialBias",
"I/sigma": "meanIOverSigI",
}
def add_xcrystal(self, xcrystal):
if not xcrystal.get_name() in self._crystals:
self._crystals[xcrystal.get_name()] = xcrystal
# should ideally drill down and get the refined cell constants for
# each sweep and the scaling statistics for low resolution, high
# resolution and overall...
@staticmethod
def write_date(fout):
"""Write the current date and time out as XML."""
fout.write(
"<recordTimeStamp>%s</recordTimeStamp>\n"
% time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
)
@staticmethod
def write_cell(fout, cell):
"""Write out a UNIT CELL as XML..."""
fout.write("<cell_a>%f</cell_a>" % cell[0])
fout.write("<cell_b>%f</cell_b>" % cell[1])
fout.write("<cell_c>%f</cell_c>" % cell[2])
fout.write("<cell_alpha>%f</cell_alpha>" % cell[3])
fout.write("<cell_beta>%f</cell_beta>" % cell[4])
fout.write("<cell_gamma>%f</cell_gamma>" % cell[5])
@staticmethod
def write_refined_cell(fout, cell):
"""Write out a REFINED UNIT CELL as XML..."""
fout.write("<refinedCell_a>%f</refinedCell_a>" % cell[0])
fout.write("<refinedCell_b>%f</refinedCell_b>" % cell[1])
fout.write("<refinedCell_c>%f</refinedCell_c>" % cell[2])
fout.write("<refinedCell_alpha>%f</refinedCell_alpha>" % cell[3])
fout.write("<refinedCell_beta>%f</refinedCell_beta>" % cell[4])
fout.write("<refinedCell_gamma>%f</refinedCell_gamma>" % cell[5])
def write_scaling_statistics(self, fout, scaling_stats_type, stats_dict):
"""Write out the SCALING STATISTICS block..."""
fout.write("<AutoProcScalingStatistics>\n")
fout.write(
"<scalingStatisticsType>%s</scalingStatisticsType>\n" % scaling_stats_type
)
for name in stats_dict:
if name not in self._name_map:
continue
out_name = self._name_map[name]
if out_name in ["nTotalObservations", "nTotalUniqueObservations"]:
fout.write("<%s>%d</%s>" % (out_name, int(stats_dict[name]), out_name))
else:
fout.write("<%s>%s</%s>" % (out_name, stats_dict[name], out_name))
fout.write("</AutoProcScalingStatistics>\n")
def write_xml(self, file, command_line="", working_phil=None):
if working_phil is not None:
PhilIndex.merge_phil(working_phil)
params = PhilIndex.get_python_object()
fout = open(file, "w")
fout.write('<?xml version="1.0"?>')
fout.write("<AutoProcContainer>\n")
for crystal in sorted(self._crystals):
xcrystal = self._crystals[crystal]
cell = xcrystal.get_cell()
spacegroup = xcrystal.get_likely_spacegroups()[0]
fout.write("<AutoProc><spaceGroup>%s</spaceGroup>" % spacegroup)
self.write_refined_cell(fout, cell)
fout.write("</AutoProc>")
fout.write("<AutoProcScalingContainer>")
fout.write("<AutoProcScaling>")
self.write_date(fout)
fout.write("</AutoProcScaling>")
statistics_all = xcrystal.get_statistics()
reflection_files = xcrystal.get_scaled_merged_reflections()
for key in statistics_all:
pname, xname, dname = key
# FIXME should assert that the dname is a
# valid wavelength name
keys = [
"High resolution limit",
"Low resolution limit",
"Completeness",
"Multiplicity",
"I/sigma",
"Rmerge(I+/-)",
"CC half",
"Anomalous completeness",
"Anomalous correlation",
"Anomalous multiplicity",
"Total observations",
"Total unique",
"Rmeas(I)",
"Rmeas(I+/-)",
"Rpim(I)",
"Rpim(I+/-)",
"Partial Bias",
]
stats = [k for k in keys if k in statistics_all[key]]
xwavelength = xcrystal.get_xwavelength(dname)
sweeps = xwavelength.get_sweeps()
for j, name in enumerate(["overall", "innerShell", "outerShell"]):
statistics_cache = {}
for s in stats:
if isinstance(statistics_all[key][s], type([])):
statistics_cache[s] = statistics_all[key][s][j]
elif isinstance(statistics_all[key][s], type(())):
statistics_cache[s] = statistics_all[key][s][j]
# send these to be written out
self.write_scaling_statistics(fout, name, statistics_cache)
for sweep in sweeps:
fout.write("<AutoProcIntegrationContainer>\n")
if "#" in sweep.get_template():
image_name = sweep.get_image_name(0)
else:
image_name = os.path.join(
sweep.get_directory(), sweep.get_template()
)
fout.write(
"<Image><fileName>%s</fileName>" % os.path.split(image_name)[-1]
)
fout.write(
"<fileLocation>%s</fileLocation></Image>"
% sanitize(os.path.split(image_name)[0])
)
fout.write("<AutoProcIntegration>\n")
cell = sweep.get_integrater_cell()
self.write_cell(fout, cell)
# FIXME this is naughty
intgr = sweep._get_integrater()
start, end = intgr.get_integrater_wedge()
fout.write("<startImageNumber>%d</startImageNumber>" % start)
fout.write("<endImageNumber>%d</endImageNumber>" % end)
# FIXME this is naughty
indxr = sweep._get_indexer()
fout.write(
"<refinedDetectorDistance>%f</refinedDetectorDistance>"
% indxr.get_indexer_distance()
)
beam = indxr.get_indexer_beam_centre_raw_image()
fout.write("<refinedXBeam>%f</refinedXBeam>" % beam[0])
fout.write("<refinedYBeam>%f</refinedYBeam>" % beam[1])
fout.write("</AutoProcIntegration>\n")
fout.write("</AutoProcIntegrationContainer>\n")
fout.write("</AutoProcScalingContainer>")
# file unpacking nonsense
if not command_line:
from xia2.Handlers.CommandLine import CommandLine
command_line = CommandLine.get_command_line()
pipeline = params.xia2.settings.pipeline
fout.write("<AutoProcProgramContainer><AutoProcProgram>")
fout.write(
"<processingCommandLine>%s</processingCommandLine>"
% sanitize(command_line)
)
fout.write("<processingPrograms>xia2 %s</processingPrograms>" % pipeline)
fout.write("</AutoProcProgram>")
data_directory = self._project.path / "DataFiles"
log_directory = self._project.path / "LogFiles"
for k in reflection_files:
reflection_file = reflection_files[k]
if not isinstance(reflection_file, type("")):
continue
reflection_file = FileHandler.get_data_file(
self._project.path, reflection_file
)
basename = os.path.basename(reflection_file)
if data_directory.joinpath(basename).exists():
# Use file in DataFiles directory in preference (if it exists)
reflection_file = str(data_directory.joinpath(basename))
fout.write("<AutoProcProgramAttachment><fileType>Result")
fout.write(
"</fileType><fileName>%s</fileName>"
% os.path.split(reflection_file)[-1]
)
fout.write(
"<filePath>%s</filePath>"
% sanitize(os.path.split(reflection_file)[0])
)
fout.write("</AutoProcProgramAttachment>\n")
g = log_directory.glob("*merging-statistics.json")
for merging_stats_json in g:
fout.write("<AutoProcProgramAttachment><fileType>Graph")
fout.write(
"</fileType><fileName>%s</fileName>"
% os.path.split(str(merging_stats_json))[-1]
)
fout.write("<filePath>%s</filePath>" % sanitize(str(log_directory)))
fout.write("</AutoProcProgramAttachment>\n")
# add the xia2.txt file...
fout.write("<AutoProcProgramAttachment><fileType>Log")
fout.write("</fileType><fileName>xia2.txt</fileName>")
fout.write("<filePath>%s</filePath>" % sanitize(os.getcwd()))
fout.write("</AutoProcProgramAttachment>\n")
fout.write("</AutoProcProgramContainer>")
fout.write("</AutoProcContainer>\n")
fout.close()
def json_object(self, command_line=""):
result = {}
for crystal in sorted(self._crystals):
xcrystal = self._crystals[crystal]
cell = xcrystal.get_cell()
spacegroup = xcrystal.get_likely_spacegroups()[0]
result["AutoProc"] = {}
tmp = result["AutoProc"]
tmp["spaceGroup"] = spacegroup
for name, value in zip(["a", "b", "c", "alpha", "beta", "gamma"], cell):
tmp["refinedCell_%s" % name] = value
result["AutoProcScalingContainer"] = {}
tmp = result["AutoProcScalingContainer"]
tmp["AutoProcScaling"] = {
"recordTimeStamp": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
}
statistics_all = xcrystal.get_statistics()
reflection_files = xcrystal.get_scaled_merged_reflections()
for key in list(statistics_all.keys()):
pname, xname, dname = key
# FIXME should assert that the dname is a
# valid wavelength name
keys = [
"High resolution limit",
"Low resolution limit",
"Completeness",
"Multiplicity",
"I/sigma",
"Rmerge(I+/-)",
"CC half",
"Anomalous completeness",
"Anomalous correlation",
"Anomalous multiplicity",
"Total observations",
"Total unique",
"Rmeas(I)",
"Rmeas(I+/-)",
"Rpim(I)",
"Rpim(I+/-)",
"Partial Bias",
]
stats = [k for k in keys if k in statistics_all[key]]
xwavelength = xcrystal.get_xwavelength(dname)
sweeps = xwavelength.get_sweeps()
tmp["AutoProcScalingStatistics"] = []
tmp2 = tmp["AutoProcScalingStatistics"]
for j, name in enumerate(["overall", "innerShell", "outerShell"]):
statistics_cache = {"scalingStatisticsType": name}
for s in stats:
if s in self._name_map:
n = self._name_map[s]
else:
continue
if isinstance(statistics_all[key][s], type([])):
statistics_cache[n] = statistics_all[key][s][j]
elif isinstance(statistics_all[key][s], type(())):
statistics_cache[n] = statistics_all[key][s][j]
tmp2.append(statistics_cache)
tmp["AutoProcIntegrationContainer"] = []
tmp2 = tmp["AutoProcIntegrationContainer"]
for sweep in sweeps:
if "#" in sweep.get_template():
image_name = sweep.get_image_name(0)
else:
image_name = os.path.join(
sweep.get_directory(), sweep.get_template()
)
cell = sweep.get_integrater_cell()
intgr_tmp = {}
for name, value in zip(
["a", "b", "c", "alpha", "beta", "gamma"], cell
):
intgr_tmp["cell_%s" % name] = value
# FIXME this is naughty
indxr = sweep._get_indexer()
intgr = sweep._get_integrater()
start, end = intgr.get_integrater_wedge()
intgr_tmp["startImageNumber"] = start
intgr_tmp["endImageNumber"] = end
intgr_tmp["refinedDetectorDistance"] = indxr.get_indexer_distance()
beam = indxr.get_indexer_beam_centre_raw_image()
intgr_tmp["refinedXBeam"] = beam[0]
intgr_tmp["refinedYBeam"] = beam[1]
tmp2.append(
{
"Image": {
"fileName": os.path.split(image_name)[-1],
"fileLocation": sanitize(os.path.split(image_name)[0]),
},
"AutoProcIntegration": intgr_tmp,
}
)
# file unpacking nonsense
result["AutoProcProgramContainer"] = {}
tmp = result["AutoProcProgramContainer"]
tmp2 = {}
if not command_line:
from xia2.Handlers.CommandLine import CommandLine
command_line = CommandLine.get_command_line()
tmp2["processingCommandLine"] = sanitize(command_line)
tmp2["processingProgram"] = "xia2"
tmp["AutoProcProgram"] = tmp2
tmp["AutoProcProgramAttachment"] = []
tmp2 = tmp["AutoProcProgramAttachment"]
data_directory = self._project.path / "DataFiles"
for k in reflection_files:
reflection_file = reflection_files[k]
if not isinstance(reflection_file, type("")):
continue
reflection_file = FileHandler.get_data_file(
self._project.path, reflection_file
)
basename = os.path.basename(reflection_file)
if data_directory.joinpath(basename).exists():
# Use file in DataFiles directory in preference (if it exists)
reflection_file = str(data_directory.joinpath(basename))
tmp2.append(
{
"fileType": "Result",
"fileName": os.path.split(reflection_file)[-1],
"filePath": sanitize(os.path.split(reflection_file)[0]),
}
)
tmp2.append(
{
"fileType": "Log",
"fileName": "xia2.txt",
"filePath": sanitize(os.getcwd()),
}
)
return result
|
the-stack_106_19707
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for chef_validator.engine.validate """
from mock import mock
import docker.errors
from chef_validator.engine.validate import ValidateEngine
from chef_validator.tests.unit.base import ValidatorTestCase
class ValidateEngineTestCase(ValidatorTestCase):
"""Tests for class ValidateEngine """
def setUp(self):
"""Create a ValidateEngine instance """
super(ValidateEngineTestCase, self).setUp()
self.validate = ValidateEngine()
def test_validate_cookbook(self):
"""Tests for method validate_cookbook """
self.validate.d.cookbook_deployment_test = mock.MagicMock(
return_value="OK")
test_input = "MyInput"
cookbook = recipe = image = request = test_input
expected = "OK"
observed = self.validate.validate_cookbook(
cookbook,
recipe,
image,
request)
self.assertEqual(observed, expected)
def tearDown(self):
"""Cleanup the ValidateEngine instance """
super(ValidateEngineTestCase, self).tearDown()
self.m.UnsetStubs()
self.m.ResetAll()
|
the-stack_106_19708
|
#!/usr/bin/env python
# Copyright 2021 Roboception GmbH
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the {copyright_holder} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import rospy
from tf2_msgs.msg import TFMessage
from geometry_msgs.msg import TransformStamped
from rc_reason_msgs.srv import SetLoadCarrier, GetLoadCarriers, DeleteLoadCarriers
from rc_reason_msgs.srv import DetectLoadCarriers, DetectFillingLevel
from rc_reason_msgs.srv import GetRegionsOfInterest3D, SetRegionOfInterest3D, DeleteRegionsOfInterest3D
from rc_reason_msgs.srv import GetRegionsOfInterest2D, SetRegionOfInterest2D, DeleteRegionsOfInterest2D
from visualization_msgs.msg import Marker, MarkerArray
from std_msgs.msg import ColorRGBA
from .rest_client import RestClient
from .transform_helpers import lc_to_marker, load_carrier_to_tf
class LoadCarrierClient(RestClient):
def __init__(self):
super(LoadCarrierClient, self).__init__('rc_load_carrier')
# client only parameters
self.publish_tf = rospy.get_param("~publish_tf", True)
self.publish_markers = rospy.get_param("~publish_markers", True)
self.pub_tf = rospy.Publisher('/tf', TFMessage, queue_size=10)
self.pub_markers = rospy.Publisher('visualization_marker_array', MarkerArray, queue_size=10)
self.lc_markers = []
self.add_rest_service(DetectLoadCarriers, 'detect_load_carriers', self.lc_cb)
self.add_rest_service(DetectFillingLevel, 'detect_filling_level', self.lc_cb)
self.add_rest_service(SetLoadCarrier, 'set_load_carrier', self.generic_cb)
self.add_rest_service(GetLoadCarriers, 'get_load_carriers', self.generic_cb)
self.add_rest_service(DeleteLoadCarriers, 'delete_load_carriers', self.generic_cb)
self.add_rest_service(SetRegionOfInterest3D, 'set_region_of_interest', self.generic_cb)
self.add_rest_service(GetRegionsOfInterest3D, 'get_regions_of_interest', self.generic_cb)
self.add_rest_service(DeleteRegionsOfInterest3D, 'delete_regions_of_interest', self.generic_cb)
self.add_rest_service(GetRegionsOfInterest2D, 'get_regions_of_interest_2d', self.generic_cb)
self.add_rest_service(SetRegionOfInterest2D, 'set_region_of_interest_2d', self.generic_cb)
self.add_rest_service(DeleteRegionsOfInterest2D, 'delete_regions_of_interest_2d', self.generic_cb)
def generic_cb(self, srv_name, srv_type, request):
response = self.call_rest_service(srv_name, srv_type, request)
return response
def lc_cb(self, srv_name, srv_type, request):
response = self.call_rest_service(srv_name, srv_type, request)
self.publish_lcs(response.load_carriers)
return response
def publish_lcs(self, lcs):
if lcs and self.publish_tf:
transforms = [load_carrier_to_tf(lc, i) for i, lc in enumerate(lcs)]
self.pub_tf.publish(TFMessage(transforms=transforms))
if self.publish_markers:
self.publish_lc_markers(lcs)
def publish_lc_markers(self, lcs):
new_markers = []
for i, lc in enumerate(lcs):
m = lc_to_marker(lc, i, self.rest_name + "_lcs")
if i < len(self.lc_markers):
self.lc_markers[i] = m
else:
self.lc_markers.append(m)
new_markers.append(m)
for i in range(len(lcs), len(self.lc_markers)):
# delete old markers
self.lc_markers[i].action = Marker.DELETE
self.pub_markers.publish(MarkerArray(markers=self.lc_markers))
self.lc_markers = new_markers
def main():
client = LoadCarrierClient()
try:
rospy.spin()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
|
the-stack_106_19709
|
# Copyright 2021 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import logging
from typing import List
from adobe.pdfservices.operation.pdfops.options.extractpdf.extract_element_type import ExtractElementType
from adobe.pdfservices.operation.pdfops.options.extractpdf.extract_renditions_element_type import \
ExtractRenditionsElementType
from adobe.pdfservices.operation.pdfops.options.extractpdf.table_structure_type import TableStructureType
class ExtractPDFOptions():
""" An Options Class that defines the options for ExtractPDFOperation.
.. code-block:: python
extract_pdf_options: ExtractPDFOptions = ExtractPDFOptions.builder() \\
.with_elements_to_extract([ExtractElementType.TEXT, ExtractElementType.TABLES]) \\
.with_get_char_info(True) \\
.with_table_structure_format(TableStructureType.CSV) \\
.with_elements_to_extract_renditions([ExtractRenditionsElementType.FIGURES, ExtractRenditionsElementType.TABLES]) \\
.with_include_styling_info(True) \\
.build()
"""
def __init__(self, elements_to_extract, elements_to_extract_renditions, get_char_info, table_output_format,
include_styling_info=None):
self._elements_to_extract = elements_to_extract
self._elements_to_extract_renditions = elements_to_extract_renditions
self._get_char_info = get_char_info
self._table_output_format = table_output_format
self._include_styling_info = include_styling_info
self._logger = logging.getLogger(__name__)
@property
def elements_to_extract(self):
""" List of pdf element types to be extracted in a structured format from input file"""
return self._elements_to_extract
@property
def elements_to_extract_renditions(self):
""" List of pdf element types whose renditions needs to be extracted from input file"""
return self._elements_to_extract_renditions
@property
def get_char_info(self):
""" Boolean specifying whether to add character level bounding boxes to output json """
return self._get_char_info
@property
def table_output_format(self):
""" export table in specified format - currently csv supported """
return self._table_output_format
@property
def include_styling_info(self):
""" Boolean specifying whether to add PDF Elements Styling Info to output json """
return self._include_styling_info
@staticmethod
def builder():
"""Returns a Builder for :class:`ExtractPDFOptions`
:return: The builder class for ExtractPDFOptions
:rtype: ExtractPDFOptions.Builder
"""
return ExtractPDFOptions.Builder()
class Builder:
""" The builder for :class:`ExtractPDFOptions`.
"""
def __init__(self):
self._elements_to_extract = None
self._elements_to_extract_renditions = None
self._table_output_format = None
self._get_char_info = None
self._include_styling_info = None
def _init_elements_to_extract(self):
if not self._elements_to_extract:
self._elements_to_extract = []
def _init_elements_to_extract_renditions(self):
if not self._elements_to_extract_renditions:
self._elements_to_extract_renditions = []
def with_element_to_extract(self, element_to_extract: ExtractElementType):
"""
adds a pdf element type for extracting structured information.
:param element_to_extract: ExtractElementType to be extracted
:type element_to_extract: ExtractElementType
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
:raises ValueError: if element_to_extract is None.
"""
if element_to_extract and element_to_extract in ExtractElementType:
self._init_elements_to_extract()
self._elements_to_extract.append(element_to_extract)
else:
raise ValueError("Only ExtractElementType enum is accepted for element_to_extract")
return self
def with_elements_to_extract(self, elements_to_extract: List[ExtractElementType]):
"""
adds a list of pdf element types for extracting structured information.
:param elements_to_extract: List of ExtractElementType to be extracted
:type elements_to_extract: List[ExtractElementType]
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
:raises ValueError: if elements_to_extract is None or empty list.
"""
if elements_to_extract and all(element in ExtractElementType for element in elements_to_extract):
self._init_elements_to_extract()
self._elements_to_extract.extend(elements_to_extract)
else:
raise ValueError("Only ExtractElementType enum List is accepted for elements_to_extract")
return self
def with_element_to_extract_renditions(self, element_to_extract_renditions: ExtractRenditionsElementType):
"""
adds a pdf element type for extracting rendition.
:param element_to_extract_renditions: ExtractRenditionsElementType whose renditions have to be extracted
:type element_to_extract_renditions: ExtractRenditionsElementType
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
:raises ValueError: if element_to_extract_renditions is None.
"""
if element_to_extract_renditions and element_to_extract_renditions in ExtractRenditionsElementType:
self._init_elements_to_extract_renditions()
self._elements_to_extract_renditions.append(element_to_extract_renditions)
else:
raise ValueError("Only ExtractRenditionsElementType enum is accepted for element_to_extract_renditions")
return self
def with_elements_to_extract_renditions(self, elements_to_extract_renditions: List[ExtractRenditionsElementType]):
"""
adds a list of pdf element types for extracting rendition.
:param elements_to_extract_renditions: List of ExtractRenditionsElementType whose renditions have to be extracted
:type elements_to_extract_renditions: List[ExtractRenditionsElementType]
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
:raises ValueError: if elements_to_extract is None or empty list.
"""
if elements_to_extract_renditions and all(
element in ExtractRenditionsElementType for element in elements_to_extract_renditions):
self._init_elements_to_extract_renditions()
self._elements_to_extract_renditions.extend(elements_to_extract_renditions)
else:
raise ValueError("Only ExtractRenditionsElementType enum List is accepted for elements_to_extract_renditions")
return self
def with_table_structure_format(self, table_structure: TableStructureType):
"""
adds the table structure format (currently csv only) for extracting structured information.
:param table_structure: TableStructureType to be extracted
:type table_structure: TableStructureType
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
:raises ValueError: if table_structure is None.
"""
if table_structure and table_structure in TableStructureType:
self._table_output_format = table_structure
else:
raise ValueError("Only TableStructureType enum is accepted for table_structure_format")
return self
def with_get_char_info(self, get_char_info: bool):
"""
sets the Boolean specifying whether to add character level bounding boxes to output json
:param get_char_info: Set True to extract character level bounding boxes information
:type get_char_info: bool
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
"""
self._get_char_info = get_char_info
return self
def with_include_styling_info(self, include_styling_info: bool):
"""
sets the Boolean specifying whether to add PDF Elements Styling Info to output json
:param include_styling_info: Set True to extract PDF Elements Styling Info
:type include_styling_info: bool
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
"""
self._include_styling_info = include_styling_info
return self
def build(self):
return ExtractPDFOptions(self._elements_to_extract, self._elements_to_extract_renditions,
self._get_char_info,
self._table_output_format, self._include_styling_info)
|
the-stack_106_19710
|
"""
Support for deCONZ devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/deconz/
"""
import logging
import voluptuous as vol
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.discovery import SERVICE_DECONZ
from homeassistant.const import (
CONF_API_KEY, CONF_HOST, CONF_PORT, EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers import discovery, aiohttp_client
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.util.json import load_json, save_json
REQUIREMENTS = ['pydeconz==35']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'deconz'
DATA_DECONZ_ID = 'deconz_entities'
CONFIG_FILE = 'deconz.conf'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_API_KEY): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=80): cv.port,
})
}, extra=vol.ALLOW_EXTRA)
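# Example configuration.yaml entry accepted by CONFIG_SCHEMA above
# (host, port and api_key values are placeholders):
#
#   deconz:
#     host: 127.0.0.1
#     port: 80
#     api_key: 0123456789ABCDEF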
SERVICE_FIELD = 'field'
SERVICE_ENTITY = 'entity'
SERVICE_DATA = 'data'
SERVICE_SCHEMA = vol.Schema({
vol.Exclusive(SERVICE_FIELD, 'deconz_id'): cv.string,
vol.Exclusive(SERVICE_ENTITY, 'deconz_id'): cv.entity_id,
vol.Required(SERVICE_DATA): dict,
})
CONFIG_INSTRUCTIONS = """
Unlock your deCONZ gateway to register with Home Assistant.
1. [Go to deCONZ system settings](http://{}:{}/edit_system.html)
2. Press "Unlock Gateway" button
[deCONZ platform documentation](https://home-assistant.io/components/deconz/)
"""
async def async_setup(hass, config):
"""Set up services and configuration for deCONZ component."""
result = False
config_file = await hass.async_add_job(
load_json, hass.config.path(CONFIG_FILE))
async def async_deconz_discovered(service, discovery_info):
"""Call when deCONZ gateway has been found."""
deconz_config = {}
deconz_config[CONF_HOST] = discovery_info.get(CONF_HOST)
deconz_config[CONF_PORT] = discovery_info.get(CONF_PORT)
await async_request_configuration(hass, config, deconz_config)
if config_file:
result = await async_setup_deconz(hass, config, config_file)
if not result and DOMAIN in config and CONF_HOST in config[DOMAIN]:
deconz_config = config[DOMAIN]
if CONF_API_KEY in deconz_config:
result = await async_setup_deconz(hass, config, deconz_config)
else:
await async_request_configuration(hass, config, deconz_config)
return True
if not result:
discovery.async_listen(hass, SERVICE_DECONZ, async_deconz_discovered)
return True
async def async_setup_deconz(hass, config, deconz_config):
"""Set up a deCONZ session.
Load config, group, light and sensor data for server information.
Start websocket for push notification of state changes from deCONZ.
"""
_LOGGER.debug("deCONZ config %s", deconz_config)
from pydeconz import DeconzSession
websession = async_get_clientsession(hass)
deconz = DeconzSession(hass.loop, websession, **deconz_config)
result = await deconz.async_load_parameters()
if result is False:
_LOGGER.error("Failed to communicate with deCONZ")
return False
hass.data[DOMAIN] = deconz
hass.data[DATA_DECONZ_ID] = {}
for component in ['binary_sensor', 'light', 'scene', 'sensor']:
hass.async_add_job(discovery.async_load_platform(
hass, component, DOMAIN, {}, config))
deconz.start()
async def async_configure(call):
"""Set attribute of device in deCONZ.
Field is a string representing a specific device in deCONZ
e.g. field='/lights/1/state'.
Entity_id can be used to retrieve the proper field.
Data is a json object with what data you want to alter
e.g. data={'on': true}.
{
"field": "/lights/1/state",
"data": {"on": true}
}
See Dresden Elektroniks REST API documentation for details:
http://dresden-elektronik.github.io/deconz-rest-doc/rest/
"""
field = call.data.get(SERVICE_FIELD)
entity_id = call.data.get(SERVICE_ENTITY)
data = call.data.get(SERVICE_DATA)
deconz = hass.data[DOMAIN]
if entity_id:
entities = hass.data.get(DATA_DECONZ_ID)
if entities:
field = entities.get(entity_id)
if field is None:
_LOGGER.error('Could not find the entity %s', entity_id)
return
await deconz.async_put_state(field, data)
hass.services.async_register(
DOMAIN, 'configure', async_configure, schema=SERVICE_SCHEMA)
@callback
def deconz_shutdown(event):
"""
Wrap the call to deconz.close.
Used as an argument to EventBus.async_listen_once - EventBus calls
this method with the event as the first argument, which should not
be passed on to deconz.close.
"""
deconz.close()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, deconz_shutdown)
return True
async def async_request_configuration(hass, config, deconz_config):
"""Request configuration steps from the user."""
configurator = hass.components.configurator
async def async_configuration_callback(data):
"""Set up actions to do when our configuration callback is called."""
from pydeconz.utils import async_get_api_key
websession = async_get_clientsession(hass)
api_key = await async_get_api_key(websession, **deconz_config)
if api_key:
deconz_config[CONF_API_KEY] = api_key
result = await async_setup_deconz(hass, config, deconz_config)
if result:
await hass.async_add_job(
save_json, hass.config.path(CONFIG_FILE), deconz_config)
configurator.async_request_done(request_id)
return
else:
configurator.async_notify_errors(
request_id, "Couldn't load configuration.")
else:
configurator.async_notify_errors(
request_id, "Couldn't get an API key.")
return
instructions = CONFIG_INSTRUCTIONS.format(
deconz_config[CONF_HOST], deconz_config[CONF_PORT])
request_id = configurator.async_request_config(
"deCONZ", async_configuration_callback,
description=instructions,
entity_picture="/static/images/logo_deconz.jpeg",
submit_caption="I have unlocked the gateway",
)
@config_entries.HANDLERS.register(DOMAIN)
class DeconzFlowHandler(data_entry_flow.FlowHandler):
"""Handle a deCONZ config flow."""
VERSION = 1
def __init__(self):
"""Initialize the deCONZ flow."""
self.bridges = []
self.deconz_config = {}
async def async_step_init(self, user_input=None):
"""Handle a flow start."""
from pydeconz.utils import async_discovery
if DOMAIN in self.hass.data:
return self.async_abort(
reason='one_instance_only'
)
if user_input is not None:
for bridge in self.bridges:
if bridge[CONF_HOST] == user_input[CONF_HOST]:
self.deconz_config = bridge
return await self.async_step_link()
session = aiohttp_client.async_get_clientsession(self.hass)
self.bridges = await async_discovery(session)
if len(self.bridges) == 1:
self.deconz_config = self.bridges[0]
return await self.async_step_link()
elif len(self.bridges) > 1:
hosts = []
for bridge in self.bridges:
hosts.append(bridge[CONF_HOST])
return self.async_show_form(
step_id='init',
data_schema=vol.Schema({
vol.Required(CONF_HOST): vol.In(hosts)
})
)
return self.async_abort(
reason='no_bridges'
)
async def async_step_link(self, user_input=None):
"""Attempt to link with the deCONZ bridge."""
from pydeconz.utils import async_get_api_key
errors = {}
if user_input is not None:
session = aiohttp_client.async_get_clientsession(self.hass)
api_key = await async_get_api_key(session, **self.deconz_config)
if api_key:
self.deconz_config[CONF_API_KEY] = api_key
return self.async_create_entry(
title='deCONZ',
data=self.deconz_config
)
else:
errors['base'] = 'no_key'
return self.async_show_form(
step_id='link',
errors=errors,
)
async def async_setup_entry(hass, entry):
"""Set up a bridge for a config entry."""
if DOMAIN in hass.data:
_LOGGER.error(
"Config entry failed since one deCONZ instance already exists")
return False
result = await async_setup_deconz(hass, None, entry.data)
if result:
return True
return False
|
the-stack_106_19715
|
# -*- coding: utf-8 -*-
"""The dynamic output module CLI arguments helper."""
from plaso.lib import errors
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.output import dynamic
class DynamicOutputArgumentsHelper(interface.ArgumentsHelper):
"""Dynamic output module CLI arguments helper."""
NAME = 'dynamic'
CATEGORY = 'output'
DESCRIPTION = 'Argument helper for the dynamic output module.'
_DEFAULT_FIELDS = [
'datetime', 'timestamp_desc', 'source', 'source_long',
'message', 'parser', 'display_name', 'tag']
@classmethod
def AddArguments(cls, argument_group):
"""Adds command line arguments the helper supports to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
"""
default_fields = ','.join(cls._DEFAULT_FIELDS)
argument_group.add_argument(
'--fields', dest='fields', type=str, action='store',
default=default_fields, help=(
'Defines which fields should be included in the output.'))
default_fields = ', '.join(cls._DEFAULT_FIELDS)
argument_group.add_argument(
'--additional_fields', '--additional-fields', dest='additional_fields',
type=str, action='store', default='', help=(
'Defines extra fields to be included in the output, in addition to '
'the default fields, which are {0:s}.'.format(default_fields)))
@classmethod
def ParseOptions(cls, options, output_module): # pylint: disable=arguments-renamed
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when the output filename was not provided.
"""
if not isinstance(output_module, dynamic.DynamicOutputModule):
raise errors.BadConfigObject(
'Output module is not an instance of DynamicOutputModule')
default_fields = ','.join(cls._DEFAULT_FIELDS)
fields = cls._ParseStringOption(
options, 'fields', default_value=default_fields)
additional_fields = cls._ParseStringOption(options, 'additional_fields')
if additional_fields:
fields = ','.join([fields, additional_fields])
output_module.SetFields([
field_name.strip() for field_name in fields.split(',')])
manager.ArgumentHelperManager.RegisterHelper(DynamicOutputArgumentsHelper)
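
# Minimal usage sketch (illustrative only): wire the helper into a bare
# argparse parser and inspect the parsed field options. A real run would
# pass a configured DynamicOutputModule to ParseOptions instead.
if __name__ == '__main__':
    import argparse
    example_parser = argparse.ArgumentParser()
    DynamicOutputArgumentsHelper.AddArguments(example_parser)
    example_options = example_parser.parse_args(['--additional_fields', 'hostname'])
    print(example_options.fields, example_options.additional_fields)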
|
the-stack_106_19716
|
#!/usr/bin/env python
import glob
import os
import os.path
import sys
if sys.version_info < (3, 6, 0):
sys.stderr.write("ERROR: You need Python 3.6 or later to use mypy.\n")
exit(1)
# we'll import stuff from the source tree, let's ensure is on the sys path
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
# This requires setuptools when building; setuptools is not needed
# when installing from a wheel file (though it is still needed for
# alternative forms of installing, as suggested by README.md).
from setuptools import setup, find_packages
from setuptools.command.build_py import build_py
from mypy.version import __version__ as version
description = 'Optional static typing for Python'
long_description = '''
Mypy -- Optional Static Typing for Python
=========================================
Add type annotations to your Python programs, and use mypy to type
check them. Mypy is essentially a Python linter on steroids, and it
can catch many programming errors by analyzing your program, without
actually having to run it. Mypy has a powerful type system with
features such as type inference, gradual typing, generics and union
types.
'''.lstrip()
def find_package_data(base, globs, root='mypy'):
"""Find all interesting data files, for setup(package_data=)
Arguments:
root: The directory to search in.
globs: A list of glob patterns to accept files.
"""
rv_dirs = [root for root, dirs, files in os.walk(base)]
rv = []
for rv_dir in rv_dirs:
files = []
for pat in globs:
files += glob.glob(os.path.join(rv_dir, pat))
if not files:
continue
rv.extend([os.path.relpath(f, root) for f in files])
return rv
class CustomPythonBuild(build_py):
def pin_version(self):
path = os.path.join(self.build_lib, 'mypy')
self.mkpath(path)
with open(os.path.join(path, 'version.py'), 'w') as stream:
stream.write('__version__ = "{}"\n'.format(version))
def run(self):
self.execute(self.pin_version, ())
build_py.run(self)
cmdclass = {'build_py': CustomPythonBuild}
package_data = ['py.typed']
package_data += find_package_data(os.path.join('mypy', 'typeshed'), ['*.py', '*.pyi'])
package_data += [os.path.join('mypy', 'typeshed', 'stdlib', 'VERSIONS')]
package_data += find_package_data(os.path.join('mypy', 'xml'), ['*.xsd', '*.xslt', '*.css'])
USE_MYPYC = False
# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH
if len(sys.argv) > 1 and sys.argv[1] == '--use-mypyc':
sys.argv.pop(1)
USE_MYPYC = True
if os.getenv('MYPY_USE_MYPYC', None) == '1':
USE_MYPYC = True
if USE_MYPYC:
MYPYC_BLACKLIST = tuple(os.path.join('mypy', x) for x in (
# Need to be runnable as scripts
'__main__.py',
'pyinfo.py',
os.path.join('dmypy', '__main__.py'),
# Uses __getattr__/__setattr__
'split_namespace.py',
# Lies to mypy about code reachability
'bogus_type.py',
# We don't populate __file__ properly at the top level or something?
# Also I think there would be problems with how we generate version.py.
'version.py',
# Skip these to reduce the size of the build
'stubtest.py',
'stubgenc.py',
'stubdoc.py',
'stubutil.py',
)) + (
# Don't want to grab this accidentally
os.path.join('mypyc', 'lib-rt', 'setup.py'),
)
everything = (
[os.path.join('mypy', x) for x in find_package_data('mypy', ['*.py'])] +
[os.path.join('mypyc', x) for x in find_package_data('mypyc', ['*.py'], root='mypyc')])
# Start with all the .py files
all_real_pys = [x for x in everything
if not x.startswith(os.path.join('mypy', 'typeshed') + os.sep)]
# Strip out anything in our blacklist
mypyc_targets = [x for x in all_real_pys if x not in MYPYC_BLACKLIST]
# Strip out any test code
mypyc_targets = [x for x in mypyc_targets
if not x.startswith((os.path.join('mypy', 'test') + os.sep,
os.path.join('mypyc', 'test') + os.sep,
os.path.join('mypyc', 'doc') + os.sep,
os.path.join('mypyc', 'test-data') + os.sep,
))]
# ... and add back in the one test module we need
mypyc_targets.append(os.path.join('mypy', 'test', 'visitors.py'))
# The targets come out of file system apis in an unspecified
# order. Sort them so that the mypyc output is deterministic.
mypyc_targets.sort()
use_other_mypyc = os.getenv('ALTERNATE_MYPYC_PATH', None)
if use_other_mypyc:
# This bit is super unfortunate: we want to use a different
# mypy/mypyc version, but we've already imported parts, so we
# remove the modules that we've imported already, which will
# let the right versions be imported by mypyc.
del sys.modules['mypy']
del sys.modules['mypy.version']
del sys.modules['mypy.git']
sys.path.insert(0, use_other_mypyc)
from mypyc.build import mypycify
opt_level = os.getenv('MYPYC_OPT_LEVEL', '3')
force_multifile = os.getenv('MYPYC_MULTI_FILE', '') == '1'
ext_modules = mypycify(
mypyc_targets + ['--config-file=mypy_bootstrap.ini'],
opt_level=opt_level,
# Use multi-file compilation mode on windows because without it
# our Appveyor builds run out of memory sometimes.
multi_file=sys.platform == 'win32' or force_multifile,
)
else:
ext_modules = []
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Software Development',
]
setup(name='mypy',
version=version,
description=description,
long_description=long_description,
author='Jukka Lehtosalo',
author_email='[email protected]',
url='http://www.mypy-lang.org/',
license='MIT License',
py_modules=[],
ext_modules=ext_modules,
packages=find_packages(),
package_data={'mypy': package_data},
scripts=['scripts/mypyc'],
entry_points={'console_scripts': ['mypy=mypy.__main__:console_entry',
'stubgen=mypy.stubgen:main',
'stubtest=mypy.stubtest:main',
'dmypy=mypy.dmypy.client:console_entry',
]},
classifiers=classifiers,
cmdclass=cmdclass,
# When changing this, also update mypy-requirements.txt.
install_requires=["typed_ast >= 1.4.0, < 1.5.0; python_version<'3.8'",
'typing_extensions>=3.7.4',
'mypy_extensions >= 0.4.3, < 0.5.0',
'tomli>=1.1.0,<1.2.0',
],
# Same here.
extras_require={'dmypy': 'psutil >= 4.0', 'python2': 'typed_ast >= 1.4.0, < 1.5.0'},
python_requires=">=3.6",
include_package_data=True,
project_urls={
'News': 'http://mypy-lang.org/news.html',
'Documentation': 'https://mypy.readthedocs.io/en/stable/introduction.html',
'Repository': 'https://github.com/python/mypy',
},
)
|
the-stack_106_19717
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import glob
import os
import re
from typing import List
from shrike.compliant_logging.exceptions import (
PublicValueError,
print_prefixed_stack_trace_and_raise,
)
class StackTraceExtractor:
"""
A class to perform extraction of stack traces, exception types and
optionally exception messages from files that might contain other
sensitive data.
Attributes
----------
show_exception_message : bool
True to extract exception messages. False to skip them.
prefix : bool
Prefix to prepend extracted lines with. Defaults to "SystemLog".
Methods
-------
extract(path):
Extracts traces and exceptions from file to stdout.
"""
def __init__(
self,
show_exception_message: bool = False,
prefix: str = "SystemLog",
):
self.in_python_traceback = False
self.show_exception_message = show_exception_message
self.prefix = prefix
def _parse_trace_python(self, string: str):
r = re.compile(r"Traceback \(most recent call last\):")
m = r.search(string)
if m:
self.in_python_traceback = True
return None
r = re.compile(r"File (?P<file>.*), line (?P<line>\d*), in (?P<method>.*)")
m = r.search(string)
if m:
return m
r = re.compile(r"(?P<type>.*Error): (?P<message>.*)")
m = r.search(string)
if m and self.in_python_traceback:
self.in_python_traceback = False
return m
return None
@staticmethod
def _parse_trace_csharp(string: str):
r = re.compile(
r"at (?P<namespace>.*)\.(?P<class>.*)\.(?P<method>.*) in (?P<file>.*):line (?P<line>\d*)" # noqa:501
)
m = r.search(string)
if m:
return m
r = re.compile(r"Unhandled exception. (?P<type>.*): (?P<message>.*)")
m = r.search(string)
if m:
return m
return None
def _parse_file(self, file: str) -> None:
print(f"{self.prefix}: Parsing file {os.path.abspath(file)}")
with open(file, "r") as f:
for line in f:
m = StackTraceExtractor._parse_trace_csharp(line)
if m and m.groupdict().get("type"):
print(f"{self.prefix}: type: {m.groupdict()['type']}")
if self.show_exception_message:
print(f"{self.prefix}: message: {m.groupdict()['message']}")
continue
elif m and m.groupdict().get("namespace"):
print(f"{self.prefix}: namespace: {m.groupdict()['namespace']}")
print(f"{self.prefix}: class: {m.groupdict()['class']}")
print(f"{self.prefix}: method: {m.groupdict()['method']}")
print(f"{self.prefix}: file: {m.groupdict()['file']}")
print(f"{self.prefix}: line: {m.groupdict()['line']}")
print()
continue
m = self._parse_trace_python(line)
if m and m.groupdict().get("type"):
print(f"{self.prefix}: type: {m.groupdict()['type']}")
if self.show_exception_message:
print(f"{self.prefix}: message: {m.groupdict()['message']}")
print()
elif m and m.groupdict().get("file"):
print(f"{self.prefix}: file: {m.groupdict()['file']}")
print(f"{self.prefix}: line: {m.groupdict()['line']}")
print(f"{self.prefix}: method: {m.groupdict()['method']}")
def _get_files(self, path) -> List[str]:
if os.path.isfile(path):
print(f"{self.prefix}: Input is a file")
return [path]
if os.path.isdir(path):
print(f"{self.prefix}: Input is a directory")
files = glob.glob(path + "/*.err")
return files
else:
raise PublicValueError("Provided path is neither a file nor a directory")
def extract(self, path: str) -> None:
"""
Run extraction on the given resources. Extracted traces and exceptions
will be printed to stdout.
Args:
path (str): file or path. If path, extraction will be performed on
all files with '.err' extension within that directory (not recursive).
Hidden files will be ignored.
"""
try:
for file in self._get_files(path):
self._parse_file(file)
except BaseException as e:
print(f"{self.prefix}: There is a problem with the exceptionExtractor.")
print_prefixed_stack_trace_and_raise(err=e, keep_message=True)
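
# Minimal usage sketch ("job.err" is a hypothetical path, not part of this
# module): extract stack traces from a single file or from all *.err files
# in a directory, printing only non-sensitive trace information.
if __name__ == "__main__":
    extractor = StackTraceExtractor(show_exception_message=False)
    extractor.extract("job.err")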
|
the-stack_106_19718
|
"""
Run by the evaluator, tries to make a GET request to a given server
"""
import argparse
import logging
import os
import random
import socket
import sys
import time
import traceback
import urllib.request
import requests
socket.setdefaulttimeout(1)
import external_sites
import actions.utils
from plugins.plugin_client import ClientPlugin
BASEPATH = os.path.dirname(os.path.abspath(__file__))
class HTTPClient(ClientPlugin):
"""
Defines the HTTP client.
"""
name = "http"
def __init__(self, args):
"""
Initializes the HTTP client.
"""
ClientPlugin.__init__(self)
self.args = args
@staticmethod
def get_args(command):
"""
Defines required args for this plugin
"""
super_args = ClientPlugin.get_args(command)
parser = argparse.ArgumentParser(description='HTTP Client', prog="http/client.py")
parser.add_argument('--host-header', action='store', default="", help='specifies host header for HTTP request')
parser.add_argument('--injected-http-contains', action='store', default="", help='checks if injected http response contains string')
args, _ = parser.parse_known_args(command)
args = vars(args)
super_args.update(args)
return super_args
def run(self, args, logger, engine=None):
"""
Try to make a forbidden GET request to the server.
"""
fitness = 0
url = args.get("server", "")
assert url, "Cannot launch HTTP test with no server"
if not url.startswith("http://"):
url = "http://" + url
headers = {}
if args.get('host_header'):
headers["Host"] = args.get('host_header')
# If we've been given a non-standard port, append that to the URL
port = args.get("port", 80)
if port != 80:
url += ":%s" % str(port)
if args.get("bad_word"):
url += "?q=%s" % args.get("bad_word")
injected_http = args.get("injected_http_contains")
try:
res = requests.get(url, allow_redirects=False, timeout=3, headers=headers)
logger.debug(res.text)
# If we need to monitor for an injected response, check that here
if injected_http and injected_http in res.text:
fitness -= 90
else:
fitness += 100
except requests.exceptions.ConnectTimeout as exc:
logger.exception("Socket timeout.")
fitness -= 100
except (requests.exceptions.ConnectionError, ConnectionResetError) as exc:
logger.exception("Connection RST.")
fitness -= 90
except urllib.error.URLError as exc:
logger.debug(exc)
fitness += -101
# Timeouts generally mean the strategy killed the TCP stream.
# HTTPError usually mean the request was destroyed.
# Punish this more harshly than getting caught by the censor.
except (requests.exceptions.Timeout, requests.exceptions.HTTPError) as exc:
logger.debug(exc)
fitness += -120
except Exception:
logger.exception("Exception caught in HTTP test to site %s.", url)
fitness += -100
return fitness * 4
|
the-stack_106_19721
|
# Copyright 2021 The Commplax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install commplax"""
from setuptools import setup, find_packages
_dct = {}
with open('commplax/version.py') as f:
exec(f.read(), _dct)
__version__ = _dct['__version__']
setup(name='commplax',
version=__version__,
description='differentiable DSP library for optical communication',
author='Commplax team',
author_email='[email protected]',
url='https://github.com/remifan/commplax',
packages=find_packages(),
install_requires=[
'jax>=0.2.13',
'jaxlib>=0.1.66',
'flax>=0.3.4',
'seaborn',
'quantumrandom'
],
extras_require={
'dev': [
'attr',
'mock',
'pytest',
'parameterized',
'ipykernel',
'ipympl',
],
'fs': [
'zarr',
's3fs',
'fsspec'
],
'all': [
'zarr[jupyter]==2.9.5',
's3fs',
'fsspec',
'plotly',
'tqdm'
]
},
license='Apache-2.0',
)
|
the-stack_106_19722
|
# System configs
GLOBAL_HOST = '0.0.0.0'
LOCAL_HOST = '127.0.0.1'
# Slave configs
MIN_HEARTBEAT_SPAN = 0.2
DEFAULT_HEARTBEAT_SPAN = 3.0
DEFAULT_HEARTBEAT_TOLERANCE = 15.0
DEFAULT_SLAVE_PORT = 7236
# Master configs
MIN_HEARTBEAT_CHECK_SPAN = 0.1
DEFAULT_HEARTBEAT_CHECK_SPAN = 1.0
DEFAULT_MASTER_PORT = 7235
# Two-side configs
DEFAULT_CHANNEL = 0
|
the-stack_106_19723
|
# -*- coding: utf-8 -*-
import random
import re
import jasperpath
import pygame
import time
#mark1
new_word = "KISS"
WORDS = ("%s" %new_word)
PRIORITY = 4
image = 'kiss.png'
size = width, height = 320, 320
red = (255,0,0)
white = (255,255,255)
black = 0, 0, 0
x=0
y=0
def handle(self, text, mic, profile):
self.blitimg(image, size, black, x, y)
time.sleep(3)
def isValid(text):
"""
        Returns True if the input contains the configured trigger word.
Arguments:
text -- user-input, typically transcribed speech
"""
return bool(re.search(r'\b%s\b' %new_word, text, re.IGNORECASE))
|
the-stack_106_19724
|
import csv
import glob
import os
import xml.etree.ElementTree as ET
import igibson
from igibson.objects.articulated_object import URDFObject
from igibson.utils.assets_utils import download_assets
download_assets()
def get_categories():
dir = os.path.join(igibson.ig_dataset_path, "objects")
return [cat for cat in os.listdir(dir) if os.path.isdir(get_category_directory(cat))]
def get_category_directory(category):
return os.path.join(igibson.ig_dataset_path, "objects", category)
def get_urdf(objdir):
return os.path.join(objdir, os.path.basename(objdir) + ".urdf")
def get_obj(objdir):
return URDFObject(get_urdf(objdir), name="obj", model_path=objdir)
def get_metadata_filename(objdir):
return os.path.join(objdir, "misc", "metadata.json")
def main():
# Collect the relevant categories.
categories = get_categories()
# Now collect the actual objects.
objects = []
for cat in categories:
cd = get_category_directory(cat)
for obj in os.listdir(cd):
objects.append((cat, obj))
scene_files = list(glob.glob(os.path.join(igibson.ig_dataset_path, "scenes", "**", "*task*.urdf"), recursive=True))
by_scene = {}
by_object = {x: [] for x in objects}
for sf in scene_files:
tree = ET.parse(sf)
sn = os.path.splitext(os.path.basename(sf))[0]
scene_objs = []
for pair in objects:
nodes = tree.findall(".//link[@category='%s'][@model='%s']" % pair)
if nodes:
scene_objs.append(pair)
by_object[pair].append(sn)
by_scene[sn] = scene_objs
print("%d objects in %s" % (len(scene_objs), sn))
with open("by_object.csv", "w") as f:
w = csv.writer(f)
w.writerows(["%s/%s" % pair] + scenes for pair, scenes in sorted(by_object.items()))
with open("by_scene.csv", "w") as f:
w = csv.writer(f)
w.writerows([scene] + ["%s/%s" % pair for pair in objects] for scene, objects in sorted(by_scene.items()))
if __name__ == "__main__":
main()
|
the-stack_106_19726
|
'''
enter: queue of cars in entry order
leave: queue of cars in exit order
If the car at the front of enter has already left,
    remove it from enter.
If the front of enter equals the front of leave,
    it is not an overtake;
otherwise,
    the leaving car overtook.
'''
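# Worked example (illustrative): enter = [A, B, C], leave = [B, A, C].
# B leaves while A, which entered earlier, is still on the road, so B must
# have overtaken at least one car -> sol = 1; A and C then leave in entry order.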
from collections import deque
import sys
input = sys.stdin.readline
# input
N = int(input())
enter = deque([input().rstrip() for _ in range(N)])
leave = deque([input().rstrip() for _ in range(N)])
# process
sol = 0
left_car_names = set()
while leave:
while enter and enter[0] in left_car_names:
enter.popleft()
if enter[0] == leave[0]:
enter.popleft()
left_car_names.add(leave.popleft())
else:
sol += 1
left_car_names.add(leave.popleft())
# output
print(sol)
|
the-stack_106_19727
|
import argparse
import torch.nn as nn
from util.misc import *
from util.graph_def import *
from models.nets import ARCHITECTURES
from data.loaders import load_data, DATASETS
from util.schedules import linear_interpolation
from util.hessian import hessian_spectral_norm_approx
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
# Experiment params
parser.add_argument('--dataset', default=None, choices=DATASETS,
help='dataset to train on')
# Architecture and init
parser.add_argument('--nn', default=None, choices=ARCHITECTURES,
help='neural network architecture')
parser.add_argument('--hidden_size', default=128, type=int,
help='number of hidden units for MLP')
parser.add_argument('--num_layers', default=2, type=int,
help='number of hidden layers for MLP/CNN')
parser.add_argument('--num_blocks', default=2, type=int,
help='number of residual blocks for ResNet')
parser.add_argument('--wrn_n', default=6, type=int,
help='N for WRN (number of blocks per stage, with num_layers=6N+4)')
parser.add_argument('--wrn_k', default=2, type=int,
help='k for WRN (widening factor)')
parser.add_argument('--wrn_reduced_memory', default=False, action='store_true',
help='Use stride=2 in WRN\'s conv1 to reduce memory footprint in very deep nets')
parser.add_argument('--init', default='orthogonal_proposed',
choices=['he', 'orthogonal',
'he_datadep', 'orthogonal_datadep',
'he_proposed', 'orthogonal_proposed'],
help='Initialization scheme.\n'
'he/orthogonal: pytorch default WN init with He/orthogonal init for weights\n'
'{he/orthogonal}_datadep: data-dependent WN init with He/orthogonal init for weights\n'
'{he/orthogonal}_proposed: proposed WN init with He/orthogonal init for weights')
parser.add_argument('--init_extra_param', default=None, choices=[None, 'hanin'],
help='extra param for WRN init; used for baselines in the 10k layer experiments mostly')
parser.add_argument('--weight_norm', default=False, action='store_true',
help='whether to use Weight Normalization')
parser.add_argument('--batch_norm', default=False, action='store_true',
help='whether to use Batch Normalization')
# Hyperparameters
parser.add_argument('--seed', default=1, type=int,
help='random seed')
parser.add_argument('--num_epochs', default=200, type=int,
help='number of optimization epochs')
parser.add_argument('--optimizer', default='sgd', choices=['sgd'],
help='optimizer type')
parser.add_argument('--batch_size', default=128, type=int,
help='batch size')
parser.add_argument('--mini_batch_size', default=None, type=int,
help='for very large models, the batch size will be split into several batches of this size')
parser.add_argument('--lr', default=0.01, type=float,
help='initial learning rate')
parser.add_argument('--lr_annealing_type', default=0, type=int, choices=list(range(MAX_LR_SCHEDULE + 1)),
help='lr annealing type')
parser.add_argument('--momentum', default=0, type=float,
help='momentum for SGD')
parser.add_argument('--weight_decay', default=0, type=float,
help='weight decay rate')
parser.add_argument('--cutout', default=False, action='store_true',
help='whether to use cutout')
parser.add_argument('--warmup_epochs', default=0, type=float,
help='duration of lr warmup period in epochs')
parser.add_argument('--warmup_lr', default=0, type=float,
help='initial lr for warmup')
# Validation set
parser.add_argument('--val_fraction', default=0.1, type=float,
help='fraction of withheld validation data')
# Logging
parser.add_argument('--save_interval', default=1, type=int,
help='number of epochs between checkpoints')
parser.add_argument('--model', default=None,
help='model name (will log to log/model_prefix/model_name)')
parser.add_argument('--model_prefix', default=None,
help='prefix for the model name (will log to log/model_prefix/model_name)')
parser.add_argument("--tb", action="store_true", default=False,
help="log into Tensorboard")
# Other
parser.add_argument('--cudnn_deterministic', default=False, action='store_true',
help='disable stochastic cuDNN operations (enable them for faster execution)')
parser.add_argument('--log_every_iter', default=False, action='store_true',
help='whether to log train loss after every SGD update')
parser.add_argument('--hessian', default=False, action='store_true',
help='whether to compute spectral norm instead of training')
args = parser.parse_args()
# Make sure that we chose a valid init scheme
if 'proposed' in args.init or 'datadep' in args.init:
assert args.weight_norm, "'{}' init will only work with Weight Normalized networks".format(args.init)
if args.mini_batch_size is None:
args.mini_batch_size = args.batch_size
assert args.batch_size % args.mini_batch_size == 0
# Set random seed
set_seed(args.seed)
if args.cudnn_deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Set up directories
model_name = get_model_name(args)
save_dir = get_model_dir(model_name)
# Load training status
try:
status = load_status(save_dir)
except OSError:
status = {"num_epochs": 0, "sgd_steps": 0, "best_val_acc": -1., "test_acc": -1., "best_model_epoch": -1}
# Save config and status files
save_config(args, save_dir)
save_status(status, save_dir)
# Set up loggers
logger = get_loggers(save_dir)
logger.info("\nLogging to %s\n" % os.path.abspath(save_dir))
# Create data loaders
trainloader, validloader, testloader, num_classes = load_data(args)
input_size = get_input_size(args)
# Create model
sample_batch, _ = iter(trainloader).__next__()
model = create_model(args, input_size, num_classes, sample_batch=sample_batch)
logger.info(model)
# Create optimizer
optimizer = create_optimizer(model, args)
try:
load_model(model, save_dir, 'model')
load_model(optimizer, save_dir, 'optimizer')
loaded_model = True
except Exception:
loaded_model = False
if torch.cuda.is_available():
model.cuda()
# Ugly fix to a bug in PyTorch with optimizer loading when CUDA is available
# https://github.com/pytorch/pytorch/issues/2830
if loaded_model:
optimizer = create_optimizer(model, args)
load_model(optimizer, save_dir, 'optimizer')
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
# CSV logger
csv_file, csv_writer = get_csv_writer(save_dir)
csv_header = ["epoch", "train_loss", "val_loss", "train_acc", "val_acc", "lr"]
if not loaded_model: # avoid writing a header in the middle of the file
csv_writer.writerow(csv_header)
csv_file.flush()
# Tensorboard
if args.tb:
from tensorboardX import SummaryWriter
tb_writer = SummaryWriter(save_dir)
num_epochs = status["num_epochs"]
sgd_steps = status["sgd_steps"]
best_val_acc = status["best_val_acc"]
logger.info("Parameter count: %s" % pretty_number(sum(p.numel() for p in model.parameters() if p.requires_grad)))
if loaded_model:
logger.info("Model loaded successfully\n")
else:
logger.info("Training from scratch\n")
# Train
try:
batches_per_update = args.batch_size // args.mini_batch_size
lr_scheduler = create_lr_schedule(args)
criterion = nn.CrossEntropyLoss()
if args.hessian:
spectral_norm = hessian_spectral_norm_approx(model, trainloader, criterion, M=40, seed=args.seed, logger=logger)
exit(0)
while num_epochs < args.num_epochs:
# Update learning rate
lr = lr_scheduler.value(num_epochs)
for param_group in optimizer.param_groups:
param_group['lr'] = lr * model.get_lr_multiplier(param_group)
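        # During warmup, precompute a per-mini-batch lr list that ramps from warmup_lr up to the
        # scheduled lr (quadratically in the warmup progress); outside warmup the per-batch list is disabled.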
if num_epochs < args.warmup_epochs:
num_batches = len(trainloader)
offset = int(args.warmup_lr == 0)
alphas = [(num_epochs + i / num_batches) / args.warmup_epochs for i in range(offset, num_batches+offset)]
lr_list = [linear_interpolation(args.warmup_lr, lr, alpha ** 2) for alpha in alphas]
else:
lr_list = None
train_loss, train_acc = train_epoch(model, optimizer, trainloader, criterion,
batches_per_update=batches_per_update,
log_fn=logger.info if args.log_every_iter else None,
lr_list=lr_list)
val_loss, val_acc = evaluate_model(model, validloader, criterion)
# Log to file and stdout
logger.info('[Epoch %d] train_loss = %.5f val_loss = %.5f train_acc = %.5f val_acc = %.5f' %
(num_epochs, train_loss, val_loss, train_acc, val_acc))
# Log to CSV file
csv_data = [num_epochs, train_loss, val_loss, train_acc, val_acc, lr]
csv_writer.writerow(csv_data)
csv_file.flush()
# Log to TensorBoard
if args.tb:
for field, value in zip(csv_header, csv_data):
tb_writer.add_scalar(field, value, csv_data[0]) # csv_data[0] = num_epochs
# Save best model
if val_acc > best_val_acc:
best_val_acc = val_acc
status["best_model_epoch"] = num_epochs
save_model(model, save_dir, 'model_best')
# Save last model
save_model(model, save_dir, 'model')
        save_model(optimizer, save_dir, 'optimizer')
# Update and save status
num_epochs += 1
sgd_steps += len(trainloader)
status["num_epochs"] = num_epochs
status["sgd_steps"] = sgd_steps
status["best_val_acc"] = best_val_acc
save_status(status, save_dir)
logger.info("Finished training!")
except KeyboardInterrupt:
logger.info("\nCTRL+C received. Stopping training...")
except Exception as e:
logger.info("Something went wrong:")
logger.info(e)
logger.info("Stopping training...")
finally:
if model_exists(save_dir, 'model_best'):
if testloader is not None:
logger.info("Evaluating best model (epoch %d)..." % status["best_model_epoch"])
load_model(model, save_dir, 'model_best')
_, test_acc = evaluate_model(model, testloader)
status["test_acc"] = test_acc
logger.info("Test accuracy: %.5f" % test_acc)
save_status(status, save_dir)
else:
logger.info('No test data was provided. Skipping evaluation...')
status["test_acc"] = status["best_val_acc"]
save_status(status, save_dir)
logger.info("Validation accuracy: %.5f" % status["best_val_acc"])
else:
logger.info('No checkpoints found. Skipping evaluation...')
|
the-stack_106_19728
|
# -*- coding: utf-8 -*-
import numpy as np
import torch
from torch import nn
from kbcr.models import ComplEx, Multi
from kbcr.models.reasoning import SimpleHoppy
from kbcr.reformulators import LinearReformulator, AttentiveReformulator
import pytest
@pytest.mark.light
def test_multi():
nb_entities = 10
nb_predicates = 5
embedding_size = 10
init_size = 1.0
rs = np.random.RandomState(0)
for _ in range(16):
for nb_hops in range(1, 6):
for use_attention in [True, False]:
for pt in {'max', 'min', 'sum', 'mixture'}:
with torch.no_grad():
entity_embeddings = nn.Embedding(nb_entities, embedding_size * 2, sparse=True)
predicate_embeddings = nn.Embedding(nb_predicates, embedding_size * 2, sparse=True)
entity_embeddings.weight.data *= init_size
predicate_embeddings.weight.data *= init_size
base = ComplEx(entity_embeddings)
models = []
for i in range(nb_hops):
if use_attention:
reformulator = AttentiveReformulator(i, predicate_embeddings)
else:
reformulator = LinearReformulator(i, embedding_size * 2)
h_model = SimpleHoppy(base, entity_embeddings, hops=reformulator)
models += [h_model]
model = Multi(models=models, pooling_type=pt, embedding_size=embedding_size * 2)
xs = torch.from_numpy(rs.randint(nb_entities, size=32))
xp = torch.from_numpy(rs.randint(nb_predicates, size=32))
xo = torch.from_numpy(rs.randint(nb_entities, size=32))
xs_emb = entity_embeddings(xs)
xp_emb = predicate_embeddings(xp)
xo_emb = entity_embeddings(xo)
scores = model.forward(xp_emb, xs_emb, xo_emb)
inf = model.score(xp_emb, xs_emb, xo_emb)
scores_sp, scores_po = scores
inf = inf.cpu().numpy()
scores_sp = scores_sp.cpu().numpy()
scores_po = scores_po.cpu().numpy()
for i in range(xs.shape[0]):
np.testing.assert_allclose(inf[i], scores_sp[i, xo[i]], rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(inf[i], scores_po[i, xs[i]], rtol=1e-5, atol=1e-5)
if __name__ == '__main__':
pytest.main([__file__])
|
the-stack_106_19730
|
#!/usr/bin/env python
"""
SETUP.py - Setup utility for TAMOC: Texas A&M Oilspill Calculator
This script manages the installation of the TAMOC package into a standard
Python distribution.
For more information on TAMOC, see README.txt, LICENSE.txt, and CHANGES.txt.
Notes
-----
To install, use:
> python setup.py build
> python setup.py install
To uninstall, use:
> pip uninstall TAMOC
To create a source distribution, use:
> python setup.py sdist --formats=gztar,zip
Author
------
S. Socolofsky, January 2012, Texas A&M University <[email protected]>.
"""
import os
import setuptools
from numpy.distutils.core import Extension
# Describe some attributes of the software
classifiers = """\
Development Status :: beta
Environment :: Console
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: MIT
Operating System :: OS Independent
Programming Language :: Python
Topic :: Scientific/Engineering
Topic :: Software Development :: Libraries :: Python Modules
"""
# Define the sample programs to include
bin_files = ['./bin/dbm/air_eos.py',
'./bin/dbm/co2_eos.py',
'./bin/dbm/dead_oil.py',
'./bin/dbm/droplet_rise.py',
'./bin/dbm/equilibrium.py',
'./bin/dbm/gas_bubbles.py',
'./bin/dbm/hydrocarbon_drops.py',
'./bin/ambient/profile_extending.py',
'./bin/ambient/profile_append.py',
'./bin/ambient/profile_from_ctd.py',
'./bin/ambient/profile_from_lab.py',
'./bin/ambient/profile_from_roms.py',
'./bin/ambient/profile_from_txt.py',
'./bin/sbm/bubble.py',
'./bin/sbm/drop_biodeg.py',
'./bin/sbm/drop.py',
'./bin/sbm/sbm_file_io.py',
'./bin/sbm/particle.py',
'./bin/sbm/seep_bubble.py',
'./bin/spm/spm_blowout_sim.py',
'./bin/spm/lake_bub.py',
'./bin/spm/lake_part.py',
'./bin/spm/spm_file_io.py',
'./bin/sintef/particle_size_distribution.py',
'./bin/psm/blowout_jet.py',
'./bin/psm/oil_jet.py',
'./bin/params/scales.py',
'./bin/bpm/bpm_blowout_sim.py',
'./bin/bpm/crossflow_plume.py',
'./bin/bpm/blowout_obj.py']
# Define the external Fortran sources
ext_dbm_f = Extension(name = 'dbm_f',
sources = ['tamoc/src/dbm_eos.f95',
'tamoc/src/dbm_phys.f95',
'tamoc/src/math_funcs.f95'])
def get_version(pkg_name):
"""
Reads the version string from the package __init__ and returns it
"""
with open(os.path.join(pkg_name, "__init__.py")) as init_file:
for line in init_file:
parts = line.strip().partition("=")
if parts[0].strip() == "__version__":
return parts[2].strip().strip("'").strip('"')
return None
# Provide the setup utility
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(
name='TAMOC',
version=get_version("tamoc"),
description='Texas A&M Oilspill Calculator',
long_description=open('README.rst').read(),
license='LICENSE.txt',
author='Scott A. Socolofsky',
author_email='[email protected]',
url="https://ceprofs.civil.tamu.edu/ssocolofsky/",
scripts=bin_files,
packages=['tamoc'],
package_data={'tamoc': ['data/*.csv', 'data/*.cnv', 'data/*.dat']},
platforms=['any'],
ext_package='tamoc',
ext_modules=[ext_dbm_f],
        classifiers=list(filter(None, classifiers.split("\n"))),
)
|
the-stack_106_19731
|
from collections import defaultdict
from insights.core import filters
from insights.parsers.ps import PsAux, PsAuxcww
from insights.specs import Specs
from insights.specs.default import DefaultSpecs
import pytest
def setup_function(func):
if func is test_get_filter:
filters.add_filter(Specs.ps_aux, "COMMAND")
if func is test_get_filter_registry_point:
filters.add_filter(Specs.ps_aux, "COMMAND")
filters.add_filter(DefaultSpecs.ps_aux, "MEM")
if func is test_filter_dumps_loads:
filters.add_filter(Specs.ps_aux, "COMMAND")
def teardown_function(func):
if func is test_get_filter:
del filters.FILTERS[Specs.ps_aux]
if func is test_get_filter_registry_point:
del filters.FILTERS[Specs.ps_aux]
del filters.FILTERS[DefaultSpecs.ps_aux]
if func is test_filter_dumps_loads:
del filters.FILTERS[Specs.ps_aux]
if func is test_add_filter_to_parser:
del filters.FILTERS[Specs.ps_aux]
if func is test_add_filter_to_parser_patterns_list:
del filters.FILTERS[Specs.ps_aux]
def test_filter_dumps_loads():
r = filters.dumps()
assert r is not None
filters.FILTERS = defaultdict(set)
filters.loads(r)
assert Specs.ps_aux in filters.FILTERS
assert filters.FILTERS[Specs.ps_aux] == set(["COMMAND"])
def test_get_filter():
f = filters.get_filters(Specs.ps_aux)
assert "COMMAND" in f
f = filters.get_filters(DefaultSpecs.ps_aux)
assert "COMMAND" in f
def test_get_filter_registry_point():
s = set(["COMMAND", "MEM"])
f = filters.get_filters(DefaultSpecs.ps_aux)
assert f & s == s
f = filters.get_filters(Specs.ps_aux)
assert "COMMAND" in f
assert "MEM" not in f
def test_add_filter_to_parser():
filter_string = "bash"
filters.add_filter(PsAux, filter_string)
spec_filters = filters.get_filters(Specs.ps_aux)
assert filter_string in spec_filters
parser_filters = filters.get_filters(PsAux)
assert not parser_filters
def test_add_filter_to_parser_patterns_list():
filters_list = ["bash", "systemd", "Network"]
filters.add_filter(PsAux, filters_list)
spec_filters = filters.get_filters(Specs.ps_aux)
assert all(f in spec_filters for f in filters_list)
parser_filters = filters.get_filters(PsAux)
assert not parser_filters
def test_add_filter_to_parser_non_filterable():
filter_string = "bash"
filters.add_filter(PsAuxcww, filter_string)
spec_filters = filters.get_filters(Specs.ps_auxcww)
assert not spec_filters
parser_filters = filters.get_filters(PsAuxcww)
assert not parser_filters
def test_add_filter_exception_not_filterable():
with pytest.raises(Exception):
filters.add_filter(Specs.ps_auxcww, "bash")
def test_add_filter_exception_raw():
with pytest.raises(Exception):
filters.add_filter(Specs.metadata_json, "[]")
def test_add_filter_exception_empty():
with pytest.raises(Exception):
filters.add_filter(Specs.ps_aux, "")
|
the-stack_106_19733
|
import os
import mock
import jukebox.scanner
@mock.patch('mutagen.File')
@mock.patch('os.walk')
def test_dir_scanner_scan(walk, File):
walk.return_value = [
('base', [], ['file_name']),
]
File.return_value = {
'title': ['fun1', 'fun2'],
'album': [],
'artist': ['bob'],
}
storage = mock.Mock(name='storage')
scanner = jukebox.scanner.DirScanner(storage, 'path')
scanner.scan()
walk.assert_called_with('path')
File.assert_called_with(os.path.join('base', 'file_name'), easy=True)
song = storage.add_song.call_args[0][0]
assert 'fun1' == song.title
    assert song.album is None
assert 'bob' == song.artist
|
the-stack_106_19734
|
# ----------------------------------------------------------------------
# Alcatel.OS62xx.get_vlans
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetvlans import IGetVlans
from noc.core.text import parse_table
class Script(BaseScript):
name = "Alcatel.OS62xx.get_vlans"
interface = IGetVlans
def execute(self):
vlans = self.cli("show vlan")
r = []
for v in parse_table(vlans, allow_wrap=True):
r += [{"vlan_id": int(v[0]), "name": v[1]}]
return r
|
the-stack_106_19735
|
# Python file with all essential functions used during CiliateAnnotation program execution
import subprocess
import sys
import datetime
import time
import os.path
import errno
import math
import regex as re
from datetime import datetime
from settings import *
from functools import reduce
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Define log comment function for the ease of use
def logComment(comment):
logComment.logFile.write(datetime.now().strftime("%I:%M%p %B %d %Y") + ' - ' + comment + '\n')
logComment.logFile.flush()
# Log file
logComment.logFile = None
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Define create directory function
def safeCreateDirectory(dir):
try:
os.makedirs(dir)
logComment('Directory ' + dir + ' created')
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function creates all necessary output folders and files
def createOutputDirectories(Output_dir):
# Create BLAST database directory
safeCreateDirectory(Output_dir + '/blast')
    # Create hsp, hsp/rough, hsp/fine directories
safeCreateDirectory(Output_dir + '/hsp_mac')
safeCreateDirectory(Output_dir + '/hsp_mac/rough')
safeCreateDirectory(Output_dir + '/hsp_mac/fine')
# Create output directory for MAC mds
safeCreateDirectory(Output_dir + '/Annotated_MDS')
# Create output directory for MIC annotation
safeCreateDirectory(Output_dir + '/MIC_Annotation')
    # Create output directory for masked contig sequences
safeCreateDirectory(Output_dir + '/Masked_Contigs')
# Create database input directory and database load files
if Options['DatabaseUpdate']:
safeCreateDirectory(Output_dir + '/Database_Input')
temp = open(Output_dir + '/Database_Input/hsp.tsv', 'w')
temp.close()
temp = open(Output_dir + '/Database_Input/mds.tsv', 'w')
temp.close()
temp = open(Output_dir + '/Database_Input/tel.tsv', 'w')
temp.close()
temp = open(Output_dir + '/Database_Input/arr.tsv', 'w')
temp.close()
# Create output gff3 directory and files
safeCreateDirectory(Output_dir + '/GFF')
gffFile = open(Output_dir + "/GFF/mac_annotation.gff3", "w")
gffFile.write("##gff-version 3\n")
gffFile.close()
gffFile = open(Output_dir + "/GFF/mic_annotation.gff3", "w")
gffFile.write("##gff-version 3\n")
gffFile.close()
# Create output directory and files for MIC scrambling patterns
safeCreateDirectory(Output_dir + "/Scrambling")
temp = open(Output_dir + "/Scrambling/all.tsv", "w")
temp.close()
temp = open(Output_dir + "/Scrambling/good_maps.tsv", "w")
temp.close()
# Create Output directory for MIC annotation
safeCreateDirectory(Output_dir + "/MDS_IES_MIC")
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function runs rough BLAST and returns the hsp result list
def run_Rough_BLAST(Output_dir, contig):
# Set up parameters
param = ("blastn -task " + Options['RoughBlastTask'] + " -word_size " + str(Options['RoughBlastWordSize']) + " -max_hsps 10000 -max_target_seqs 10000 -dust").split(" ")
if Options['RoughBlastDust']:
param.append("yes")
else:
param.append("no")
if Options['RoughBlastUngapped']:
param.append("-ungapped")
if Options['BlastMaskLowercase']:
param.append("-lcase_masking")
param.append("-query")
param.append(Output_dir + "/Masked_Contigs/" + str(contig) + ".fa")
param.append("-db")
param.append(Output_dir + "/blast/mic")
param.append("-num_threads")
param.append(str(Options['ThreadCount']))
param.append("-outfmt")
param.append("10 qseqid sseqid pident length mismatch qstart qend sstart send evalue bitscore qcovs")
# Run BLAST command
rough_out = subprocess.check_output(param)
# Filter empty rows
roughVal = [x.rstrip() for x in rough_out.decode(sys.stdout.encoding).split('\n') if x != "" and float(x.rstrip().split(",")[11]) >= Options['MIC_Coverage_Threshold']]
# Check if there are any hsps
if not roughVal:
return ""
# Save rough results to file
rough_file = open(Output_dir + '/hsp_mac/rough/' + contig + '.csv', 'w')
for x in roughVal[:-1]:
rough_file.write(x + '\n')
rough_file.write(roughVal[-1])
rough_file.close()
# Filter duplicates out and parse result
res = [x.split(',') for x in list(set(roughVal))]
return res
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function runs fine BLAST and returns hsp result list
def run_Fine_BLAST(Output_dir, contig):
param = ("blastn -task " + Options['FineBlastTask'] + " -word_size " + str(Options['FineBlastWordSize']) + " -max_hsps 10000 -max_target_seqs 10000 -dust").split(" ")
if Options['FineBlastDust']:
param.append("yes")
else:
param.append("no")
if Options['FineBlastUngapped']:
param.append("-ungapped")
if Options['BlastMaskLowercase']:
param.append("-lcase_masking")
param.append("-query")
param.append(Output_dir + "/Masked_Contigs/" + str(contig) + ".fa")
param.append("-db")
param.append(Output_dir + "/blast/mic")
param.append("-num_threads")
param.append(str(Options['ThreadCount']))
param.append("-outfmt")
param.append("10 qseqid sseqid pident length mismatch qstart qend sstart send evalue bitscore qcovs")
fine_out = subprocess.check_output(param)
# Filter empty rows
fineVal = [x.rstrip() for x in fine_out.decode(sys.stdout.encoding).split('\n') if x != "" and float(x.rstrip().split(",")[11]) >= Options['MIC_Coverage_Threshold']]
# Check if there are any hsps
if not fineVal:
return ""
# Save fine results to file
fine_file = open(Output_dir + '/hsp_mac/fine/' + str(contig) + '.csv', 'w')
for x in fineVal[:-1]:
fine_file.write(x + "\n")
fine_file.write(fineVal[-1])
fine_file.close()
# Filter duplicates out and parse result
res = [x.split(',') for x in list(set(fineVal))]
return res
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function goes through the list of high-scoring pairs and updates the corresponding MIC hsp for later use
def update_MIC_hsp(MIC_maps, Output_dir):
# Sort hsp according to MIC name
MIC_maps.sort(key=lambda x: x[1])
# Get the first MIC and open corresponding file
current = MIC_maps[0][1]
out_file = open(Output_dir + "/hsp_mic/" + current + ".csv", "a")
# For each hsp, store it into the file
for hsp in MIC_maps:
# if we have a different mic, open a new file
if current != hsp[1]:
current = hsp[1]
out_file.close()
out_file = open(Output_dir + "/hsp_mic/" + current + ".csv", "a")
# Flip coordinates, to indicate inversion
start = hsp[7] if int(hsp[7]) < int(hsp[8]) else hsp[8]
end = hsp[8] if start == hsp[7] else hsp[7]
# Store hsp value with query and search values flipped
out_file.write(hsp[1] + "," + hsp[0] + "," + hsp[2] + "," + hsp[3] + "," + hsp[4] + "," + start + "," + end + "," + hsp[5] + "," + hsp[6] + "," + hsp[9] + "," + hsp[10] + "," + hsp[11] + "\n")
out_file.close()
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function reads hsp file that was generated by run_Rough_BLAST or run_Fine_BLAST functions previously
def readBLAST_file(Filename):
# Initialize list to read input
res = list()
# Read file line by line
for line in open(Filename, "r"):
if line == "":
continue
# Parse and append line into the res list
parsed = line.rstrip().split(",")
if float(parsed[11]) >= Options['MIC_Coverage_Threshold']:
res.append(parsed)
# Return list if it not empty and empty string otherwise
if res:
return res
else:
return ""
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function goes through the list of high scoring pairs that are associated with the MIC and constructs MDSs for the MAC
def getMDS_Annotation(MDS_List, HSP_List, MAC_start, MAC_end):
# Get Gaps List
Gaps = getGapsList(MDS_List, MAC_start, MAC_end)
# Build list of MDSs
for hsp in HSP_List:
mds_toAdd = [int(hsp[5]), int(hsp[6]),0]
# If current MDS does not overlap with any gap, then skip it
if not [x for x in Gaps if x[1] > mds_toAdd[0] and x[0] < mds_toAdd[1]]:
continue
# Go through MDSs that overlap with current MDS and see if any can be merged or removed
for x in sorted([mds for mds in MDS_List if mds[1] > mds_toAdd[0] and mds[0] < mds_toAdd[1]]):
# If x is a subset of mds_toAdd, then remove x
if x[0] >= mds_toAdd[0] and x[1] <= mds_toAdd[1]:
MDS_List.remove(x)
# Check if two MDSs can be merged
elif int((mds_toAdd[0] + mds_toAdd[1])/2) in range(x[0], x[1]) or int((x[0] + x[1])/2) in range(mds_toAdd[0], mds_toAdd[1]):
mds_toAdd[0] = min(mds_toAdd[0], x[0])
mds_toAdd[1] = max(mds_toAdd[1], x[1])
MDS_List.remove(x)
# Add MDS to the MDS list, update gaps list, check if we are done
MDS_List.append(mds_toAdd)
Gaps = getGapsList(MDS_List, MAC_start, MAC_end)
if not Gaps:
break
# Sort the MDS List
MDS_List.sort(key=lambda x: x[0])
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function takes a list of MDSs (sorted by the MDS beginning coordinate) and returns the covered intervals of the MAC
def getCovering_Intervals(MDS_List):
# Construct intervals by using MDS List and checking for gaps between consecutive MDSs
MAC_Interval = list()
for mds in sorted(MDS_List, key = lambda x: x[0]):
if not MAC_Interval:
MAC_Interval.append([mds[0], mds[1]])
else:
if MAC_Interval[-1][1] >= mds[0] - 1:
MAC_Interval[-1][1] = max(mds[1], MAC_Interval[-1][1])
else:
MAC_Interval.append([mds[0], mds[1]])
return MAC_Interval
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function checks whether there are any gaps in the MAC annotation
def addGaps(MDS_List, MAC_start, MAC_end):
# If MDS List is empty, then return the whole MAC interval as a gap
if not MDS_List:
MDS_List.append([MAC_start, MAC_end, 1])
return
# Get the list of covered MAC Interval(s)
MAC_Interval = getCovering_Intervals(MDS_List)
# If we have more than one interval, then there are gaps we need to add to the annotation
if len(MAC_Interval) > 1:
prev = MAC_Interval[0]
for interv in MAC_Interval[1:]:
MDS_List.append([prev[1]+1, interv[0]-1,1])
prev = interv
    # Check for gaps at the beginning of MAC and at the end of MAC
if MAC_Interval[0][0] - MAC_start > 0:
MDS_List.append([MAC_start, MAC_Interval[0][0] - 1, 1])
if MAC_end - MAC_Interval[-1][1] > 0:
MDS_List.append([MAC_Interval[-1][1] + 1, MAC_end, 1])
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function assigns MDS number to hsps that correspond to some MDS in MAC
def mapHSP_to_MDS(MIC_maps, MDS_List):
# Go through the list of hsps and assign MDS number, or -1 to each hsp
for hsp in MIC_maps:
# Set hsp to no MDS for now
hsp.append(-1)
# Get list of MDSs that were mapped from current hsp
overlap = [x for x in MDS_List if (x[2] != 1) and (int(hsp[5]) < x[1] and int(hsp[6]) > x[0])]
if not overlap:
continue
# Define reduce function to decide what MDS the hsp is going to match the best (has biggest overlap)
match = lambda a, b: a if min(a[1], int(hsp[6])) - max(a[0], int(hsp[5])) > min(b[1], int(hsp[6])) - max(b[0], int(hsp[5])) else b
matched_MDS = reduce(match, overlap)
# Assign hsp to MDS
hsp[-1] = matched_MDS[-1]
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function calculates the list of gaps in the MAC annotation
def getGapsList(MDS_List, MAC_start, MAC_end):
# If no MDS, return the whole contig interval
if not MDS_List:
return [[MAC_start, MAC_end]]
# Sort MDS list
MDS_List.sort(key=lambda x: x[0])
    # Add gap at the beginning, if needed
Gaps = list()
if MDS_List[0][0] - MAC_start > 0:
Gaps.append([MAC_start, MDS_List[0][0]-1])
# If there are more than one MDS, then add gaps in between MDSs
if len(MDS_List) > 1:
prev = MDS_List[0]
for x in MDS_List[1:]:
if x[0] - prev[1] > 0:
Gaps.append([prev[1], x[0]])
prev = x
# Check if there is a gap at the end
if MAC_end - MDS_List[-1][1] > 0:
Gaps.append([MDS_List[-1][1]+1, MAC_end])
return Gaps
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function outputs annotation results into the database load file
def updateDatabaseInput(MDS_List, MIC_maps, MIC_to_HSP, left_Tel, right_Tel, mac_length, Output_dir, contig):
# If MIC_maps are not empty, then update hsp file
if MIC_maps:
# Open hsp file to append
hspFile = open(Output_dir + '/Database_Input/hsp.tsv', 'a')
# Output hsps to file
for hsp in MIC_maps:
# Get MIC start, end, and orientation, and telomeres
micStart = hsp[7]
micEnd = hsp[8]
micOrient = "+"
if int(hsp[7]) > int(hsp[8]):
micStart = hsp[8]
micEnd = hsp[7]
micOrient = "-"
tels = 1 if left_Tel != None and right_Tel != None else 0
# Print hsp
hspFile.write("\\N\t\\N\t" + hsp[0] + "\t\\N\t" + str(tels) + "\t" + str(hsp[-1]) + "\t" + hsp[5] + "\t" + hsp[6] + "\t\\N\t" + hsp[1] +
"\t\\N\t" + micStart + "\t" + micEnd + "\t" + micOrient + "\t" + hsp[3] + "\t" + hsp[2] + "\t" + hsp[4] + "\t" + hsp[9] + "\t" + hsp[10] +
"\t" + hsp[11] + "\n")
# close hsp file
hspFile.close()
# If MDS list is not empty, then update mds file
if MDS_List:
# Open mds file to append
mdsFile = open(Output_dir + '/Database_Input/mds.tsv', 'a')
# Output mdss to file
for mds in MDS_List:
#Print mds
mdsFile.write("\\N\t\\N\t" + contig + "\t" + str(mds[-1]) + "\t" + str(mds[0]) + "\t" +
str(mds[1]) + "\t" + str(mds[1] - mds[0] + 1) + "\t" + str(mds[2]) + "\n")
# Close mds file
mdsFile.close()
# Update telomeres file
telFile = open(Output_dir + '/Database_Input/tel.tsv', 'a')
# Get number of telomeres
tel_num = 0
if left_Tel:
tel_num += 1
if right_Tel:
tel_num += 1
# Output to file
telFile.write("\\N\t\\N\t\\N\t" + contig + "\t" + str(mac_length) + "\t" + str(tel_num) + "\t")
# Info about left telomere
if left_Tel:
telFile.write(str(left_Tel[0]) + "\t" + str(left_Tel[1]) + "\t" + str(left_Tel[1] - left_Tel[0] + 1) + "\t")
else:
telFile.write("\\N\t\\N\t0\t")
    # Info about right telomere
if right_Tel:
telFile.write(str(right_Tel[0]) + "\t" + str(right_Tel[1]) + "\t" + str(right_Tel[1] - right_Tel[0] + 1) + "\n")
else:
telFile.write("\\N\t\\N\t0\n")
telFile.close()
# Update arrange table file
arrFile = open(Output_dir + '/Database_Input/arr.tsv', 'a')
# For each mic to mac map, output database entry
for mic in MIC_to_HSP:
# Get hsp list and declare variables
hsp_list = sorted(MIC_to_HSP[mic], key=lambda x: int(x[7]) if int(x[7]) < int(x[8]) else int(x[8]))
Arrangement = []
nuc_shared = 0
mismatch = 0
dist_mds = set()
# Iterate through hsp_list and get all needed information
for hsp in hsp_list:
Arrangement.append(-hsp[-1] if int(hsp[7]) > int(hsp[8]) else hsp[-1])
nuc_shared += (int(hsp[3]) - int(hsp[4]))
mismatch += int(hsp[4])
dist_mds.add(hsp[-1])
""""# Put Arrangement into the least inversion form
Arrangement_I = []
for i in range(len(Arrangement) - 1, -1, -1):
m = Arrangement[i]
Arrangement_I.append(-1 * m)
if getNumber_Inv_MDS(Arrangement_I) < getNumber_Inv_MDS(Arrangement):
Arrangement = Arrangement_I
#Arrangement = toCanonicalForm(Arrangement, len(MDS_List))
"""
# Build arrangement string
arrangement = ""
for m in Arrangement[:-1]:
if m > 0:
arrangement += str(m) + ":0|"
else:
arrangement += str(-m) + ":1|"
if Arrangement[-1] > 0:
arrangement += str(Arrangement[-1]) + ":0"
else:
arrangement += str(-Arrangement[-1]) + ":1"
# Output arrangement table entry
arrFile.write("\\N\t\\N\t" + contig + "\t\\N\t" + str(mac_length) + "\t" + hsp_list[0][11] + "\t\\N\t" + mic + "\t\\N\t\\N\t\\N\t" + str(nuc_shared) + "\t" +
str(len(dist_mds)) + "\t" + str(mismatch) + "\t" + ("1" if len(MDS_List) == len(dist_mds) else "0") + "\t" +
("1" if is_Scrambled(hsp_list, len(MDS_List), len(MDS_List)==len(dist_mds)) else "0") + "\t" + arrangement + "\n")
arrFile.close()
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function sorts hsp list
def sortHSP_List(MIC_maps):
# Define sort function that will sort by:
    # 1) Higher Coverage, 2) Higher Length, 3) Higher Percent identity match, 4) Lower Bitscore, 5) MIC, 6) the hsp start position in the MAC
    sort_func = lambda x: (float(x[11]), int(x[3]), float(x[2]), -float(x[10]), x[1], -int(x[5]))
# Run sort
MIC_maps.sort(key = sort_func, reverse=True)
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function runs telomeric regular expression, identifies telomere (if any) and stores telomeric sequences in the list
# Note: side = 5 is for 5' telomere and side = 3 is for 3' telomere
def identifyTelomere(reg_exp, seq, tel_seq, side):
# Telomere to return
tel_toReturn = None
# Get the list of telomeric sequences
telomeres = reg_exp.finditer(seq)
tel_positions = sorted([(m.span()[0] + 1, m.span()[1] + 1) for m in telomeres])
# If this is a 5' telomeres
if side == 5:
# Go through each telomeric seq and build a telomere
ind = 0
for coord in tel_positions:
if coord[1] - coord[0] >= Options['TelomereLength'] and coord[0] <= Options['TelomereEndLimit']:
tel_toReturn = [coord[0], coord[1]]
ind += 1
break
if coord[0] > Options['TelomereEndLimit']:
break
ind += 1
# check if left (5') telomeres can be extended
if tel_toReturn and tel_positions:
for tel in tel_positions[ind:]:
# If two telomeric sequences are within tolerance error, merge them
if tel[0] - tel_toReturn[1] > Options["TelomericErrorTolerance"]:
break
tel_toReturn[1] = tel[1]
# If this is a 3' telomeres
elif side == 3:
# Go through each telomeric seq and build a telomere
ind = 0
for coord in reversed(tel_positions):
if coord[1] - coord[0] >= Options['TelomereLength'] and len(seq) - coord[1] <= Options['TelomereEndLimit']:
tel_toReturn = [coord[0], coord[1]]
ind += 1
break
if len(seq) - coord[1] > Options['TelomereEndLimit']:
break
ind += 1
# check if right (3') telomeres can be extended
if tel_toReturn and tel_positions:
for i in range(len(tel_positions) - ind - 1, -1, -1):
tel = tel_positions[i]
# If two telomeric sequences are within tolerance error, merge them
if tel_toReturn[0] - tel[1] > Options["TelomericErrorTolerance"]:
break
tel_toReturn[0] = tel[0]
# We have an error
else:
print("Error in identifyTelomere function, side = ", side, " while allowed values are 3 and 5")
sys.exit()
# Remove too short, non-telomeric sequences
if tel_toReturn:
tel_positions = [x for x in tel_positions if (x[1] - x[0] + 1) >= Options['TelomereLength'] or (tel_toReturn[0] <= x[0] and tel_toReturn[1] >= x[1])]
else:
tel_positions = [x for x in tel_positions if (x[1] - x[0] + 1) >= Options['TelomereLength']]
# Append telomeric positions to tel_seq list
tel_seq += tel_positions
return tel_toReturn
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function updates gff files, option distinguishes between 1 - mac and 2 - mic annotation files
def updateGFF(contig, MDS_List, Output_dir, option):
# Output gff annotation for MAC
if option == 1:
# Open gff file
gff = open(Output_dir + "/GFF/mac_annotation.gff3", "a")
# Output every mds
for mds in MDS_List:
gff.write(contig + "\tMI-ASS\tmds\t" + str(mds[0]) + "\t" + str(mds[1]) + "\t" + ".\t.\t.\tName=mds_" + str(mds[-1]) + ";Target=" + contig + "\n")
gff.close()
# Output gff annotation for MIC
else:
# Open gff file
gff = open(Output_dir + "/GFF/mic_annotation.gff3", "a")
# Output every mds
for mds in MDS_List:
gff.write(contig + "\tMI-ASS\t" + ("mds" if mds[2] == 0 else "ies") + "\t" + str(mds[0]) + "\t" + str(mds[1]) + "\t" + ".\t.\t.\tName=" + ("mds_" if mds[2] == 0 else "ies_") + str(mds[-1]) + ";Target=" + contig + "\n")
gff.close()
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function identifies best MIC contigs from which MAC was mapped and identifies scrambling
def identify_MIC_patterns(MIC_maps, MDS_List, MIC_to_HSP, Output_dir):
if not MIC_maps:
return
# Variables for updating statistics on scrambled and complete
stat_Scrambled = False
stat_Complete = False
stat_CompletScrambled = False
# map MIC contigs to number of distinct mdss it has
cont_to_mds = {}
for mic in MIC_to_HSP:
mdsNUM = len(set([x[-1] for x in MIC_to_HSP[mic]]))
cont_to_mds[mic] = mdsNUM
MICs = list(MIC_to_HSP.keys())
# Sort by:
# 1) The biggest number of distinct MDSs MIC has and 2) The highest MIC coverage
MICs.sort(key=lambda x: (cont_to_mds[x], float(MIC_to_HSP[x][0][11])), reverse=True)
# Output all arrangements
out = open(Output_dir + "/Scrambling/all.tsv", "a")
for mic in MICs:
# Also, build MIC arrangement
Arrangement = []
next = sorted(MIC_to_HSP[mic], key=lambda x: int(x[7]) if int(x[7]) < int(x[8]) else int(x[8]))
out.write(MIC_maps[0][0] + "\t" + mic + "\t" + "{")
for hsp in next[:-1]:
out.write(("-" if int(hsp[7]) > int(hsp[8]) else "") + str(hsp[-1]) + ",")
Arrangement.append(-hsp[-1] if int(hsp[7]) > int(hsp[8]) else hsp[-1])
out.write(("-" if int(next[-1][7]) > int(next[-1][8]) else "") + str(next[-1][-1]) + "}\t")
Arrangement.append(-next[-1][-1] if int(next[-1][7]) > int(next[-1][8]) else next[-1][-1])
# Check if this is a complete mapping
if cont_to_mds[mic] == len(MDS_List):
stat_Complete = True
out.write("Complete\t")
else:
out.write("Incomplete\t")
# check if it is a scrambled contig
if(is_Scrambled(next, len(MDS_List), cont_to_mds[mic] == len(MDS_List))):
stat_Scrambled = True
out.write("Scrambled\t")
# Update stats for complete and scrambled
if cont_to_mds[mic] == len(MDS_List):
stat_CompletScrambled = True
else:
out.write("Non-Scrambled\t")
# Get reduced arrangement and put it into canonical form
relabelMDS(Arrangement)
Arrangement_1 = []
removeRepeatingLetters(Arrangement, Arrangement_1)
mdsNumber = reduceArrangement(Arrangement_1, Arrangement)
Arrangement = toCanonicalForm(Arrangement, mdsNumber)
# Print to file
out.write("{" + arrangementToString(Arrangement) + "}\n")
# Select the best MIC to MAC maps taken from the sorting procedure
best_mic = MICs[0]
hsp_list = sorted(MIC_to_HSP[best_mic], key=lambda x: int(x[7]) if int(x[7]) < int(x[8]) else int(x[8]))
# Process this contig and its hsp
process_MIC_MAC_map(hsp_list, cont_to_mds[best_mic] == len(MDS_List), len(MDS_List), Output_dir)
# If this is a complete map, then check if there are any other good mic maps and process them
if cont_to_mds[best_mic] == len(MDS_List):
for mic in MICs[1:]:
if cont_to_mds[mic] != len(MDS_List):
break
next = sorted(MIC_to_HSP[mic], key=lambda x: int(x[7]) if int(x[7]) < int(x[8]) else int(x[8]))
process_MIC_MAC_map(next, cont_to_mds[mic] == len(MDS_List), len(MDS_List), Output_dir)
# Else, check for other MICs that have similar MDS number and MAC coverage
else:
rest_mic = [x for x in MICs if cont_to_mds[x] == cont_to_mds[best_mic] and float(MIC_to_HSP[x][0][11]) == float(MIC_to_HSP[best_mic][0][11])]
rest_mic.remove(best_mic)
for mic in rest_mic:
next = sorted(MIC_to_HSP[mic], key=lambda x: int(x[7]) if int(x[7]) < int(x[8]) else int(x[8]))
process_MIC_MAC_map(next, cont_to_mds[mic] == len(MDS_List), len(MDS_List), Output_dir)
# Update stats
    if stat_Complete:
        identify_MIC_patterns.complete += 1
    if stat_Scrambled:
        identify_MIC_patterns.scrambled += 1
if stat_CompletScrambled:
identify_MIC_patterns.complete_scrambled += 1
out.close()
identify_MIC_patterns.scrambled = 0
identify_MIC_patterns.complete = 0
identify_MIC_patterns.complete_scrambled = 0
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function checks whether a given arrangement given by hsp list is scrambled
# Note: is_complete can be set to False when it is not known if a map is complete
def is_Scrambled(MIC, mdsNum, is_complete):
# Get string of MIC pattern
s = ""
if is_complete:
# Build string directly
for hsp in MIC[:-1]:
s += ("-" if int(hsp[7]) > int(hsp[8]) else "") + str(hsp[-1]) + ","
s += ("-" if int(MIC[-1][7]) > int(MIC[-1][8]) else "") + str(MIC[-1][-1])
else:
# Get set of MDSs and turn it into sorted list
present_mdss = sorted(list({int(x[-1]) for x in MIC}))
# Get MDS index map
ind = 1
MDS_map = dict()
for mds in present_mdss:
MDS_map[mds] = ind
ind += 1
# Update mdsNum
mdsNum = ind - 1
# Build string
for hsp in MIC[:-1]:
s += ("-" if int(hsp[7]) > int(hsp[8]) else "") + str(MDS_map[hsp[-1]]) + ","
s += ("-" if int(MIC[-1][7]) > int(MIC[-1][8]) else "") + str(MDS_map[MIC[-1][-1]])
# Get regular expressions for non-scrambled patterns
r1 = ""
for i in range(1, mdsNum):
r1 += str(i) + ",(-?[0-9]*,)*"
r1 += str(mdsNum)
r2 = ""
for i in range(mdsNum, 1, -1):
r2 += "-" + str(i) + ",(-?[0-9]*,)*"
r2 += "-1"
#print("Reg exp 1: ", r1)
#print("Reg exp 2: ", r2)
# Check for non-scrambled pattern 1
r1_comp = re.compile(r1)
if r1_comp.search(s):
return False
# Check for non-scrambled pattern 2
r2_comp = re.compile(r2)
if r2_comp.search(s):
return False
# If program have not returned, then it is a scrambled pattern
return True
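# Example behaviour (illustrative, not from the original project): for a complete map whose forward MDSs
# appear in MIC order 1,3,2, neither the ascending nor the descending regular expression matches, so the
# arrangement is reported as scrambled; the orders 1,2,3 and -3,-2,-1 match and are reported non-scrambled.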
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function saves current hsp as best MIC to MAC map, brings MIC arrangement into the standard form and reduces it
def process_MIC_MAC_map(hsp_list, is_complete, mdsNum, Output_dir):
# First, get MIC arrangement
Arrangement_0 = []
if is_complete:
# Build arrangement directly
for hsp in hsp_list:
Arrangement_0.append(-hsp[-1] if int(hsp[7]) > int(hsp[8]) else hsp[-1])
else:
# Get set of MDSs and turn it into sorted list
present_mdss = sorted(list({int(x[-1]) for x in hsp_list}))
# Get MDS index map
ind = 1
MDS_map = dict()
for mds in present_mdss:
MDS_map[mds] = ind
ind += 1
# Update mdsNum
mdsNum = ind - 1
# Build arrangement
for hsp in hsp_list:
Arrangement_0.append(-MDS_map[hsp[-1]] if int(hsp[7]) > int(hsp[8]) else MDS_map[hsp[-1]])
    # Remove consecutive repeating letters (ex: 1, 2, 3, 3, 4, 5, 5, 5 -> 1, 2, 3, 4, 5)
Arrangement = []
removeRepeatingLetters(Arrangement_0, Arrangement)
# Get arrangement in the canonical form
Arrangement = toCanonicalForm(Arrangement, mdsNum)
# Get reduced arrangement
Reduced = []
mdsNum = reduceArrangement(Arrangement, Reduced)
# Put reduced arrangement into the canonical form
Reduced = toCanonicalForm(Reduced, mdsNum)
# Output result
out = open(Output_dir + "/Scrambling/good_maps.tsv", "a")
out.write(hsp_list[0][0] + "\t" + hsp_list[0][1] + "\t" + "{" + arrangementToString(Arrangement) + "}\t{" + arrangementToString(Reduced) + "}\n")
out.close()
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function takes Arrangement, relabels all mdss to start from MDS 1, and returns the mds number
def relabelMDS(Arrangement):
# Get set of MDSs and turn it into sorted list
present_mdss = sorted(list({abs(x) for x in Arrangement}))
# Get MDS index map
ind = 1
MDS_map = dict()
for mds in present_mdss:
MDS_map[mds] = ind
ind += 1
mdsNumber = ind-1
# Relabel arrangement
for i in range(0, len(Arrangement)):
Arrangement[i] = (-MDS_map[abs(Arrangement[i])] if Arrangement[i] < 0 else MDS_map[abs(Arrangement[i])])
return mdsNumber
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function takes Arrangement_init, removes repeating letters and stores result in Arrangement_fin
def removeRepeatingLetters(Arrangement_init, Arrangement_fin):
Arrangement_fin[:] = [Arrangement_init[0]]
if len(Arrangement_init) > 1:
prev = Arrangement_init[0]
for m in Arrangement_init[1:]:
if m != prev:
Arrangement_fin.append(m)
prev = m
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function takes Arrangement, reduces it by merging consecutive mdss, relabels mdss, stores the result into the Arrangement_red list, and returns
# the number of mdss
def reduceArrangement(Arrangement, Arrangement_red):
reduced = [Arrangement[0]]
if len(Arrangement) > 1:
prev = Arrangement[0]
for m in Arrangement[1:]:
# If both positive and increasing, continue
if m > 0 and prev > 0 and m == prev + 1:
prev = m
continue
# If both negative and decreasing, continue
elif m < 0 and prev < 0 and m == prev + 1:
prev = m
continue
# Else, append it to the reduced list
reduced.append(m)
prev = m
# Get index map
ind = 1
Ind_map = dict()
for mds in sorted(list({abs(x) for x in reduced})):
Ind_map[mds] = ind
ind += 1
# Get mds number
mdsNum = ind-1
# Map mdss to for reduced arrangement
Arrangement_red[:] = []
for mds in reduced:
Arrangement_red.append(-Ind_map[abs(mds)] if mds < 0 else Ind_map[abs(mds)])
# Return number of mdss
return mdsNum
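# Worked example (illustrative, not from the original project): reduceArrangement([1, 2, -5, -4, 3], out)
# merges the ascending run 1,2 and the descending run -5,-4 into single blocks, relabels the surviving
# blocks, leaves out == [1, -3, 2], and returns 3.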
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function counts number of inverted MDSs in the arrangement
def getNumber_Inv_MDS(Arrangement):
count = 0
for m in Arrangement:
if m < 0:
count += 1
return count
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function converts an arrangement to a string
def arrangementToString(Arrangement):
s = ""
for m in Arrangement[:-1]:
s += str(m) + ", "
s += str(Arrangement[-1])
return s
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function returns the position of the first inversion in the arrangement
def firstInv(Arrangement):
for i in range(0, len(Arrangement)):
if Arrangement[i] < 0:
return i+1
# No inversions
return 0
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
# This function takes a MIC arrangement and returns its canonical form
def toCanonicalForm(Arrangement, mdsNum):
# Put arrangement in the right order:
# 1) minimal number of inversions, 2) lowest lexicographical order, 3) delay first inversion
# Get the other 3 arrangements first
Arrangement_I = []
for i in range(len(Arrangement) - 1, -1, -1):
m = Arrangement[i]
Arrangement_I.append(-1 * m)
Arrangement_A = []
for m in Arrangement:
if m > 0:
Arrangement_A.append(-1 * (mdsNum + 1 - m))
else:
Arrangement_A.append(mdsNum + 1 - abs(m))
Arrangement_AI = []
for i in range(len(Arrangement_A) - 1, -1, -1):
m = Arrangement_A[i]
Arrangement_AI.append(-1 * m)
    # Put all arrangements in the list and sort it by the above criteria
Arrangement_List = [Arrangement, Arrangement_I, Arrangement_A, Arrangement_AI]
Arrangement_List.sort(key=lambda x: (getNumber_Inv_MDS(x), arrangementToString(x), -firstInv(x)))
return Arrangement_List[0]
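# Worked example (illustrative, not from the original project): toCanonicalForm([-3, -2, -1], 3) compares
# the arrangement, its reversed inverse and the two relabel-complemented forms, and returns [1, 2, 3] as
# the representative with the fewest inversions and the lowest lexicographic order.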
#------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
the-stack_106_19736
|
'''
Common parameters:
- variant_genotypes <numpy float array of shape (n_variants, n_samples, n_alleles [3])>: For every variant and in the gene
(indexed by the first dimension) and every sample (indexed by the second dimension), what are the three probabilities of
it being either: i) homozygous allele-1, ii) heterozygous, iii) homozygous allele-2. The three probabilities must be
non-negative summing up to 1.
- effect_scores: <numpy float array of shape (n_variants,)>: The effect score (in the range 0 to 1) of every relevant
variant in the gene.
- allele1_refs: <numpy bool array of shape (n_variants,)>: Whether allele-1 is the reference variant (True) or allele-2
(False) for every relevant variant in the gene.
'''
import numpy as np
def calc_gene_dominant_effect_scores(variant_genotypes, effect_scores, allele1_refs, u, p, dtype = np.float32):
x = calc_xy(variant_genotypes, effect_scores, allele1_refs, u, also_y = False, dtype = dtype)
return calc_D(x, p)
def calc_gene_recessive_effect_scores(variant_genotypes, effect_scores, allele1_refs, u, p, q, dtype = np.float32):
x, y = calc_xy(variant_genotypes, effect_scores, allele1_refs, u, dtype = dtype)
return calc_R(x, y, p, q)
def calc_xy(variant_genotypes, effect_scores, allele1_refs, u, also_y = True, dtype = np.float32):
'''
The shape of x, y: (n_variants, n_samples)
'''
variant_genotypes = variant_genotypes.astype(dtype)
effect_scores = effect_scores.astype(dtype)
p = np.where(allele1_refs.reshape(-1, 1, 1), variant_genotypes, variant_genotypes[:, :, ::-1])
s = effect_scores.reshape(-1, 1)
p0 = p[:, :, 0]
p1 = p[:, :, 1]
p2 = p[:, :, 2]
x = p0 + p1 * s + p2 * (u * s + (1 - u) * np.square(s))
if also_y:
y = p1 * (1 - s) + p2 * ((1 - u) * 2 * s * (1 - s))
return x, y
else:
return x
def calc_D(x, p):
minus_log_x = -np.log(x)
lp_minus_log_x = np.linalg.norm(minus_log_x, p, axis = 0)
return np.exp(-lp_minus_log_x)
def calc_R(x, y, p, q):
x_zero_mask = (x == 0)
num_zero_x = x_zero_mask.sum(axis = 0)
no_zero_x_mask = (num_zero_x == 0)
one_zero_x_mask = (num_zero_x == 1)
R = np.zeros_like(num_zero_x, dtype = x.dtype)
if one_zero_x_mask.any():
R[one_zero_x_mask] = calc_R_with_one_zero_x(x[:, one_zero_x_mask], y[:, one_zero_x_mask], \
x_zero_mask[:, one_zero_x_mask], p)
if no_zero_x_mask.any():
R[no_zero_x_mask] = calc_R_with_no_zero_x(x[:, no_zero_x_mask], y[:, no_zero_x_mask], p, q)
return R
def calc_R_with_one_zero_x(x, y, x_zero_mask, p):
if p == 1:
return y[np.where(x_zero_mask)]
else:
return np.where(x_zero_mask, y, x).prod(axis = 0)
def calc_R_with_no_zero_x(x, y, p, q):
zeta = np.linalg.norm(y / x, q, axis = 0)
D = calc_D(x, p)
return (zeta + 1) * D
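# Minimal usage sketch (toy data, not from the original project): random genotype probabilities
# for 4 variants over 2 samples, scored under both the dominant and the recessive model.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    raw = rng.random((4, 2, 3))
    genotypes = raw / raw.sum(axis=2, keepdims=True)  # probabilities sum to 1 per variant/sample
    toy_scores = rng.random(4)                        # per-variant effect scores in (0, 1)
    refs = np.array([True, False, True, True])        # whether allele-1 is the reference allele
    print(calc_gene_dominant_effect_scores(genotypes, toy_scores, refs, u=0.5, p=2))
    print(calc_gene_recessive_effect_scores(genotypes, toy_scores, refs, u=0.5, p=2, q=2))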
|
the-stack_106_19738
|
""" Util functions for csgo package
"""
import json
import numpy as np
import re
import subprocess
class AutoVivification(dict):
"""Implementation of perl's autovivification feature. Stolen from https://stackoverflow.com/questions/651794/whats-the-best-way-to-initialize-a-dict-of-dicts-in-python"""
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
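# Usage sketch (illustrative): nested keys spring into existence on first access,
# e.g. d = AutoVivification(); d["maps"]["de_dust2"] = 1 works without pre-creating d["maps"].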
def check_go_version():
"""Function to check the Golang version of the current machine, returns True if greater than 1.14.0"""
try:
proc = subprocess.Popen(["go", "version"], stdout=subprocess.PIPE)
parsed_resp = proc.stdout.read().splitlines()
if len(parsed_resp) != 1:
raise ValueError("Error finding Go version")
else:
go_version_text = parsed_resp[0].decode("utf-8")
go_version = re.findall(r"\d\.\d+", go_version_text)
if int(go_version[0].replace(".", "")) >= 114:
return True
else:
return False
except Exception as e:
print(e)
return False
def is_in_range(value, min, max):
if value >= min and value <= max:
return True
else:
return False
def transform_csv_to_json(sampleCsv):
"""From Adi."""
finalDic = {}
for curMap in sampleCsv["mapName"].unique():
mapDic = {}
for i in sampleCsv[sampleCsv["mapName"] == curMap].index:
curTile = sampleCsv.iloc[i]
curDic = {}
for curFeature in sampleCsv.columns:
if curFeature not in ["mapName", "areaId"]:
curDic[curFeature] = curTile[curFeature]
mapDic[curTile["areaId"]] = curDic
finalDic[curMap] = mapDic
return finalDic
|
the-stack_106_19739
|
import math, random
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F
from common.replay_buffer import ReplayBuffer
import matplotlib.pyplot as plt
env_id = "LunarLander-v2"
env = gym.make(env_id)
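# NoisyNet-style linear layer (descriptive note): weights and biases are mu + sigma * epsilon with
# factorised Gaussian noise resampled by reset_noise(); in eval mode only the mu terms are used,
# so exploration comes from the learned noise rather than an epsilon-greedy schedule.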
class NoisyLinear(nn.Module):
def __init__(self, in_features, out_features, std_init=0.4):
super(NoisyLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.std_init = std_init
self.weight_mu = nn.Parameter(torch.FloatTensor(out_features, in_features))
self.weight_sigma = nn.Parameter(torch.FloatTensor(out_features, in_features))
self.register_buffer('weight_epsilon', torch.FloatTensor(out_features, in_features))
self.bias_mu = nn.Parameter(torch.FloatTensor(out_features))
self.bias_sigma = nn.Parameter(torch.FloatTensor(out_features))
self.register_buffer('bias_epsilon', torch.FloatTensor(out_features))
self.reset_parameters()
self.reset_noise()
def forward(self, x):
if self.training:
weight = self.weight_mu + self.weight_sigma.mul(autograd.Variable(self.weight_epsilon))
bias = self.bias_mu + self.bias_sigma.mul(autograd.Variable(self.bias_epsilon))
else:
weight = self.weight_mu
bias = self.bias_mu
return F.linear(x, weight, bias)
def reset_parameters(self):
mu_range = 1 / math.sqrt(self.weight_mu.size(1))
self.weight_mu.data.uniform_(-mu_range, mu_range)
self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.weight_sigma.size(1)))
self.bias_mu.data.uniform_(-mu_range, mu_range)
self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.bias_sigma.size(0)))
def reset_noise(self):
epsilon_in = self._scale_noise(self.in_features)
epsilon_out = self._scale_noise(self.out_features)
self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in))
self.bias_epsilon.copy_(self._scale_noise(self.out_features))
def _scale_noise(self, size):
x = torch.randn(size)
x = x.sign().mul(x.abs().sqrt())
return x
class CategoricalDQN(nn.Module):
def __init__(self, num_inputs, num_actions, num_atoms, Vmin, Vmax):
super(CategoricalDQN, self).__init__()
self.num_inputs = num_inputs
self.num_actions = num_actions
self.num_atoms = num_atoms
self.Vmin = Vmin
self.Vmax = Vmax
self.linear1 = nn.Linear(num_inputs, 128)
self.linear2 = nn.Linear(128, 128)
self.noisy1 = NoisyLinear(128, 512)
self.noisy2 = NoisyLinear(512, self.num_actions * self.num_atoms)
def forward(self, x):
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = F.relu(self.noisy1(x))
x = self.noisy2(x)
        x = F.softmax(x.view(-1, self.num_atoms), dim=1).view(-1, self.num_actions, self.num_atoms)
return x
def reset_noise(self):
self.noisy1.reset_noise()
self.noisy2.reset_noise()
def act(self, state):
state = autograd.Variable(torch.FloatTensor(state).unsqueeze(0), volatile=True)
dist = self.forward(state).data.cpu()
dist = dist * torch.linspace(Vmin, Vmax, num_atoms)
action = dist.sum(2).max(1)[1].numpy()[0]
return action
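# Categorical (C51) projection (descriptive note): the target network's value distribution is shifted by
# the Bellman update Tz = r + (1 - done) * 0.99 * z, clamped to [Vmin, Vmax], and its probability mass is
# split between the two nearest atoms so the result stays on the fixed num_atoms support.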
def projection_distribution(next_state, rewards, dones):
batch_size = next_state.size(0)
delta_z = float(Vmax - Vmin) / (num_atoms - 1)
support = torch.linspace(Vmin, Vmax, num_atoms)
next_dist = target_model(next_state).data.cpu() * support
next_action = next_dist.sum(2).max(1)[1]
next_action = next_action.unsqueeze(1).unsqueeze(1).expand(next_dist.size(0), 1, next_dist.size(2))
next_dist = next_dist.gather(1, next_action).squeeze(1)
rewards = rewards.unsqueeze(1).expand_as(next_dist)
dones = dones.unsqueeze(1).expand_as(next_dist)
support = support.unsqueeze(0).expand_as(next_dist)
Tz = rewards + (1 - dones) * 0.99 * support
Tz = Tz.clamp(min=Vmin, max=Vmax)
b = (Tz - Vmin) / delta_z
l = b.floor().long()
u = b.ceil().long()
offset = torch.linspace(0, (batch_size - 1) * num_atoms, batch_size).long()\
.unsqueeze(1).expand(batch_size, num_atoms)
proj_dist = torch.zeros(next_dist.size())
proj_dist.view(-1).index_add_(0, (l + offset).view(-1), (next_dist * (u.float() - b)).view(-1))
proj_dist.view(-1).index_add_(0, (u + offset).view(-1), (next_dist * (b - l.float())).view(-1))
return proj_dist
num_atoms = 51
Vmin = -10
Vmax = 10
current_model = CategoricalDQN(env.observation_space.shape[0], env.action_space.n, num_atoms, Vmin, Vmax)
target_model = CategoricalDQN(env.observation_space.shape[0], env.action_space.n, num_atoms, Vmin, Vmax)
optimizer = optim.Adam(current_model.parameters())
replay_buffer = ReplayBuffer(10000)
def update_target(current_model, target_model):
target_model.load_state_dict(current_model.state_dict())
update_target(current_model, target_model)
def compute_td_loss(batch_size):
state, action, reward, next_state, done = replay_buffer.sample(batch_size)
state = autograd.Variable(torch.FloatTensor(np.float32(state)))
next_state = autograd.Variable(torch.FloatTensor(np.float32(next_state)), volatile=True)
action = autograd.Variable(torch.LongTensor(action))
reward = torch.FloatTensor(reward)
done = torch.FloatTensor(np.float32(done))
proj_dist = projection_distribution(next_state, reward, done)
dist = current_model(state)
action = action.unsqueeze(1).unsqueeze(1).expand(batch_size, 1, num_atoms)
dist = dist.gather(1, action).squeeze(1)
dist.data.clamp_(0.01, 0.99)
loss = - (autograd.Variable(proj_dist) * dist.log()).sum(1).mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
current_model.reset_noise()
target_model.reset_noise()
return loss
def plot(frame_idx, rewards, losses):
plt.figure(figsize=(20,5))
plt.subplot(131)
plt.title('frame %s. reward: %s' % (frame_idx, np.mean(rewards[-10:])))
plt.plot(rewards)
plt.subplot(132)
plt.title('loss')
plt.plot(losses)
plt.show()
num_frames = 100000
batch_size = 32
gamma = 0.99
losses = []
all_rewards = []
episode_reward = 0
state = env.reset()
for frame_idx in range(1, num_frames + 1):
action = current_model.act(state)
next_state, reward, done, _ = env.step(action)
replay_buffer.push(state, action, reward, next_state, done)
state = next_state
episode_reward += reward
if done:
state = env.reset()
all_rewards.append(episode_reward)
episode_reward = 0
if len(replay_buffer) > batch_size:
loss = compute_td_loss(batch_size)
losses.append(loss.item())
if frame_idx % 2000 == 0:
plot(frame_idx, all_rewards, losses)
if frame_idx % 100 == 0:
update_target(current_model, target_model)
|
the-stack_106_19740
|
import click
import cv2
import numpy as np
import shutil
from tqdm import tqdm
from pathlib import Path
@click.command()
@click.option("--input-dir", "-i", default="./annotations")
@click.option("--output-dir", "-o", default="./masks")
@click.option("--background-label", "-b", default=0)
@click.option("--removal-target-label", "-r", default=2)
def main(input_dir, output_dir, background_label, removal_target_label):
input_dir_pathlib = Path(input_dir)
output_dir_pathlib = Path(output_dir)
if output_dir_pathlib.exists():
shutil.rmtree(output_dir)
output_dir_pathlib.mkdir()
seg_path_list = [path for path in input_dir_pathlib.glob("*") if path.suffix in [".jpg", ".png"]]
for seg_path in tqdm(seg_path_list):
seg_mask = cv2.imread(str(seg_path), cv2.IMREAD_ANYDEPTH)
mask_name = seg_path.name
binary_mask = np.ones_like(seg_mask) * 255
binary_mask[seg_mask == removal_target_label] = 0
binary_mask = binary_mask.astype(np.uint8)
output_mask_pathstr = str(output_dir_pathlib.joinpath(mask_name))
cv2.imwrite(output_mask_pathstr, binary_mask)
cv2.waitKey(10)
if __name__ == "__main__":
main()
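# Usage sketch (added; the script filename and paths below are illustrative only):
#
#   python make_masks.py -i ./annotations -o ./masks -r 2
#
# For every .jpg/.png label image in --input-dir this writes an 8-bit mask to
# --output-dir, with pixels equal to --removal-target-label set to 0 and all
# other pixels set to 255.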
|
the-stack_106_19741
|
import argparse
import os
import torch
import torch.nn as nn
import torch.utils.data as data
class Configs(object):
@staticmethod
def base_config():
parser = argparse.ArgumentParser()
parser.add_argument("--classifier", type=str, default="vdpwi", choices=["vdpwi", "resnet"])
parser.add_argument("--clip_norm", type=float, default=50)
parser.add_argument("--cpu", action="store_true", default=False)
parser.add_argument("--dataset", type=str, default="sick", choices=["sick"])
parser.add_argument("--decay", type=float, default=0.95)
parser.add_argument("--input_file", type=str, default="local_saves/model.pt")
parser.add_argument("--lr", type=float, default=5E-4)
parser.add_argument("--mbatch_size", type=int, default=16)
parser.add_argument("--mode", type=str, default="train", choices=["train", "test"])
parser.add_argument("--momentum", type=float, default=0.1)
parser.add_argument("--n_epochs", type=int, default=35)
parser.add_argument("--n_labels", type=int, default=5)
parser.add_argument("--optimizer", type=str, default="rmsprop", choices=["adam", "sgd", "rmsprop"])
parser.add_argument("--output_file", type=str, default="local_saves/model.pt")
parser.add_argument("--res_fmaps", type=int, default=32)
parser.add_argument("--res_layers", type=int, default=16)
parser.add_argument("--restore", action="store_true", default=False)
parser.add_argument("--rnn_hidden_dim", type=int, default=250)
parser.add_argument("--weight_decay", type=float, default=1E-5)
parser.add_argument("--wordvecs_file", type=str, default="local_data/glove/glove.840B.300d.txt")
return parser.parse_known_args()[0]
@staticmethod
def sick_config():
parser = argparse.ArgumentParser()
parser.add_argument("--n_labels", type=int, default=5)
parser.add_argument("--sick_cache", type=str, default="local_data/sick/.vec-cache")
parser.add_argument("--sick_data", type=str, default="local_data/sick")
return parser.parse_known_args()[0]
class LabeledEmbeddedDataset(data.Dataset):
def __init__(self, sentence_indices1, sentence_indices2, labels, compare_labels=None):
assert len(sentence_indices1) == len(labels) == len(sentence_indices2)
self.sentence_indices1 = sentence_indices1
self.sentence_indices2 = sentence_indices2
self.labels = labels
self.compare_labels = compare_labels
def __getitem__(self, idx):
cmp_lbl = None if self.compare_labels is None else self.compare_labels[idx]
return self.sentence_indices1[idx], self.sentence_indices2[idx], self.labels[idx], cmp_lbl
def __len__(self):
return len(self.labels)
def load_sick():
config = Configs.sick_config()
def fetch_indices(name):
sentence_indices = []
filename = os.path.join(config.sick_data, dataset, name)
with open(filename) as f:
for line in f:
indices = [embed_ids.get(word, -1) for word in line.strip().split()]
indices = list(filter(lambda x: x >= 0, indices))
sentence_indices.append(indices)
return sentence_indices
def read_labels(filename):
labels = []
with open(filename) as f:
for line in f:
labels.append([float(val) for val in line.split()])
return labels
sets = []
embeddings = []
embed_ids = {}
with open(os.path.join(config.sick_cache)) as f:
for i, line in enumerate(f):
word, vec = line.split(" ", 1)
vec = list(map(float, vec.strip().split()))
embed_ids[word] = i
embeddings.append(vec)
padding_idx = len(embeddings)
embeddings.append([0.0] * 300)
for dataset in ("train", "dev", "test"):
sparse_filename = os.path.join(config.sick_data, dataset, "sim_sparse.txt")
truth_filename = os.path.join(config.sick_data, dataset, "sim.txt")
sparse_labels = read_labels(sparse_filename)
cmp_labels = read_labels(truth_filename)
indices1 = fetch_indices("a.toks")
indices2 = fetch_indices("b.toks")
sets.append(LabeledEmbeddedDataset(indices1, indices2, sparse_labels, cmp_labels))
embedding = nn.Embedding(len(embeddings), 300)
embedding.weight.data.copy_(torch.Tensor(embeddings))
embedding.weight.requires_grad = False
return embedding, sets
def load_dataset(dataset):
return _loaders[dataset]()
_loaders = dict(sick=load_sick)
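# Added example (illustrative, not part of the original module): how the loader
# is typically consumed downstream.
def _example_load_sick():
    embedding, (train_set, dev_set, test_set) = load_dataset("sick")
    # each item is (sentence1 indices, sentence2 indices, sparse label, gold similarity)
    s1, s2, sparse_label, gold = train_set[0]
    return embedding, len(train_set), len(dev_set), len(test_set)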
|
the-stack_106_19745
|
"""fyle_qbo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from .views import VendorView, AccountView, ClassView, DepartmentView, BillView, BillScheduleView, CustomerView
urlpatterns = [
path('vendors/', VendorView.as_view({'get': 'get_vendors'})),
path('accounts/', AccountView.as_view({'get': 'get_accounts'})),
path('classes/', ClassView.as_view({'get': 'get_classes'})),
path('departments/', DepartmentView.as_view({'get': 'get_departments'})),
path('customers/', CustomerView.as_view({'get': 'get_customers'})),
path('bills/', BillView.as_view()),
path('bills/trigger/', BillScheduleView.as_view())
]
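# Added note (illustrative): this URLconf is normally mounted from the project
# urls.py; the prefix and dotted module path below are assumptions, not part of
# this app.
#
#   from django.urls import include, path
#   urlpatterns = [path('qbo/', include('apps.quickbooks_online.urls'))]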
|
the-stack_106_19746
|
import easyocr
import sys
sys.path.append("../../")
sys.path.append(".")
import argparse
from common.utility import *
from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
from moviepy.editor import VideoFileClip
def camera_time_pos(camNO):
return load_time_pos_setting(config_folder, camNO)
def getImgTime(frame, position, verbose=True):
    t_tl_y, t_br_y, t_tl_x, t_br_x = position
    frame_area = frame[t_tl_y:t_br_y, t_tl_x:t_br_x]  # crop the on-frame timestamp region
if verbose:
# print(result)
cv2.imshow('frame', frame)
cv2.imshow('frame area', frame_area)
cv2.waitKey(100)
result = reader.readtext(frame_area.copy())
if DEBUG:
print(f"recognize ocr result is {result}")
if len(result) == 0:
time_str = str(int(time.time()))
if DEBUG:
print(f"current frame can not be recognized by OCR")
return time_str
else:
time_str = ""
for ires in result:
time_str += ires[1] + ' '
print(f"str format time_str is {time_str}")
try:
time_str = time_str.replace("Z", '2').replace("z", '2').\
replace("O", '0').replace("o", '0').replace("a",'0').\
replace("k", '4').replace("Q", '0').replace("S", '5').\
replace("12021", "2021").replace("B", "8")
digital_time_str = re.findall('\d+', time_str)
digital_str = "".join(digital_time_str)
            assert len(digital_str) == 14, 'ocr result digital length error!'
            time_str = "_".join(digital_time_str)
            assert len(time_str) == 19, 'ocr result length is smaller than true label!'
except:
if DEBUG:
print("extract date frome OCR failed")
year = time_str[0:4]
month = time_str[5:7]
day = time_str[8:10]
hh = time_str[11:13]
mm = time_str[14:16]
ss = time_str[17:19]
time_str = f"{year}_{month}_{day}_{hh}_{mm}_{ss}"
return time_str.strip()
def isStartFrame(frame_time_str, start_time, current_unix_time):
'''
    :param frame_time_str: the OCR-recognized timestamp of the current frame
    :param start_time: the requested start time of the cut
    :param current_unix_time: the current Unix time; the recognized frame time may
        occasionally jump far back in time, so pass -1 when the video is first loaded
:return:
'''
try:
frame_time_date = time.strptime(frame_time_str, "%Y_%m_%d_%H_%M_%S")
frame_unix_time = time.mktime(frame_time_date)
start_time_date = time.strptime(start_time, "%Y_%m_%d_%H_%M_%S")
start_unix_time = time.mktime(start_time_date)
if frame_unix_time > current_unix_time:
if current_unix_time == -1:
current_unix_time = frame_unix_time
print(f"current_unix_time: {current_unix_time}")
else:
                # check that the recognized date is within a reasonable range before updating current_unix_time
print(f"frame_unix_time: {frame_unix_time}")
print(f"current_unix_time: {current_unix_time}")
assert frame_unix_time - start_unix_time < preloading_time, f"{frame_unix_time} is too larger than {current_unix_time}"
current_unix_time = frame_unix_time
if current_unix_time < start_unix_time:
if DEBUG:
print(f"current time is slower than start time with {start_unix_time - current_unix_time} s")
return False, 0, start_unix_time - current_unix_time, current_unix_time
elif current_unix_time == start_unix_time:
if DEBUG:
print("current frame == start time")
return True, 0, 0, current_unix_time
else:
return False, max_try, -1, current_unix_time
except:
if DEBUG:
print(f"OCR Result can not be formated by function time.strptime")
return False, 1, -1, current_unix_time
def getImgExp(frame, verbose=False):
    '''Values stored in exp_info look like:
{
1_1: {
581, 93, 1020, 524
},
2_CK: {
1050, 79, 1514, 535
}
}
:param frame:
:param exp_info:
:return:
'''
    # get the longest side (width or height) of the current experiment area;
    # it is needed later when padding the crop to 640*640
# iarea = frame[0:h, int(3 * w / 16):int(3 * w / 4)]
if frame.shape[0] == 1080:
iarea = cv2.resize(frame, (720, 1280))
else:
iarea = frame
if verbose:
cv2.imshow('frame area', iarea)
cv2.waitKey(0)
return iarea
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--exp_floder", default="E:\\data\\3D_pre\\D2_T1", type=str, help="video shear dirpath")
parser.add_argument("--save_path", default="E:\\data\\3D_pre\\D2_T1\\cut_video", type=str, help="video save path")
parser.add_argument("--video_name",
# default="/home/data/HJZ/zef/0918-0919fish",
default="ch13_20211012022645.mp4",
type=str, help="video shear dirpath"
)
parser.add_argument("--start_time",
# default="/home/data/HJZ/zef/0918-0919fish",
default="2021_10_12_02_36_00",
# default="2020_08_15_07_27_00",
type=str, help="vstart_timeideo start to be cut time"
)
parser.add_argument("-spt", "--split_time",
default="2",
type=int, help="split time (s)"
)
parser.add_argument("-gpuno", "--gpuno",
default="0",
type=int, help="gpu no"
)
DEBUG = True
args = parser.parse_args()
    exp_floder = args.exp_floder  # directory to traverse for source videos
    config_folder = args.exp_floder  # same directory; also holds settings.ini
    obj_path = args.save_path  # output directory
if not os.path.exists(obj_path):
os.mkdir(obj_path)
    video_name = args.video_name  # video file name
time_str = video_name.split("_")[-1]
year = time_str[0:4]
month = time_str[4:6]
day = time_str[6:8]
hh = time_str[8:10]
mm = time_str[10:12]
ss = time_str[12:14]
video_start_time_str = f"{year}_{month}_{day}_{hh}_{mm}_{ss}"
    start_time = args.start_time  # requested start time
    spt = args.split_time  # cut length in seconds
reader = easyocr.Reader(['en'], gpu=True) # this needs to run only once to load the model into memory
processed_list = [
files
for files in os.listdir(obj_path)
]
DayTank_setting_path = os.path.join(exp_floder, 'settings.ini')
failed_time_list = load_Video_start_time(exp_floder, video_name, time_part="VideoStartTime_Failed")
# camera_id_list = ['D01', 'D02', 'D04']
camera_id_list = load_Cam_list(config_folder)
camNO, VideoTime = video_name.split('_')
    # open the video file and prepare the data
# cap = cv2.VideoCapture(os.path.join(exp_floder, video_name))
# using moviepy
cap = FFMPEG_VideoReader(os.path.join(exp_floder, video_name), True)
cap.initialize()
# using moviepy
    # if the current frame time is far from the requested start time (time_gap, in seconds),
    # jump to within a few seconds of the start frame
_, _, time_gap, current_unix_time = isStartFrame(
video_start_time_str, start_time, -1
)
# fps = int(cap.get(cv2.CAP_PROP_FPS))
fps = 25
preloading_time = 30
if time_gap > 11:
frame_gap = (time_gap - preloading_time) * fps
# cap.set(cv2.CAP_PROP_POS_FRAMES, int(frame_gap))
# cap.set(cv2.CAP_PROP_POS_MSEC, int(time_gap)*1000.0)
print(f"jumping {time_gap}")
# success, image = cap.read()
# print(f"jumping {success}")
# using moviepy
image = cap.get_frame(time_gap - preloading_time)
# using moviepy
if DEBUG:
cv2.imshow('jump frame area', image)
cv2.waitKey(100)
fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
# fourcc = cv2.VideoWriter_fourcc('X','2','6','4')
# total_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
# h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
cut_name = None
    # starting_flag tracks whether the first frame's time is still earlier than the requested start_time
starting_flag = True
init_videoWriter = False
starting_flag_try_times = 0
max_try = preloading_time * fps
time_flag = 0
while True:
# success, frame = cap.read()
# print(f"current frame no: {cap.get(cv2.CAP_PROP_POS_FRAMES)}")
# frame = img_as_float(frame)
# frame = img_as_ubyte(frame)
# ====================== using moviepy ==========================#
# cap.skip_frames(10)
frame = cap.read_frame()
# ====================== using moviepy ==========================#
time_pos = camera_time_pos(camNO)
frame_time_str = getImgTime(frame, time_pos)
# cv2.imshow('frame area', frame)
# cv2.waitKey(100)
# if time_flag == 0:
# cap.set(cv2.CAP_PROP_POS_FRAMES, 45000)
# time_flag += 1
# continue
        # ================= check whether this is the starting frame ===================== #
        if starting_flag:
            # isStartFrame return codes:
            #   OCR recognition failed            -> try_times = 1
            #   current frame earlier than start  -> try_times = 0 (keep reading)
            #   current frame later than start    -> try_times = max_try
isStartFrame_flag, try_times, time_gap, current_unix_time = isStartFrame(frame_time_str, start_time, current_unix_time)
if DEBUG:
print(f"time gap is {time_gap}")
            # if this is the start frame, set starting_flag to False so later frames skip the time check
if isStartFrame_flag:
starting_flag = False
                # first output video
                # ================= start cutting the video ===================== #
old_time_str = start_time
cut_name = start_time
exp_frame = getImgExp(frame)
                # exp_frame: the cropped frame for this experiment area (e.g. 2_CK)
if DEBUG:
print(f"first time to cutting new video {frame_time_str}-{camNO}")
save_video_name = os.path.join(
obj_path, f"{frame_time_str}_{camNO}.avi"
)
videoWriter = cv2.VideoWriter(
save_video_name, fourcc, fps, (exp_frame.shape[1], exp_frame.shape[0]), True
)
init_videoWriter = True
            # if this is not the start frame, keep trying while under the maximum number of attempts, otherwise give up
else:
if starting_flag_try_times < max_try:
starting_flag_try_times += try_times
if DEBUG:
print(
f"{frame_time_str} not starting frame {start_time} with {starting_flag_try_times}/{max_try}")
continue
else:
time_local = time.localtime(current_unix_time)
dt = time.strftime("%Y_%m_%d_%H_%M_%S", time_local)
failed_time_list.append(dt)
writeConfig(DayTank_setting_path, [('VideoStartTime_Failed', video_name, '\n'.join(failed_time_list))])
print(f"{start_time} finding starting frame in {video_name} failed")
exit(987)
        # ================ check whether this frame has already been processed ================ #
if f"{frame_time_str}_{camNO}.avi" in processed_list:
print(f"{frame_time_str}_{camNO} has been processed")
continue
if init_videoWriter:
if time_flag < spt * fps:
if DEBUG:
print(f"progress: {time_flag}/{spt * fps}")
time_flag += 1
exp_frame = getImgExp(frame)
videoWriter.write(exp_frame)
else:
print(f"{frame_time_str}_{camNO} have finished, saved in {save_video_name}")
break
exit(987654)
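# Usage sketch (added; the script filename, paths and timestamps are illustrative):
#
#   python cut_video.py --exp_floder E:\data\3D_pre\D2_T1 \
#       --save_path E:\data\3D_pre\D2_T1\cut_video \
#       --video_name ch13_20211012022645.mp4 \
#       --start_time 2021_10_12_02_36_00 -spt 2
#
# The script OCRs the on-frame timestamp, seeks close to the requested start
# time, and writes a clip named <frame_time>_<camNO>.avi of split_time seconds
# into --save_path.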
|
the-stack_106_19747
|
import datetime
from django.db import models
from base.abstracts import AbstractBaseModel
class GeoCoderLog(AbstractBaseModel):
number_of_request = models.IntegerField(default=0)
@classmethod
def get_today_record(cls):
record = cls.objects.filter(
created_at__range=(
datetime.datetime.combine(
datetime.date.today(),
datetime.time.min
),
datetime.datetime.combine(
datetime.date.today(),
datetime.time.max
)
)
)
if record:
return record[0]
record = cls.objects.create()
return record
@classmethod
def update_today_number_of_request(cls):
record = cls.get_today_record()
record.number_of_request += 1
record.save()
return record.number_of_request
@classmethod
def get_limit(cls):
record = cls.get_today_record()
return 2500 - record.number_of_request
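# Added example (illustrative, not part of the original module): a sketch of how
# callers might guard external geocoding requests with the daily counter; the
# `geocode` callable is hypothetical.
def _example_guarded_geocode(geocode, address):
    if GeoCoderLog.get_limit() <= 0:
        return None  # daily quota of 2500 requests exhausted
    result = geocode(address)
    GeoCoderLog.update_today_number_of_request()
    return result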
|
the-stack_106_19749
|
#!/usr/bin/env python2.7
import sys
from numpy import *
from pylab import *
from matplotlib import rc, rcParams
dict=sys.argv[1].split("/")[2]
trie = genfromtxt('../data/trie_search_found_' + dict + '.output')
tst = genfromtxt('../data/tst_search_found_' + dict + '.output')
radix = genfromtxt('../data/radix_search_found_' + dict + '.output')
_map = genfromtxt('../data/map_search_found_' + dict + '.output')
umap = genfromtxt('../data/umap_search_found_' + dict + '.output')
data = [trie, tst, radix, _map, umap]
fig, ax = subplots()
index = arange(5)
width = 0.5
ax.bar(index,data, width, align='center')
xlabel('Data structures')
ylabel('Time(ms)')
title('Search found dictionary(' + dict + ')')
xticks(index, ('Trie', 'TST', 'Radix', 'Map', 'Umap'))
legend(loc='best')
grid(True)
savefig('../images/search_found/dict/search_found_' + dict + '_time_ALL.eps')
data = [trie, tst, radix]
fig, ax = subplots()
index = arange(3)
width = 0.5
ax.bar(index,data, width, align='center')
xlabel('Data structures')
ylabel('Time(ms)')
title('Search found dictionary(' + dict + ')')
xticks(index, ('Trie', 'TST', 'Radix'))
legend(loc='best')
grid(True)
savefig('../images/search_found/dict/search_found_' + dict + '_time_TTR.eps')
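# Usage sketch (added; the script name and path are illustrative): the single
# argument is a path whose third component names the dictionary, e.g.
#
#   ./plot_search_found.py ../data/english
#
# which reads ../data/<structure>_search_found_english.output for each data
# structure and writes the two comparison bar charts under
# ../images/search_found/dict/.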
|
the-stack_106_19754
|
#!/usr/bin/env python
import re
from django.core.management.base import BaseCommand
from documents.models import Agency, Document, ProcessedDocument
class Command(BaseCommand):
help = """
Remove CSVs that got added as responsive documents in error.
"""
SKIP_AGENCIES = [
"Redmond Police Department",
"Black Diamond Police Department",
"West Richland Police Department",
]
def yes_no(self, message, default=False):
response = input(f"{message.strip()} ").strip().lower()
if not response:
return default
return response.startswith("y")
def handle(self, *args, **options):
matches = []
for doc in Document.objects.filter(file__endswith=".csv"):
# if we marked the agency complete, don't mess with it
if doc.agency.completed:
continue
# skip redmond, we actually got CSVs
if doc.agency.name in self.SKIP_AGENCIES:
continue
# false positive (delete it!)
if ".precleaned" in doc.file.name:
continue
print("\n====================")
print("Agency:", doc.agency.name)
print("Responsive document:", doc.file.name)
nocsv = re.sub(".csv", "", doc.file.name)
basepath = "/".join(nocsv.split("/")[:-1])
filename = nocsv.split("/")[-1]
# print("\t", "Raw filename:", filename)
nosheet = "-".join(filename.split("-")[:-1]) or filename
# print("\t", "Without sheet extension:", nosheet)
start_pattern = f"{basepath}/{nosheet}"
print("\t", f"Search pattern: `{start_pattern}`")
possible_parents = Document.objects.filter(
file__startswith=start_pattern
).exclude(pk=doc.pk)
n_poss = possible_parents.count()
print("----------")
print(f"{n_poss} possible parent{'s' if n_poss != 1 else ''}:")
for pp in possible_parents:
print(" - ", pp.file.name)
if n_poss == 1:
print("It's recommended we delete this!")
if self.yes_no("Delete doc? [Y/n]", default=True):
doc.delete()
print("Deleting.")
|
the-stack_106_19756
|
import os
import json
import numpy as np
from naming_conventions import languages, languages_readable
from uriel import Similarities
import uriel
import copy
import pickle
from collections import defaultdict
import matplotlib.pyplot as plt
from scipy import stats
from matplotlib import colors
import seaborn as sns
"""Hardcode All This"""
train_lans = [0,5,6,11,13,16,17,19]
low_lans = [1,2,4,7,15,23]
tran_expen = np.array([34.55,40.20,45.16,0,19.19,59.97,84.64,58.28,50.65, #finnish
54.12, 68.30, 32.94, 52.72, 75.45, 12.92, 33.56, 33.67, 72.09, #norwe
44.34, 59.53, 76.02, 18.09, 54.47, 36.66, 23.46, 29.72
]) / 100
tran_expmix = np.array([68.20,58.95,52.62,0,23.11,84.36,81.65,61.98,62.29,
59.54, 70.93, 85.66, 61.11, 89.44, 24.10, 44.56, 81.73, 85.11,
56.92, 81.91, 78.70, 32.78, 64.03, 49.74, 63.06, 29.71
])/ 100
class ModelAnalysis():
def __init__(self, model_str, seed_list=[1,2,5,6,7]):
spl = model_str.split('_')
name = spl[0]
params = spl[1]
z = ""
if 'finetune' not in name:
params += '_' + spl[2]
z = "_3"
self.model_str = name + "24_seedX_" + params + "_True" + z + "VAL_averaging"
self.model_str2 = name + "24_seedX_" + params + "_True" + z + "VAL"
self.seed_list = seed_list
self.files_highlr = {}
self.files_lowlr = {}
self.files_zeroshot = defaultdict(lambda:[])
self.las_highlr = {}
self.las_lowlr = {}
self.las_zeroshot = {}
self.whole_dict = {}
for l in languages:
self._set_files_for_language(l)
for l in languages:
self._set_las_scores(l)
print(name, [len(self.las_highlr[z]) for z in self.las_highlr], [len(self.las_lowlr[z]) for z in self.las_lowlr] )
def _set_files_for_language(self, lan):
files_highlr = []
files_lowlr = []
for outerseed in self.seed_list:
testingstr = "metatesting_0.001_True3_" + self.model_str.replace('X', str(outerseed))
testingstrlow = "metatesting_0.0001_True3_" + self.model_str.replace('X', str(outerseed))
zeroshotstr = "finalresults/zeroshot_" + self.model_str2.replace('X', str(outerseed)) + "/" + lan + "_performance.json"
if os.path.exists(zeroshotstr):
self.files_zeroshot[lan].append(zeroshotstr)
#else:
#print("File not found", zeroshotstr)
#raise ValueError()
for innerseed in range(0,5):
f = "finalresults/" + testingstr + "/" + lan + "_performance" + str(innerseed) +".json"
f2 = "finalresults/" + testingstrlow + "/" + lan + "_performance" + str(innerseed) +".json"
if os.path.exists(f):
files_highlr.append(f)
if os.path.exists(f2):
files_lowlr.append(f2)
#else:
# print("File not found", f2)
self.files_highlr[lan] = files_highlr
self.files_lowlr[lan] = files_lowlr
def _set_las_scores(self, lan):
scores = []
for f in self.files_highlr[lan]:
with open(f,'r') as results:
result = json.load(results)
scores.append(result['LAS']['aligned_accuracy'])
#print(scores)
self.las_highlr[lan] = np.array(scores)
scores = []
for f in self.files_lowlr[lan]:
with open(f,'r') as results:
result = json.load(results)
scores.append(result['LAS']['aligned_accuracy'])
self.las_lowlr[lan] = np.array(scores)
scores = []
for f in self.files_zeroshot[lan]:
with open(f,'r') as results:
result = json.load(results)
scores.append([result['LAS']['aligned_accuracy']]*5)
self.las_zeroshot[lan] = np.array(scores)
def get_mean_sd_high(self, lan, r=99):
b = self.las_highlr[lan]
return round(np.mean(b), r), round(np.std(b),r)
def get_mean_sd_low(self, lan, r=99):
b = self.las_lowlr[lan]
return round(np.mean(b), r), round(np.std(b), r)
def get_mean_sd_zero(self, lan, r=99):
b = self.las_zeroshot[lan]
return round(np.mean(b), r), round(np.std(b), r)
class FileAnalysis(ModelAnalysis):
def __init__(self, filenames, name, zero=False):
self.name = name
self.zero = zero
self.las_lowlr = {}
if zero:
self.zero_init(filenames)
else:
self.files_highlr = defaultdict(lambda:[])
self.files_lowlr = defaultdict(lambda:[])
self.las_highlr = {}
for filename in filenames:
for lan in languages:
for innerseed in range(0,5):
f = "finalresults/metatesting_0.001_True3_" + filename + "/" + lan + "_performance" + str(innerseed) +".json"
f2 = "finalresults/metatesting_0.0001_True3_" + filename + "/" + lan + "_performance" + str(innerseed) +".json"
if os.path.exists(f):
self.files_highlr[lan].append(f)
if os.path.exists(f2):
self.files_lowlr[lan].append(f2)
#if innerseed == 0:
# print("Using file", f2)
for lan in languages:
self._set_las_scores(lan)
def zero_init(self, filenames):
self.files_lowlr = defaultdict(lambda:[])
for filename in filenames:
for lan in languages:
f2 = "finalresults/zeroshot_" + filename + "/" + lan + "_performance.json"
if os.path.exists(f2):
r = 1
if len(filenames)==1:
r = 3
                    for i in range(r):
self.files_lowlr[lan].append(f2)
def _set_las_scores(self, lan):
if self.zero:
scores = []
for f in self.files_lowlr[lan]:
with open(f,'r') as results:
result = json.load(results)
scores.append(result['LAS']['aligned_accuracy'])
self.las_lowlr[lan] = np.array(scores)
else:
scores = []
for f in self.files_highlr[lan]:
with open(f,'r') as results:
result = json.load(results)
scores.append(result['LAS']['aligned_accuracy'])
#print(scores)
#self.las_highlr[lan] = np.array(scores)
scores = []
for f in self.files_lowlr[lan]:
with open(f,'r') as results:
result = json.load(results)
scores.append(result['LAS']['aligned_accuracy'])
self.las_lowlr[lan] = np.array(scores)
def print_all(self):
for lan in languages:
print('---')
print(lan,'\t',self.get_mean_sd_high(lan, 3), self.get_mean_sd_low(lan, 3))
class MetaListAnalysis():
def __init__(self, filelist, nameslist):
self.filelist = filelist
self.names = nameslist
self.accuracy_significance = {}
self.correlation_significance = {}
self.correlations = {}
self.lookup = {name:i for i,name in enumerate(nameslist)}
for f in filelist:
for i,lan in enumerate(languages):
self.accuracy_significance[lan] = {}
for name1,model1 in zip(nameslist,filelist+[0,0]):
self.accuracy_significance[lan][name1]= {}
for name2,model2 in zip(nameslist,filelist+[0,0]):
if name2 != name1:
if 'tran-en' in name1:
array1= [tran_expen[i]]*5
elif 'tran-mix' in name1:
array1 = [tran_expmix[i]]*5
else:
array1 = model1.las_lowlr[lan]
if 'tran-en' in name2:
array2= [tran_expen[i]]*5
elif 'tran-mix' in name2:
array2 = [tran_expmix[i]]*5
else:
array2 = model2.las_lowlr[lan]
p_value = stats.ttest_ind(array1, array2 , equal_var=False).pvalue
#print("setting", name1,name2,lan)
self.accuracy_significance[lan][name1][name2] = p_value
def print_latex(self, filename, train=False, print_sd =False):
with open(filename,'w') as f:
f.write(' &' + ' & '.join(self.names) + '\\\\\\hline\n')
for i, lan in enumerate(languages):
readable_lan = languages_readable[i]
lijstje = np.array([m.get_mean_sd_low(lan,7)[0] for m in self.filelist[:-2]] + [tran_expen[i], tran_expmix[i]])
sds = np.array([m.get_mean_sd_low(lan,7)[1] for m in self.filelist[:-2]] + [0,0])
#print([m.name for m in self.filelist])
#print(lijstje)
max_index = np.nanargmax(lijstje)
notmax_lijstje = np.delete(lijstje, max_index)
max_index2 = np.nanargmax(notmax_lijstje)
names2 = np.delete(np.array(self.names), max_index)
color = "\\rowcolor{LightRed}" if i in low_lans else ''
#print(max_index2, max_index, readable_lan)
significance = self.accuracy_significance[lan][self.names[max_index]][names2[max_index2]]
#print("Is it significant?", readable_lan, self.names[max_index], names2[max_index2], significance)
#print( '\t', significance )
lijstje = ['*{\\bf ' + str(round(l,3)) + '}'
if (i == max_index and significance < 0.01 and max_index < (len(self.names)-2))
else ('{\\bf ' + str(round(l,3)) + '}' if (i==max_index)
else str(round(l,3)) )
for i,l in enumerate(lijstje)]
lijstje = [ l + ('\\tiny{$\\pm$ '+str(round(sd,3))+'}' if z< (len(self.names)-2) and print_sd else '') for z, (l, sd) in enumerate(zip(lijstje, sds))]
if i not in train_lans and not train:
# Write normal resource
with open(filename,'a') as f:
f.write(color),
f.write(readable_lan + ' & ')
f.write(' & '.join(lijstje))
f.write('\\\\\n')
# Write low resources
elif i in train_lans and train:
with open(filename,'a') as f:
f.write(readable_lan + ' & ')
f.write(' & '.join(lijstje))
f.write('\\\\\n')
def compare_two_columns(self, name1, name2):
count = 0
for i, lan in enumerate(languages):
if i not in train_lans and 'ulg' not in lan:
significance = self.accuracy_significance[lan][name1][name2]
print(lan, significance)
if significance < 0.01:
count += 1
print(count)
return count
def plot_diffs(self, experiment_list=["english","maml","x-ne"], comparator = "x-maml"):
plt.rcParams["axes.grid"] = False
diffs = np.zeros((17,len(experiment_list)))
pvalues = np.zeros((17,len(experiment_list)))
labels = np.empty((17,len(experiment_list)),dtype=object)
enum = 0
real_lans = []
for i, lan in enumerate(languages):
if i not in train_lans and 'ulg' not in lan:
for j,setup in enumerate(experiment_list):
lookup = self.filelist[self.lookup[setup]]
mean_comp = self.filelist[self.lookup[comparator]].get_mean_sd_low(lan,7)[0]*100
if type(lookup) is str:
if 'en' in lookup:
mean_comp = tran_expen[i]*100
else:
#print(lan, i, tran_expmix[i]*100,mean_comp)
mean_setup = tran_expmix[i]*100
else:
mean_setup = lookup.get_mean_sd_low(lan,7)[0]*100
diffs[enum,j] = mean_comp - mean_setup
pvalue = self.accuracy_significance[lan][comparator][setup]
pvalues[enum,j] = pvalue
labels[enum, j] = str(round(diffs[enum,j],2)) + ('*' if pvalues[enum,j] < 0.01 else '')
enum += 1
real_lans.append(languages_readable[i])
fig, ax = plt.subplots()
print(labels)
rdgn = sns.diverging_palette(h_neg=10, h_pos=250, s=99, l=55, sep=3, as_cmap=True)
#labels = np.array([['A','B'],['C','D'],['E','F']])
g = sns.heatmap(diffs, annot=labels, ax=ax, fmt = '',
cmap=rdgn, vmin=-3, center=0, vmax=30)
# We want to show all ticks...
ax.set_yticklabels(real_lans, rotation=1)
ax.set_xticklabels(experiment_list, horizontalalignment='center')
for low in [0,1,2,3,9,14]:
ax.get_yticklabels()[low].set_color("red")
ax.set_xlabel("Baseline")
#g.set_axis_labels("Baseline", "Language")
#ax.set_xticks(np.arange(5))
#ax.set_yticks(np.arange(len(real_lans)))
# ... and label them with the respective list entries.
#ax.set_xticklabels(setups)
#ax.set_yticklabels(real_lans)
#im, cbar = tools.heatmap(diffs, real_lans, setups, ax=ax,
# cmap="RdYlGn", vmin=28, center=0, vmax=-5)
# texts = tools.annotate_heatmap(im, pvalues, valfmt="{x:.1f}", fontsize=8)
ax.set_title("X-MAML Improvement" + (" (Zero-Shot)" if "zero" in experiment_list[0] else " (Few-Shot) "))
fig.tight_layout()
plt.show()
def plot_diffs_pairwise(self):
plt.rcParams["axes.grid"] = False
diffs = np.zeros((9,4))
pvalues = np.zeros((9,4))
labels = np.empty((9,4),dtype=object)
enum = 0
real_lans = []
zeros = ["zero-eng","zero-maml","zero-x-ne","zero-x-maml"]
fews = ["english","maml","x-ne","x-maml"]
for i, lan in enumerate(languages):
if i in train_lans or 'ulg' in lan:
print(lan)
for j,setup in enumerate(zeros):
#print(zeros[])
lookup = self.filelist[self.lookup[setup]]
mean_comp = self.filelist[self.lookup[fews[j]]].get_mean_sd_low(lan,7)[0]*100
if type(lookup) is str:
if 'en' in lookup:
mean_comp = tran_expen[i]*100
else:
#print(lan, i, tran_expmix[i]*100,mean_comp)
mean_setup = tran_expmix[i]*100
else:
mean_setup = lookup.get_mean_sd_low(lan,7)[0]*100
diffs[enum,j] = mean_comp - mean_setup
pvalue = self.accuracy_significance[lan][fews[j]][setup]
pvalues[enum,j] = pvalue
labels[enum, j] = str(round(diffs[enum,j],2)) + ('*' if pvalues[enum,j] < 0.01 else '')
enum += 1
real_lans.append(languages_readable[i])
fig, ax = plt.subplots()
print(labels)
rdgn = sns.diverging_palette(h_neg=10, h_pos=250, s=99, l=55, sep=3, as_cmap=True)
#labels = np.array([['A','B'],['C','D'],['E','F']])
g = sns.heatmap(diffs, annot=labels, ax=ax, fmt = '',
cmap=rdgn, vmin=-3, center=0, vmax=6)
# We want to show all ticks...
ax.set_yticklabels(real_lans, rotation=1)
ax.set_xticklabels(fews, horizontalalignment='center')
#for low in [0,1,2,3,9,14]:
# ax.get_yticklabels()[low].set_color("red")
ax.set_xlabel("Model")
#g.set_axis_labels("Baseline", "Language")
#ax.set_xticks(np.arange(5))
#ax.set_yticks(np.arange(len(real_lans)))
# ... and label them with the respective list entries.
#ax.set_xticklabels(setups)
#ax.set_yticklabels(real_lans)
#im, cbar = tools.heatmap(diffs, real_lans, setups, ax=ax,
# cmap="RdYlGn", vmin=28, center=0, vmax=-5)
# texts = tools.annotate_heatmap(im, pvalues, valfmt="{x:.1f}", fontsize=8)
#ax.set_title("X-MAML Improvement" + (" (Zero-Shot)" if "zero" in experiment_list[0] else " (Few-Shot) "))
ax.set_title("Improvement of Few-Shot over Zero-Shot", fontsize=11)
fig.tight_layout()
plt.show()
def get_results(self, which):
model = self.filelist[which]
print("Doing", model.name)
l = len(model.las_lowlr['UD_Arabic-PADT'])
self.correlations[model.name] = []
for index in range(l):
filename = str(which) + str(index) + '.csv'
with open(filename,'w') as f:
f.write('language,' + model.name + '\n')
for lan in languages:
if 'Bulg' not in lan:
with open(filename, 'a') as f:
f.write(lan + ',')
f.write(str(model.las_lowlr[lan][index]))
f.write('\n')
sim = Similarities(uriel.lang2iso, uriel.feature_names, uriel.expMix, filename)
table = sim.create_table()
self.correlations[model.name].append(table)
return self.correlations[model.name]
def compare_correlations(self):
if not os.path.exists('correlations.pickle'):
for i in range(len(self.filelist[:-2])):
self.get_results(i)
with open("correlations.pickle", "wb") as f:
pickle.dump(self.correlations, f)
else:
with open('correlations.pickle', 'rb') as f:
self.correlations = pickle.load(f)
bigtable = np.zeros((8*5,8))
enum = -1
yticklabels = []
for lan in uriel.COMPARE_LANS:
self.correlation_significance[lan] = {}
for feature in ["syntax_knn"]:
enum += 1
yticklabels.append(lan) #+"_"+feature)
for j, name1 in enumerate(self.names[:-2]):
self.correlation_significance[lan][name1] = {}
bigtable[enum,j] = np.mean(np.array([d[name1][lan][feature] for d in self.correlations[name1]]))
for name2 in self.names[:-2]:
if name1 != name2:
lang = uriel.iso2lang[lan]
#print(type(name1))
#if name1 == 'eng': name1 = "english"
#if name2 == 'eng': name2 = "english"
#print(self.correlations[name1])
array1 = [d[name1][lan][feature] for d in self.correlations[name1]]
array2 = [d[name2][lan][feature] for d in self.correlations[name2]]
p_value = stats.ttest_ind(array1,array2 ,equal_var=False).pvalue
self.correlation_significance[lan][name1][name2] = p_value
#if p_value < 0.1:
with open("hi222.txt", "a") as f:
f.write(lang+' '+feature+' '+name1+' '+name2 + ' ')
f.write(str(round(np.mean(np.array(array1)),4)) + ' ')
f.write(str(round(np.mean(np.array(array2)),4)) + ' ')
f.write(str(p_value))
f.write('\n')
fig, ax = plt.subplots()
rdgn = sns.diverging_palette(145, 280, s=85, l=25, n=7, as_cmap=True) #sns.diverging_palette(h_neg=10, h_pos=250, s=99, l=55, sep=3, as_cmap=True)
#labels = np.array([['A','B'],['C','D'],['E','F']])
        g = sns.heatmap(np.array(bigtable[:8])[[3, 1, 0, 2, 4, 5, 6, 7]], annot=True, ax=ax,
cmap=rdgn, vmin=-1, center=0, vmax=1)
ax.set_yticks(np.arange(len(yticklabels))+0.5, )
ax.set_xticks(np.arange(len(self.filelist[:-2]))+0.5)
ax.set_yticklabels(yticklabels, rotation=1, verticalalignment='center')
ax.set_xticklabels([b.name for b in self.filelist[:-2]], rotation=30, horizontalalignment='center')
ax.set_xlabel("Model")
ax.set_ylabel("Language for syntax features")
plt.show()
f = FileAnalysis(["finetune24_MAML_0.0001_TrueVAL_averaging"], "bad")
english = FileAnalysis(["ONLY_averaging"], "english")
maml = FileAnalysis(["metalearn24_MAMLC_0.001_5e-05_True_3VAL_averaging",
"metalearn24_MAMLC2_0.001_5e-05_True_3VAL_averaging"
"metalearn24_MAML9C_0.001_5e-05_True_3VAL_averaging",
"metalearn24_MAML10C_0.001_5e-05_True_3VAL_averaging"], "maml")
ne = FileAnalysis([
"finetune24_seed1_0.0001_TrueVAL_averaging",
"finetune24_seed6_0.0001_TrueVAL_averaging",
"finetune24_seed7_0.0001_TrueVAL_averaging", # 7
"finetune24_seed8_0.0001_TrueVAL_averaging"
],"x-ne")
xmaml = FileAnalysis([ "metalearn24_seed1_0.001_5e-05_True_3VAL_averaging",
"metalearn24_seed2_0.001_5e-05_True_3VAL_averaging",
"metalearn24_seed5_0.001_5e-05_True_3VAL_averaging",
"metalearn24_seed6_0.001_5e-05_True_3VAL_averaging"],"x-maml")
zerone = FileAnalysis(["finetune24_seed7_0.0001_TrueVAL",
"finetune24_seed6_0.0001_TrueVAL",
"finetune24_seed8_0.0001_TrueVAL"
], "zero-x-ne", zero=True)
zeroen = FileAnalysis(["english"],"zero-eng", zero=True)
zerox = FileAnalysis(["metalearn24_seed1_0.001_5e-05_True_3VAL",
"metalearn24_seed2_0.001_5e-05_True_3VAL",
"metalearn24_seed5_0.001_5e-05_True_3VAL",
"metalearn24_seed6_0.001_5e-05_True_3VAL"], "zero-x-maml", zero=True)
zeromaml = FileAnalysis(["metalearn24_MAMLC_0.001_5e-05_True_3VAL",
"metalearn24_MAMLC2_0.001_5e-05_True_3VAL"
"metalearn24_MAML9C_0.001_5e-05_True_3VAL",
"metalearn24_MAML10C_0.001_5e-05_True_3VAL"], "zero-maml", zero=True)
# Our Meta Analysis class
meta = MetaListAnalysis(
[english,maml,ne, xmaml, zeroen, zeromaml, zerone, zerox, "tran-en", "tran-mix"],
["english","maml","x-ne","x-maml","zero-eng", "zero-maml", "zero-x-ne", "zero-x-maml", "tran-en", "tran-mix"])
"""Latex"""
#meta.print_latex("all_lans.tex", print_sd=True)
#meta.print_latex("train_lans.tex", True, print_sd=True)
#meta.print_latex("test_lans_small.tex",)
#meta.print_latex("train_lans_small.tex", True,)
"""Plotting"""
#meta.plot_diffs()
#meta.plot_diffs_pairwise()
"""Getting p-values for each two columns"""
#meta.compare_two_columns("english","x-maml")
#meta.compare_two_columns("maml","x-maml")
#meta.compare_two_columns("x-ne","x-maml")
#meta.compare_two_columns("zeroen","zerox")
#meta.compare_two_columns("zerone","zerox")
#meta.compare_two_columns("zerox","x-maml")
"""Doing correlation study"""
#meta.compare_correlations()
|
the-stack_106_19757
|
from monsterfactory import MonsterFactory
from character import NPC
import random
class Location:
name = ""
adjacentLocations = []
npcs = []
monsters = []
boss = None
def getName(self):
return self.name
def getNPC(self):
if len(self.npcs) > 0:
return self.npcs[random.randint(0, len(self.npcs) - 1)]
else:
print("There are no NPCs in this location.")
return None
def getMonster(self):
if len(self.monsters) > 0:
return self.monsters.pop()
elif self.boss is not None:
print("With all other monsters dead, the area boss appeared!")
boss = self.boss
self.boss = None
return boss
else:
print("There are no monsters in this location.")
return None
def addMonster(self, monster):
self.monsters.append(monster)
random.shuffle(self.monsters)
def getAdjacentLocationNames(self):
names = []
for location in self.adjacentLocations:
names.append(location.getName())
return names
def getAdjacentLocation(self, name):
for location in self.adjacentLocations:
if name == location.getName():
return location
return None
class TutorialIsland(Location):
npcs = []
def __init__(self):
self.name = "Tutorial Island"
self.npcs = [NPC("Old Man", [
"Hello adventurer, welcome to Tutorial Island!",
"This game is only a proof of concept, so there's not much content.",
"Try changing location to Tutorial Dungeon and I'll tell you about fighting."
])]
tutorialDungeon = RandomDungeon("Tutorial Dungeon", self, 1,[NPC("Old Man", [
"Hello again, welcome to the Tutorial Dungeon!",
"Here there will be a few monsters for you to fight by selecting \"battle\".",
"Once you've beaten all the regular monsters, a powerful boss monster will appear!",
"Why don't you try it out?"
])
])
self.adjacentLocations = [tutorialDungeon]
class RandomDungeon(Location):
monsters = []
npcs = []
boss = None
def __init__(self, name, prevLocation, level, npcs = []):
self.name = name
self.adjacentLocations = [prevLocation]
monstercount = random.randint(1, 5)
monsterFactory = MonsterFactory()
for loop in range(monstercount):
self.monsters.append(monsterFactory.createMonster(level))
self.boss = monsterFactory.createBoss(level)
self.npcs = npcs
|
the-stack_106_19761
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# import seaborn as sns
import shutil
import time
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
root_path = os.path.dirname(os.path.abspath('__file__'))
data_path = root_path + '/Xianyang/data/'
model_path = root_path+'/Xianyang/projects/lstm/'
if not os.path.exists(model_path):
os.makedirs(model_path)
import sys
sys.path.append(root_path)
from dump_data import dum_pred_results
from plot_utils import plot_rela_pred,plot_history,plot_error_distribution,plot_convergence_,plot_evaluations_,plot_objective_
from variables import lags
"""NOTE:WILL DEPRICATED IN THE FUTURE PROJECTS"""
RE_TRAIN = False
WARM_UP = False
EARLY_STOPING = True
INITIAL_EPOCH = 6000
# For initialize weights and bias
SEED=1
# set hyper-parameters
EPS=5000 #epochs number
#########--1--###########
LR=0.007 # learning rate: 0.0001, 0.0003, 0.0007, 0.001, 0.003, 0.007, 0.01, 0.03, 0.1
#########--2--############
HU1 = 16 #hidden units for hidden layer 1
BS = 512 #batch size
#########--3--###########
HL = 1 #hidden layers
HU2 = 16 #hidden units for hidden layer 2
DC=0.000 #decay rate of learning rate
#########--4--###########
DR1=0.0 #dropout rate for hidden layer 1
DR2=0.0 #dropout rate for hidden layer 2
# 1.Import the sampled normalized data set from disk
train = pd.read_csv(data_path+'minmax_unsample_train.csv')
dev = pd.read_csv(data_path+'minmax_unsample_dev.csv')
test = pd.read_csv(data_path+'minmax_unsample_test.csv')
# Split features from labels
train_x = train
train_y = train.pop('Y')
train_y = train_y.as_matrix()
dev_x = dev
dev_y = dev.pop('Y')
dev_y = dev_y.as_matrix()
test_x = test
test_y = test.pop('Y')
test_y = test_y.as_matrix()
# reshape the input features for LSTM
train_x = (train_x.values).reshape(train_x.shape[0],1,train_x.shape[1])
dev_x = (dev_x.values).reshape(dev_x.shape[0],1,dev_x.shape[1])
test_x = (test_x.values).reshape(test_x.shape[0],1,test_x.shape[1])
# 2.Build LSTM model with keras
# set the hyper-parameters
LEARNING_RATE=LR
EPOCHS = EPS
BATCH_SIZE = BS
if HL==2:
HIDDEN_UNITS = [HU1,HU2]
DROP_RATE = [DR1,DR2]
else:
HIDDEN_UNITS = [HU1]
DROP_RATE = [DR1]
DECAY_RATE = DC
MODEL_NAME = 'LSTM-LR['+str(LEARNING_RATE)+\
']-HU'+str(HIDDEN_UNITS)+\
'-EPS['+str(EPOCHS)+\
']-BS['+str(BATCH_SIZE)+\
']-DR'+str(DROP_RATE)+\
'-DC['+str(DECAY_RATE)+\
']-SEED['+str(SEED)+']'
# RESUME_TRAINING = True
def build_model():
if HL==2:
model = keras.Sequential(
[
layers.LSTM(HIDDEN_UNITS[0],activation=tf.nn.relu,return_sequences=True,input_shape=(train_x.shape[1],train_x.shape[2])),
layers.Dropout(DROP_RATE[0], noise_shape=None, seed=None),
layers.LSTM(HIDDEN_UNITS[1],activation=tf.nn.relu,return_sequences=False), # first hidden layer if hasnext hidden layer
layers.Dropout(DROP_RATE[1], noise_shape=None, seed=None),
# layers.LSTM(20,activation=tf.nn.relu,return_sequence=True),
layers.Dense(1)
]
)
else:
model = keras.Sequential(
[
layers.LSTM(HIDDEN_UNITS[0],activation=tf.nn.relu,input_shape=(train_x.shape[1],train_x.shape[2])),
layers.Dropout(DROP_RATE[0], noise_shape=None, seed=None),
# layers.LSTM(HIDDEN_UNITS1,activation=tf.nn.relu,return_sequences=True,input_shape=(train_x.shape[1],train_x.shape[2])), # first hidden layer if hasnext hidden layer
# layers.LSTM(20,activation=tf.nn.relu,return_sequence=True),
layers.Dense(1)
]
)
optimizer = keras.optimizers.Adam(LEARNING_RATE,
decay=DECAY_RATE
)
model.compile(loss='mean_squared_error',
optimizer=optimizer,
metrics=['mean_absolute_error','mean_squared_error'])
return model
# set model's parameters restore path
cp_path = model_path+MODEL_NAME+'\\'
if not os.path.exists(cp_path):
os.makedirs(cp_path)
checkpoint_path = model_path+MODEL_NAME+'\\cp.ckpt' #restore only the latest checkpoint after every update
# checkpoint_path = model_path+'cp-{epoch:04d}.ckpt' #restore the checkpoint every period=x epoch
checkpoint_dir = os.path.dirname(checkpoint_path)
print('checkpoint dir:{}'.format(checkpoint_dir))
cp_callback = keras.callbacks.ModelCheckpoint(checkpoint_path,save_best_only=True,mode='min',save_weights_only=True,verbose=1)
# cp_callback = keras.callbacks.ModelCheckpoint(checkpoint_path,save_weights_only=True,period=5,verbose=1)
# if not RESUME_TRAINING:
# print("Removing previous artifacts...")
# shutil.rmtree(checkpoint_dir, ignore_errors=True)
# else:
# print("Resuming training...")
# initialize a new model
model = build_model()
model.summary() #print a simple description for the model
"""
# Evaluate before training or load trained weights and biases
loss, mae, mse = model.evaluate(test_x, test_y, verbose=1)
# Try the model with initial weights and biases
example_batch = train_x[:10]
example_result = model.predict(example_batch)
print(example_result)
"""
# 3.Train the model
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
files = os.listdir(checkpoint_dir)
from tensorflow.keras.callbacks import ReduceLROnPlateau,EarlyStopping
# reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=10, mode='auto')
reduce_lr = ReduceLROnPlateau(monitor='val_loss',min_lr=0.00001,factor=0.2, verbose=1,patience=10, mode='min')
early_stopping = EarlyStopping(monitor='val_loss', mode='min',verbose=1,patience=100,restore_best_weights=True)
warm_dir = 'LSTM-LR['+str(LEARNING_RATE)+\
']-HU'+str(HIDDEN_UNITS)+\
'-EPS['+str(INITIAL_EPOCH)+\
']-BS['+str(BATCH_SIZE)+\
']-DR'+str(DROP_RATE)+\
'-DC['+str(DECAY_RATE)+\
']-SEED['+str(SEED)+']'
print("WARM UP PATH:{}".format(os.path.exists(model_path+warm_dir)))
# Training models
if RE_TRAIN: # Retraining the LSTM model
print('retrain the model')
if EARLY_STOPING:
start = time.process_time()
history = model.fit(train_x,train_y,epochs=EPOCHS,batch_size=BATCH_SIZE ,validation_data=(dev_x,dev_y),verbose=1,
callbacks=[
cp_callback,
early_stopping,
])
end = time.process_time()
time_cost = end-start
else:
start = time.process_time()
history = model.fit(train_x,train_y,epochs=EPOCHS,batch_size=BATCH_SIZE ,validation_data=(dev_x,dev_y),verbose=1,callbacks=[cp_callback])
end =time.process_time()
time_cost = end-start
# # Visualize the model's training progress using the stats stored in the history object
hist = pd.DataFrame(history.history)
hist.to_csv(model_path+MODEL_NAME+'-HISTORY-TRAIN-TEST.csv')
hist['epoch']=history.epoch
# print(hist.tail())
plot_history(history,model_path+MODEL_NAME+'-MAE-ERRORS-TRAINTEST.png',model_path+MODEL_NAME+'-MSE-ERRORS-TRAINTEST.png')
elif len(files)==0: # The current model has not been trained
if os.path.exists(model_path+warm_dir) and WARM_UP: # Training the model using the trained weights and biases as initialized parameters
print('WARM UP FROM EPOCH '+str(INITIAL_EPOCH)) # Warm up from the last epoch of the target model
prev_time_cost = (pd.read_csv(model_path+warm_dir+'.csv')['time_cost'])[0]
warm_path=model_path+warm_dir+'\\cp.ckpt'
model.load_weights(warm_path)
if EARLY_STOPING:
start=time.process_time()
history = model.fit(train_x,train_y,initial_epoch=INITIAL_EPOCH,epochs=EPOCHS,batch_size=BATCH_SIZE ,validation_data=(dev_x,dev_y),verbose=1,
callbacks=[
cp_callback,
early_stopping,
])
end = time.process_time()
time_cost = end - start + prev_time_cost
else:
start = time.process_time()
history = model.fit(train_x,train_y,initial_epoch=INITIAL_EPOCH,epochs=EPOCHS,batch_size=BATCH_SIZE ,validation_data=(dev_x,dev_y),verbose=1,
callbacks=[
cp_callback,
])
end = time.process_time()
time_cost = end - start + prev_time_cost
hist = pd.DataFrame(history.history)
hist.to_csv(model_path+MODEL_NAME+'-HISTORY-TRAIN-TEST.csv')
hist['epoch']=history.epoch
# print(hist.tail())
plot_history(history,model_path+MODEL_NAME+'-MAE-ERRORS-TRAINTEST.png',model_path+MODEL_NAME+'-MSE-ERRORS-TRAINTEST.png')
else: # Training entirely new model
print('new train')
if EARLY_STOPING:
start = time.process_time()
history = model.fit(train_x,train_y,epochs=EPOCHS,batch_size=BATCH_SIZE ,validation_data=(dev_x,dev_y),verbose=1,callbacks=[
cp_callback,
early_stopping,
])
end = time.process_time()
time_cost = end -start
else:
start = time.process_time()
history = model.fit(train_x,train_y,epochs=EPOCHS,batch_size=BATCH_SIZE ,validation_data=(dev_x,dev_y),verbose=1,
callbacks=[
cp_callback,
])
end = time.process_time()
time_cost = end - start
hist = pd.DataFrame(history.history)
hist.to_csv(model_path+MODEL_NAME+'-HISTORY-TRAIN-TEST.csv')
hist['epoch']=history.epoch
# print(hist.tail())
plot_history(history,model_path+MODEL_NAME+'-MAE-ERRORS-TRAINTEST.png',model_path+MODEL_NAME+'-MSE-ERRORS-TRAINTEST.png')
else:
print('#'*10+'Already Trained')
time_cost = (pd.read_csv(model_path+MODEL_NAME+'.csv')['time_cost'])[0]
model.load_weights(checkpoint_path)
# loss, mae, mse = model.evaluate(test_x, test_y, verbose=1)
"""
# Evaluate after training or load trained weights and biases
loss, mae, mse = model.evaluate(test_x, test_y, verbose=1)
print("Testing set Mean Abs Error: {:5.2f} ".format(mae))
"""
# 4. Predict the model
# load the unsample data
train_predictions = model.predict(train_x).flatten()
dev_predictions = model.predict(dev_x).flatten()
test_predictions = model.predict(test_x).flatten()
# renormized the predictions and labels
# load the normalized traindev indicators
norm = pd.read_csv(data_path+'norm_unsample_id.csv')
sMax = norm['series_max'][norm.shape[0]-1]
sMin = norm['series_min'][norm.shape[0]-1]
print('Series min:{}'.format(sMin))
print('Series max:{}'.format(sMax))
train_y = np.multiply(train_y + 1,sMax - sMin) / 2 + sMin
train_predictions = np.multiply(train_predictions + 1,sMax - sMin) / 2 + sMin
train_predictions[train_predictions<0.0]=0.0
dev_y = np.multiply(dev_y + 1,sMax - sMin) / 2 + sMin
dev_predictions = np.multiply(dev_predictions + 1,sMax - sMin) / 2 + sMin
dev_predictions[dev_predictions<0.0]=0.0
test_y = np.multiply(test_y + 1,sMax - sMin) / 2 + sMin
test_predictions = np.multiply(test_predictions + 1,sMax - sMin) / 2 + sMin
test_predictions[test_predictions<0.0]=0.0
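# Note (added): the three blocks above invert the [-1, 1] min-max scaling used
# during training, i.e. x = (x_norm + 1) * (sMax - sMin) / 2 + sMin, and clip
# negative flow predictions to zero before the metrics are dumped and plotted.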
dum_pred_results(
path = model_path+MODEL_NAME+'.csv',
train_y = train_y,
train_predictions=train_predictions,
dev_y = dev_y,
dev_predictions = dev_predictions,
test_y = test_y,
test_predictions = test_predictions,
time_cost=time_cost,
)
plot_rela_pred(train_y,train_predictions,fig_savepath=model_path + MODEL_NAME + '-TRAIN-PRED.png')
plot_rela_pred(dev_y,dev_predictions,fig_savepath=model_path + MODEL_NAME + "-DEV-PRED.png")
plot_rela_pred(test_y,test_predictions,fig_savepath=model_path + MODEL_NAME + "-TEST-PRED.png")
plot_error_distribution(test_predictions,test_y,model_path+MODEL_NAME+'-ERROR-DSTRI.png')
|
the-stack_106_19762
|
# -*- coding: utf-8 -*-
"""
emmett_mongorest.serializers
----------------------------
Provides REST serialization tools
:copyright: 2019 Giovanni Barillari
:license: BSD-3-Clause
"""
from emmett_rest.serializers import Serializer as _Serializer
class Serializer(_Serializer):
def __init__(self, model):
self._model = model
if not self.attributes:
self.attributes = []
for fieldname in self._model.__fields__.keys():
self.attributes.append(fieldname)
self.attributes += self.include
for el in self.exclude:
if el in self.attributes:
self.attributes.remove(el)
_attrs_override_ = []
for key in dir(self):
if not key.startswith('_') and callable(getattr(self, key)):
_attrs_override_.append(key)
self._attrs_override_ = _attrs_override_
self._init()
def id(self, obj):
return str(obj['_id'])
|
the-stack_106_19763
|
# -*- coding: utf-8 -*-
'''
Management of Linux logical volumes
===================================
A state module to manage LVMs
.. code-block:: yaml
/dev/sda:
lvm.pv_present
my_vg:
lvm.vg_present:
- devices: /dev/sda
lvroot:
lvm.lv_present:
- vgname: my_vg
- size: 10G
- stripes: 5
- stripesize: 8K
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os
# Import salt libs
import salt.utils.path
from salt.ext import six
def __virtual__():
'''
Only load the module if lvm is installed
'''
if salt.utils.path.which('lvm'):
return 'lvm'
return False
def pv_present(name, **kwargs):
'''
Set a physical device to be used as an LVM physical volume
name
The device name to initialize.
kwargs
Any supported options to pvcreate. See
:mod:`linux_lvm <salt.modules.linux_lvm>` for more details.
'''
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if __salt__['lvm.pvdisplay'](name):
ret['comment'] = 'Physical Volume {0} already present'.format(name)
elif __opts__['test']:
ret['comment'] = 'Physical Volume {0} is set to be created'.format(name)
ret['result'] = None
return ret
else:
changes = __salt__['lvm.pvcreate'](name, **kwargs)
if __salt__['lvm.pvdisplay'](name):
ret['comment'] = 'Created Physical Volume {0}'.format(name)
ret['changes']['created'] = changes
else:
ret['comment'] = 'Failed to create Physical Volume {0}'.format(name)
ret['result'] = False
return ret
def pv_absent(name):
'''
Ensure that a Physical Device is not being used by lvm
name
The device name to initialize.
'''
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if not __salt__['lvm.pvdisplay'](name):
ret['comment'] = 'Physical Volume {0} does not exist'.format(name)
elif __opts__['test']:
ret['comment'] = 'Physical Volume {0} is set to be removed'.format(name)
ret['result'] = None
return ret
else:
changes = __salt__['lvm.pvremove'](name)
if __salt__['lvm.pvdisplay'](name):
ret['comment'] = 'Failed to remove Physical Volume {0}'.format(name)
ret['result'] = False
else:
ret['comment'] = 'Removed Physical Volume {0}'.format(name)
ret['changes']['removed'] = changes
return ret
def vg_present(name, devices=None, **kwargs):
'''
Create an LVM volume group
name
The volume group name to create
devices
A list of devices that will be added to the volume group
kwargs
Any supported options to vgcreate. See
:mod:`linux_lvm <salt.modules.linux_lvm>` for more details.
'''
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if isinstance(devices, six.string_types):
devices = devices.split(',')
if __salt__['lvm.vgdisplay'](name):
ret['comment'] = 'Volume Group {0} already present'.format(name)
for device in devices:
realdev = os.path.realpath(device)
pvs = __salt__['lvm.pvdisplay'](realdev, real=True)
if pvs and pvs.get(realdev, None):
if pvs[realdev]['Volume Group Name'] == name:
ret['comment'] = '{0}\n{1}'.format(
ret['comment'],
'{0} is part of Volume Group'.format(device))
elif pvs[realdev]['Volume Group Name'] in ['', '#orphans_lvm2']:
__salt__['lvm.vgextend'](name, device)
pvs = __salt__['lvm.pvdisplay'](realdev, real=True)
if pvs[realdev]['Volume Group Name'] == name:
ret['changes'].update(
{device: 'added to {0}'.format(name)})
else:
ret['comment'] = '{0}\n{1}'.format(
ret['comment'],
'{0} could not be added'.format(device))
ret['result'] = False
else:
ret['comment'] = '{0}\n{1}'.format(
ret['comment'],
'{0} is part of {1}'.format(
device, pvs[realdev]['Volume Group Name']))
ret['result'] = False
else:
ret['comment'] = '{0}\n{1}'.format(
ret['comment'],
'pv {0} is not present'.format(device))
ret['result'] = False
elif __opts__['test']:
ret['comment'] = 'Volume Group {0} is set to be created'.format(name)
ret['result'] = None
return ret
else:
changes = __salt__['lvm.vgcreate'](name, devices, **kwargs)
if __salt__['lvm.vgdisplay'](name):
ret['comment'] = 'Created Volume Group {0}'.format(name)
ret['changes']['created'] = changes
else:
ret['comment'] = 'Failed to create Volume Group {0}'.format(name)
ret['result'] = False
return ret
def vg_absent(name):
'''
Remove an LVM volume group
name
The volume group to remove
'''
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if not __salt__['lvm.vgdisplay'](name):
ret['comment'] = 'Volume Group {0} already absent'.format(name)
elif __opts__['test']:
ret['comment'] = 'Volume Group {0} is set to be removed'.format(name)
ret['result'] = None
return ret
else:
changes = __salt__['lvm.vgremove'](name)
if not __salt__['lvm.vgdisplay'](name):
ret['comment'] = 'Removed Volume Group {0}'.format(name)
ret['changes']['removed'] = changes
else:
ret['comment'] = 'Failed to remove Volume Group {0}'.format(name)
ret['result'] = False
return ret
def lv_present(name,
vgname=None,
size=None,
extents=None,
snapshot=None,
pv='',
thinvolume=False,
thinpool=False,
force=False,
**kwargs):
'''
Create a new logical volume
name
The name of the logical volume
vgname
The volume group name for this logical volume
size
The initial size of the logical volume
extents
The number of logical extents to allocate
snapshot
The name of the snapshot
pv
The physical volume to use
kwargs
Any supported options to lvcreate. See
:mod:`linux_lvm <salt.modules.linux_lvm>` for more details.
.. versionadded:: to_complete
thinvolume
Logical volume is thinly provisioned
thinpool
Logical volume is a thin pool
.. versionadded:: 2018.3.0
force
Assume yes to all prompts
'''
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
_snapshot = None
if snapshot:
_snapshot = name
name = snapshot
if thinvolume:
lvpath = '/dev/{0}/{1}'.format(vgname.split('/')[0], name)
else:
lvpath = '/dev/{0}/{1}'.format(vgname, name)
if __salt__['lvm.lvdisplay'](lvpath, quiet=True):
ret['comment'] = 'Logical Volume {0} already present'.format(name)
elif __opts__['test']:
ret['comment'] = 'Logical Volume {0} is set to be created'.format(name)
ret['result'] = None
return ret
else:
changes = __salt__['lvm.lvcreate'](name,
vgname,
size=size,
extents=extents,
snapshot=_snapshot,
pv=pv,
thinvolume=thinvolume,
thinpool=thinpool,
force=force,
**kwargs)
if __salt__['lvm.lvdisplay'](lvpath):
ret['comment'] = 'Created Logical Volume {0}'.format(name)
ret['changes']['created'] = changes
else:
ret['comment'] = 'Failed to create Logical Volume {0}. Error: {1}'.format(name, changes)
ret['result'] = False
return ret
def lv_absent(name, vgname=None):
'''
Remove a given existing logical volume from a named existing volume group
name
The logical volume to remove
vgname
The volume group name
'''
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
lvpath = '/dev/{0}/{1}'.format(vgname, name)
if not __salt__['lvm.lvdisplay'](lvpath):
ret['comment'] = 'Logical Volume {0} already absent'.format(name)
elif __opts__['test']:
ret['comment'] = 'Logical Volume {0} is set to be removed'.format(name)
ret['result'] = None
return ret
else:
changes = __salt__['lvm.lvremove'](name, vgname)
if not __salt__['lvm.lvdisplay'](lvpath):
ret['comment'] = 'Removed Logical Volume {0}'.format(name)
ret['changes']['removed'] = changes
else:
ret['comment'] = 'Failed to remove Logical Volume {0}'.format(name)
ret['result'] = False
return ret
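# Hedged usage sketch (not part of this module): how these states are typically
# referenced from an SLS file. The state IDs, device names and sizes below are
# examples only.
#
#   data-vg:
#     lvm.vg_present:
#       - devices: /dev/sdb1
#
#   data-lv:
#     lvm.lv_present:
#       - vgname: data-vg
#       - size: 10G
#
#   old-lv:
#     lvm.lv_absent:
#       - vgname: data-vg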
|
the-stack_106_19764
|
# -*- test-case-name: twisted.conch.test.test_manhole -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Asynchronous local terminal input handling
@author: Jp Calderone
"""
import os, tty, sys, termios
from twisted.internet import reactor, stdio, protocol, defer
from twisted.python import failure, reflect, log
from twisted.conch.insults.insults import ServerProtocol
from twisted.conch.manhole import ColoredManhole
class UnexpectedOutputError(Exception):
pass
class TerminalProcessProtocol(protocol.ProcessProtocol):
def __init__(self, proto):
self.proto = proto
self.onConnection = defer.Deferred()
def connectionMade(self):
self.proto.makeConnection(self)
self.onConnection.callback(None)
self.onConnection = None
def write(self, data):
"""
Write to the terminal.
@param data: Data to write.
@type data: L{bytes}
"""
self.transport.write(data)
def outReceived(self, data):
"""
Receive data from the terminal.
@param data: Data received.
@type data: L{bytes}
"""
self.proto.dataReceived(data)
def errReceived(self, data):
"""
Report an error.
@param data: Data to include in L{Failure}.
@type data: L{bytes}
"""
self.transport.loseConnection()
if self.proto is not None:
self.proto.connectionLost(failure.Failure(UnexpectedOutputError(data)))
self.proto = None
def childConnectionLost(self, childFD):
if self.proto is not None:
self.proto.childConnectionLost(childFD)
def processEnded(self, reason):
if self.proto is not None:
self.proto.connectionLost(reason)
self.proto = None
class ConsoleManhole(ColoredManhole):
"""
A manhole protocol specifically for use with L{stdio.StandardIO}.
"""
def connectionLost(self, reason):
"""
When the connection is lost, there is nothing more to do. Stop the
reactor so that the process can exit.
"""
reactor.stop()
def runWithProtocol(klass):
fd = sys.__stdin__.fileno()
oldSettings = termios.tcgetattr(fd)
tty.setraw(fd)
try:
p = ServerProtocol(klass)
stdio.StandardIO(p)
reactor.run()
finally:
termios.tcsetattr(fd, termios.TCSANOW, oldSettings)
os.write(fd, b"\r\x1bc\r")
def main(argv=None):
log.startLogging(open('child.log', 'w'))
if argv is None:
argv = sys.argv[1:]
if argv:
klass = reflect.namedClass(argv[0])
else:
klass = ConsoleManhole
runWithProtocol(klass)
if __name__ == '__main__':
main()
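# Usage sketch (hypothetical invocation, file name assumed): running this module
# directly attaches a ColoredManhole REPL to the current terminal; passing a
# dotted class name as the first argument selects a different terminal protocol,
# e.g.
#
#   python thisfile.py twisted.conch.manhole.Manhole
#
# Key presses flow through ServerProtocol to the chosen protocol, and the original
# termios settings are restored when the reactor stops.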
|
the-stack_106_19765
|
#!/usr/bin/env python
## HiCPack
## Author(s): Mohsen Naghipourfar
## Contact: [email protected] or [email protected]
## This software is distributed without any guarantee under the terms of the GNU General
## MIT License
"""
Script to keep only valid pairs when no restriction enzyme is used (i.e. DNase or Micro-HiC)
"""
import getopt
import os
import re
import sys
import pysam
def usage():
"""Usage function"""
print("Usage : python mapped_2hic_dnase.py")
print("-r/--mappedReadsFile <BAM/SAM file of mapped reads>")
print("[-o/--outputDir] <Output directory. Default is current directory>")
print("[-d/--minCisDist] <Minimum distance between intrachromosomal contact to consider>")
print(
"[-g/--gtag] <Genotype tag. If specified, this tag will be reported in the valid pairs output for allele specific classification>")
print(
"[-a/--all] <Write all additional output files, with information about the discarded reads (self-circle, dangling end, etc.)>")
print("[-v/--verbose] <Verbose>")
print("[-h/--help] <Help>")
return
def get_args():
"""Get argument"""
try:
opts, args = getopt.getopt(
sys.argv[1:],
"r:o:d:g:avh",
["mappedReadsFile=",
"outputDir=", "minDist=", "gatg", "all", "verbose", "help"])
except getopt.GetoptError:
usage()
sys.exit(-1)
return opts
def get_read_strand(read):
"""
Conversion of read position to naive strand representation
Parameters
----------
read : list
list of aligned reads
"""
strand = "+"
if read.is_reverse:
strand = "-"
return strand
def get_read_pos(read, st="start"):
"""
Return the read position (zero-based) used for the intersection with
the restriction fragment
The 5' end is not a good choice for the reverse reads (which contain part
of the restriction site, and thus overlap the next restriction fragment)
Using the left-most position (5' for forward, 3' for reverse) or the
middle of the read should work, but the middle of the read might be
safer
Parameters
-----------
read : list
list of aligned reads
"""
if st == "middle":
pos = read.pos + int(read.alen / 2)
elif st == "start":
pos = get_read_start(read)
elif st == "left":
pos = read.pos
return pos
def get_read_start(read):
"""
Return the 5' end of the read
"""
if read.is_reverse:
pos = read.pos + read.alen - 1
else:
pos = read.pos
return pos
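# Worked illustration of the helpers above (comment only, nothing executed):
# for a reverse read with pos=100 and alen=50, get_read_start returns the 5' end
# 100 + 50 - 1 = 149, get_read_pos(read, st="middle") returns 100 + 25 = 125 and
# st="left" returns 100. A forward read at the same coordinates returns 100 for
# both the "start" and "left" modes.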
def get_ordered_reads(read1, read2):
"""
Reorient reads
The sequencing is usually not oriented. Reorient the reads so that r1 is
always before r2
read1 = [AlignedRead]
read2 = [AlignedRead]
"""
if read1.tid == read2.tid:
if get_read_pos(read1) < get_read_pos(read2):
r1 = read1
r2 = read2
else:
r1 = read2
r2 = read1
else:
if read1.tid < read2.tid:
r1 = read1
r2 = read2
else:
r1 = read2
r2 = read1
return r1, r2
def isIntraChrom(read1, read2):
"""
Return True if the read pair is intrachromosomal
read1 : [AlignedRead]
read2 : [AlignedRead]
"""
if read1.tid == read2.tid:
return True
else:
return False
def get_valid_orientation(read1, read2):
"""
Both reads are expected to be on different restriction fragments
Check the orientation of reads ->-> / <-<- / -><- / <-->
read1 : [AlignedRead]
read2 : [AlignedRead]
"""
# Get oriented reads
r1, r2 = get_ordered_reads(read1, read2)
direction = None
if get_read_strand(r1) == "+" and get_read_strand(r2) == "+":
direction = "FF"
elif get_read_strand(r1) == "-" and get_read_strand(r2) == "-":
direction = "RR"
elif get_read_strand(r1) == "+" and get_read_strand(r2) == "-":
direction = "FR"
elif get_read_strand(r1) == "-" and get_read_strand(r2) == "+":
direction = "RF"
return direction
def get_cis_dist(read1, read2):
"""
Calculate the size of the DNA fragment library
read1 : [AlignedRead]
read2 : [AlignedRead]
"""
# Get oriented reads
##r1, r2 = get_ordered_reads(read1, read2)
dist = None
if not read1.is_unmapped and not read2.is_unmapped:
## Contact distances can be calculated for intrachromosomal reads only
if isIntraChrom(read1, read2):
r1pos = get_read_pos(read1)
r2pos = get_read_pos(read2)
dist = abs(r1pos - r2pos)
return dist
def get_read_tag(read, tag):
for t in read.tags:
if t[0] == tag:
return t[1]
return None
if __name__ == "__main__":
# Read command line arguments
opts = get_args()
verbose = False
allOutput = False
minInsertSize = None
maxInsertSize = None
minDist = None
outputDir = "."
gtag = None
if len(opts) == 0:
usage()
sys.exit()
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-r", "--mappedReadsFile"):
mappedReadsFile = arg
elif opt in ("-o", "--outputDir"):
outputDir = arg
elif opt in ("-d", "--minCisDist"):
minDist = arg
elif opt in ("-g", "--gtag"):
gtag = arg
elif opt in ("-a", "--all"):
allOutput = True
elif opt in ("-v", "--verbose"):
verbose = True
else:
assert False, "unhandled option"
# Verbose mode
if verbose:
print("## overlapMapped2HiCFragments.py")
print("## mappedReadsFile=", mappedReadsFile)
print("## minCisDist=", minDist)
print("## allOuput=", allOutput)
print("## verbose=", verbose, "\n")
# Initialize variables
reads_counter = 0
valid_counter = 0
valid_counter_FF = 0
valid_counter_RR = 0
valid_counter_FR = 0
valid_counter_RF = 0
single_counter = 0
dump_counter = 0
filt_counter = 0
# AS counter
G1G1_ascounter = 0
G2G2_ascounter = 0
G1U_ascounter = 0
UG1_ascounter = 0
G2U_ascounter = 0
UG2_ascounter = 0
G1G2_ascounter = 0
G2G1_ascounter = 0
UU_ascounter = 0
CF_ascounter = 0
baseReadsFile = os.path.basename(mappedReadsFile)
baseReadsFile = re.sub(r'\.bam$|\.sam$', '', baseReadsFile)
# Open handlers for output files
handle_valid = open(outputDir + '/' + baseReadsFile + '.validPairs', 'w')
if allOutput:
handle_dump = open(outputDir + '/' + baseReadsFile + '.DumpPairs', 'w')
handle_single = open(outputDir + '/' + baseReadsFile + '.SinglePairs', 'w')
handle_filt = open(outputDir + '/' + baseReadsFile + '.FiltPairs', 'w')
# Read the SAM/BAM file
if verbose:
print("## Opening SAM/BAM file '", mappedReadsFile, "'...")
samfile = pysam.Samfile(mappedReadsFile, "rb")
# Reads are 0-based too (for both SAM and BAM format)
# Loop on all reads
for read in samfile.fetch(until_eof=True):
reads_counter += 1
cur_handler = None
interactionType = None
htag = ""
# First mate
if read.is_read1:
r1 = read
if not r1.is_unmapped:
r1_chrom = samfile.getrname(r1.tid)
else:
r1_chrom = None
# Second mate
elif read.is_read2:
r2 = read
if not r2.is_unmapped:
r2_chrom = samfile.getrname(r2.tid)
else:
r2_chrom = None
if isIntraChrom(r1, r2):
dist = get_cis_dist(r1, r2)
else:
dist = None
# Check singleton
if r1.is_unmapped or r2.is_unmapped:
interactionType = "SI"
single_counter += 1
cur_handler = handle_single if allOutput else None
# Check Distance criteria - Filter
if (minDist is not None and dist is not None and dist < int(minDist)):
interactionType = "FILT"
filt_counter += 1
cur_handler = handle_filt if allOutput else None
# By default pair is valid
if interactionType is None:
interactionType = "VI"
valid_counter += 1
cur_handler = handle_valid
validType = get_valid_orientation(r1, r2)
if validType == "RR":
valid_counter_RR += 1
elif validType == "FF":
valid_counter_FF += 1
elif validType == "FR":
valid_counter_FR += 1
elif validType == "RF":
valid_counter_RF += 1
else:
interactionType = "DUMP"
dump_counter += 1
cur_handler = handle_dump if allOutput else None
# Split valid pairs based on XA tag
if gtag is not None:
r1as = get_read_tag(r1, gtag)
r2as = get_read_tag(r2, gtag)
if r1as == 1 and r2as == 1:
G1G1_ascounter += 1
elif r1as == 2 and r2as == 2:
G2G2_ascounter += 1
elif r1as == 1 and r2as == 0:
G1U_ascounter += 1
elif r1as == 0 and r2as == 1:
UG1_ascounter += 1
elif r1as == 2 and r2as == 0:
G2U_ascounter += 1
elif r1as == 0 and r2as == 2:
UG2_ascounter += 1
elif r1as == 1 and r2as == 2:
G1G2_ascounter += 1
elif r1as == 2 and r2as == 1:
G2G1_ascounter += 1
elif r1as == 3 or r2as == 3:
CF_ascounter += 1
else:
UU_ascounter += 1
if cur_handler is not None:
if not r1.is_unmapped and not r2.is_unmapped:
##reorient reads to ease duplicates removal
or1, or2 = get_ordered_reads(r1, r2)
or1_chrom = samfile.getrname(or1.tid)
or2_chrom = samfile.getrname(or2.tid)
##reset as tag now that the reads are oriented
r1as = get_read_tag(or1, gtag)
r2as = get_read_tag(or2, gtag)
if gtag is not None:
htag = str(r1as) + "-" + str(r2as)
cur_handler.write(
or1.qname + "\t" +
or1_chrom + "\t" +
str(get_read_pos(or1) + 1) + "\t" +
str(get_read_strand(or1)) + "\t" +
or2_chrom + "\t" +
str(get_read_pos(or2) + 1) + "\t" +
str(get_read_strand(or2)) + "\t" +
"NA" + "\t" + ##dist
"NA" + "\t" + ##resfrag1
"NA" + "\t" + ##resfrag2
str(or1.mapping_quality) + "\t" +
str(or2.mapping_quality) + "\t" +
str(htag) + "\n")
elif r2.is_unmapped and not r1.is_unmapped:
cur_handler.write(
r1.qname + "\t" +
r1_chrom + "\t" +
str(get_read_pos(r1) + 1) + "\t" +
str(get_read_strand(r1)) + "\t" +
"*" + "\t" +
"*" + "\t" +
"*" + "\t" +
"*" + "\t" +
"*" + "\t" +
"*" + "\t" +
str(r1.mapping_quality) + "\t" +
"*" + "\n")
elif r1.is_unmapped and not r2.is_unmapped:
cur_handler.write(
r2.qname + "\t" +
"*" + "\t" +
"*" + "\t" +
"*" + "\t" +
r2_chrom + "\t" +
str(get_read_pos(r2) + 1) + "\t" +
str(get_read_strand(r2)) + "\t" +
"*" + "\t" +
"*" + "\t" +
"*" + "\t" +
"*" + "\t" +
str(r2.mapping_quality) + "\n")
if (reads_counter % 100000 == 0 and verbose):
print("##", reads_counter)
# Close handler
handle_valid.close()
if allOutput:
handle_dump.close()
handle_single.close()
handle_filt.close()
# Write stats file
handle_stat = open(outputDir + '/' + baseReadsFile + '.RSstat', 'w')
handle_stat.write("## Hi-C processing - no restriction fragments\n")
handle_stat.write("Valid_interaction_pairs\t" + str(valid_counter) + "\n")
handle_stat.write(
"Valid_interaction_pairs_FF\t" + str(valid_counter_FF) + "\n")
handle_stat.write(
"Valid_interaction_pairs_RR\t" + str(valid_counter_RR) + "\n")
handle_stat.write(
"Valid_interaction_pairs_RF\t" + str(valid_counter_RF) + "\n")
handle_stat.write(
"Valid_interaction_pairs_FR\t" + str(valid_counter_FR) + "\n")
handle_stat.write("Single-end_pairs\t" + str(single_counter) + "\n")
handle_stat.write("Filtered_pairs\t" + str(filt_counter) + "\n")
handle_stat.write("Dumped_pairs\t" + str(dump_counter) + "\n")
## Write AS report
if gtag is not None:
handle_stat.write("## ======================================\n")
handle_stat.write("## Allele specific information\n")
handle_stat.write("Valid_pairs_from_ref_genome_(1-1)\t" + str(G1G1_ascounter) + "\n")
handle_stat.write("Valid_pairs_from_ref_genome_with_one_unassigned_mate_(0-1/1-0)\t" + str(
UG1_ascounter + G1U_ascounter) + "\n")
handle_stat.write("Valid_pairs_from_alt_genome_(2-2)\t" + str(G2G2_ascounter) + "\n")
handle_stat.write("Valid_pairs_from_alt_genome_with_one_unassigned_mate_(0-2/2-0)\t" + str(
UG2_ascounter + G2U_ascounter) + "\n")
handle_stat.write(
"Valid_pairs_from_alt_and_ref_genome_(1-2/2-1)\t" + str(G1G2_ascounter + G2G1_ascounter) + "\n")
handle_stat.write("Valid_pairs_with_both_unassigned_mated_(0-0)\t" + str(UU_ascounter) + "\n")
handle_stat.write("Valid_pairs_with_at_least_one_conflicting_mate_(3-)\t" + str(CF_ascounter) + "\n")
handle_stat.close()
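# Example invocation (hypothetical file names, shown for illustration only):
#
#   python mapped_2hic_dnase.py -r sample_bwt2pairs.bam -o ./results -d 1000 -a -v
#
# This writes sample_bwt2pairs.validPairs and sample_bwt2pairs.RSstat into
# ./results, plus the .DumpPairs/.SinglePairs/.FiltPairs files when -a is given.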
|
the-stack_106_19766
|
from dama.reg.wrappers import XGB, SKLP
import xgboost as xgb
class Xgboost(XGB):
def prepare_model(self, obj_fn=None, num_steps: int = 0, model_params=None, batch_size: int = None):
data_train = self.ds[self.data_groups["data_train_group"]].to_ndarray()
target_train = self.ds[self.data_groups["target_train_group"]].to_ndarray()
data_val = self.ds[self.data_groups["data_validation_group"]].to_ndarray()
target_val = self.ds[self.data_groups["target_validation_group"]].to_ndarray()
d_train = xgb.DMatrix(data_train, target_train)
d_valid = xgb.DMatrix(data_val, target_val)
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
nrounds = num_steps
xgb_model = xgb.train(model_params, d_train, nrounds, watchlist, early_stopping_rounds=int(nrounds/2),
feval=obj_fn, maximize=True, verbose_eval=100)
return self.ml_model(xgb, bst=xgb_model)
def feature_importance(self):
pass
class XgboostSKL(SKLP):
def prepare_model(self, obj_fn=None, num_steps: int = 0, model_params=None, batch_size: int = None):
if model_params is None:
model_params = dict(seed=3, n_estimators=25)
model = xgb.XGBClassifier(**model_params)
reg_model = model.fit(self.ds[self.data_groups["data_train_group"]].to_ndarray(),
self.ds[self.data_groups["target_train_group"]].to_ndarray())
return self.ml_model(reg_model)
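# Hedged, self-contained sketch of the same xgb.train pattern used in
# Xgboost.prepare_model above, run on synthetic data. The synthetic arrays and
# parameter values are assumptions for illustration, not part of the wrapper.
if __name__ == "__main__":
    import numpy as np
    X, y = np.random.rand(200, 5), np.random.rand(200)
    d_train = xgb.DMatrix(X[:150], y[:150])
    d_valid = xgb.DMatrix(X[150:], y[150:])
    watchlist = [(d_train, 'train'), (d_valid, 'valid')]
    bst = xgb.train({'objective': 'reg:squarederror', 'max_depth': 3},
                    d_train, num_boost_round=50, evals=watchlist,
                    early_stopping_rounds=25, verbose_eval=False)
    print(bst.best_iteration)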
|
the-stack_106_19767
|
import decimal
from typing import (
Tuple,
)
abi_decimal_context = decimal.Context(prec=999)
ZERO = decimal.Decimal(0)
TEN = decimal.Decimal(10)
def ceil32(x: int) -> int:
return x if x % 32 == 0 else x + 32 - (x % 32)
def compute_unsigned_integer_bounds(num_bits: int) -> Tuple[int, int]:
return (
0,
2 ** num_bits - 1,
)
def compute_signed_integer_bounds(num_bits: int) -> Tuple[int, int]:
return (
-1 * 2 ** (num_bits - 1),
2 ** (num_bits - 1) - 1,
)
def compute_unsigned_fixed_bounds(
num_bits: int,
frac_places: int,
) -> Tuple[decimal.Decimal, decimal.Decimal]:
int_upper = compute_unsigned_integer_bounds(num_bits)[1]
with decimal.localcontext(abi_decimal_context):
upper = decimal.Decimal(int_upper) * TEN ** -frac_places
return ZERO, upper
def compute_signed_fixed_bounds(
num_bits: int,
frac_places: int,
) -> Tuple[decimal.Decimal, decimal.Decimal]:
int_lower, int_upper = compute_signed_integer_bounds(num_bits)
with decimal.localcontext(abi_decimal_context):
exp = TEN ** -frac_places
lower = decimal.Decimal(int_lower) * exp
upper = decimal.Decimal(int_upper) * exp
return lower, upper
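# Hedged usage sketch of the helpers above; the demo values are chosen for
# illustration and guarded so importing this module stays side-effect free.
if __name__ == "__main__":
    assert ceil32(33) == 64
    assert compute_unsigned_integer_bounds(8) == (0, 255)
    assert compute_signed_integer_bounds(8) == (-128, 127)
    lower, upper = compute_signed_fixed_bounds(8, 1)
    assert (lower, upper) == (decimal.Decimal('-12.8'), decimal.Decimal('12.7'))
    print('bounds helpers behave as expected')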
|
the-stack_106_19769
|
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib.auth import views as auth_views
from django.conf import settings
from authentication import views as views
from vehicle import views as v_views
from driver import views as d_views
from accidents import views as a_views
from gpsdevices import views as as_views
urlpatterns = [
# url(r'^django_popup_view_field/', include('django_popup_view_field.urls', namespace="django_popup_view_field")),
url(r'^$', auth_views.login, {'template_name': 'login.html'}, name='login'),
url(r'^home$', views.index, name='index'),
url(r'^location$', views.location, name='location'),
url(r'^route$', views.route, name='routes'),
url(r'^report$', views.report, name='reports'),
url(r'^schedule$', views.schedule, name='schedules'),
url(r'^vehicles_list/$', v_views.VehicleListView.as_view(), name='vehicles_list'),
url(r'^add_vehicle/$', v_views.CreateVehicleView.as_view(), name='add_vehicle'),
url(r'^edit_vehicle/(?P<pk>\d+)/$', v_views.UpdateVehicleView.as_view(), name='edit_vehicle'),
url(r'^view_vehicle/(?P<pk>\d+)/$', v_views.VehicleView.as_view(), name='view_vehicle'),
# url(r'^view_vehicle/(?P<pk>\d+)/$', v_views.view_vehicle, name='view_vehicle'),
url(r'^delete_vehicle/(?P<pk>\d+)/$', v_views.DeleteVehicleView.as_view(), name='delete_vehicle'),
# url(r'^records_list/$', v_views.MonthlyListView.as_view(), name='records_list'),
# url(r'^add_record/$', v_views.CreateMonthlyView.as_view(), name='add_record'),
# url(r'^edit_record/(?P<pk>\d+)/$', v_views.UpdateMonthlyView.as_view(), name='edit_record'),
# url(r'^record/(?P<pk>\d+)/$', v_views.MonthlyView.as_view(), name='view_record'),
url(r'^drivers_list/$', d_views.DriverListView.as_view(), name='drivers_list'),
url(r'^add_driver/$',d_views.CreateDriverView.as_view(), name='add_driver'),
url(r'^edit_driver/(?P<pk>\d+)/$', d_views.UpdateDriverView.as_view(), name='edit_driver'),
url(r'^driver/(?P<pk>\d+)/$', d_views.DriverView.as_view(), name='view_driver'),
url(r'^delete_driver/(?P<pk>\d+)/$', d_views.DeleteDriverView.as_view(), name='delete_driver'),
url(r'^gpsdevices_list/$', as_views.GPSDeviceListView.as_view(), name='gpsdevices_list'),
url(r'^add_gpsdevice/$', as_views.CreateGPSDeviceView.as_view(), name='add_gpsdevice'),
url(r'^edit_gpsdevice/(?P<pk>\d+)/$', as_views.UpdateGPSDeviceView.as_view(), name='edit_gpsdevice'),
url(r'^gpsdevice/(?P<pk>\d+)/$', as_views.GPSDeviceView.as_view(), name='gpsdevice'),
url(r'^view_gpsdevice/(?P<pk>\d+)/$', as_views.view_gpsdevice, name='view_gpsdevice'),
url(r'^accidents_list/$', a_views.AccidentsListView.as_view(), name='accidents_list'),
url(r'^add_accident/$', a_views.CreateAccidentView.as_view(), name='add_accident'),
url(r'^edit_accident/(?P<pk>\d+)/$', a_views.UpdateAccidentView.as_view(), name='edit_accident'),
url(r'^delete_accident/(?P<pk>\d+)/$', a_views.DeleteAccidentView.as_view(), name='delete_accident'),
url(r'^accident/(?P<pk>\d+)/$', a_views.AccidentView.as_view(), name='view_accident'),
url(r'^accidents/(?P<pk>\d+)/$', a_views.accidentsView, name='accidents'),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATICFILES_DIRS)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
the-stack_106_19771
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteSecurityGroupResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
}
attribute_map = {
}
def __init__(self):
"""DeleteSecurityGroupResponse - a model defined in huaweicloud sdk"""
super(DeleteSecurityGroupResponse, self).__init__()
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteSecurityGroupResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
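# Hedged usage sketch: in practice this object would be returned by the
# corresponding delete call on the service client; constructing it directly here
# is only to show the serialization helpers defined above.
if __name__ == "__main__":
    resp = DeleteSecurityGroupResponse()
    print(resp.to_dict())  # {} -- the delete response carries no body attributes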
|
the-stack_106_19773
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data input for speech commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
from tensorflow.examples.speech_commands_custom import input_data
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class InputDataTest(test.TestCase):
def _getWavData(self):
with self.cached_session() as sess:
sample_data = tf.zeros([1000, 2])
wav_encoder = contrib_audio.encode_wav(sample_data, 16000)
wav_data = sess.run(wav_encoder)
return wav_data
def _saveTestWavFile(self, filename, wav_data):
with open(filename, "wb") as f:
f.write(wav_data)
def _saveWavFolders(self, root_dir, labels, how_many):
wav_data = self._getWavData()
for label in labels:
dir_name = os.path.join(root_dir, label)
os.mkdir(dir_name)
for i in range(how_many):
file_path = os.path.join(dir_name, "some_audio_%d.wav" % i)
self._saveTestWavFile(file_path, wav_data)
def _model_settings(self):
return {
"desired_samples": 160,
"fingerprint_size": 40,
"label_count": 4,
"window_size_samples": 100,
"window_stride_samples": 100,
"dct_coefficient_count": 40,
}
def testPrepareWordsList(self):
words_list = ["a", "b"]
self.assertGreater(
len(input_data.prepare_words_list(words_list, 10.0, 10.0)), len(words_list))
def testWhichSet(self):
self.assertEqual(
input_data.which_set("foo.wav", 10, 10),
input_data.which_set("foo.wav", 10, 10))
self.assertEqual(
input_data.which_set("foo_nohash_0.wav", 10, 10),
input_data.which_set("foo_nohash_1.wav", 10, 10))
@test_util.run_deprecated_v1
def testPrepareDataIndex(self):
tmp_dir = self.get_temp_dir()
self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100)
audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b"],
10, 10, self._model_settings())
self.assertLess(0, audio_processor.set_size("training"))
self.assertTrue("training" in audio_processor.data_index)
self.assertTrue("validation" in audio_processor.data_index)
self.assertTrue("testing" in audio_processor.data_index)
self.assertEqual(input_data.UNKNOWN_WORD_INDEX,
audio_processor.word_to_index["c"])
def testPrepareDataIndexEmpty(self):
tmp_dir = self.get_temp_dir()
self._saveWavFolders(tmp_dir, ["a", "b", "c"], 0)
with self.assertRaises(Exception) as e:
_ = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b"], 10, 10,
self._model_settings())
self.assertTrue("No .wavs found" in str(e.exception))
def testPrepareDataIndexMissing(self):
tmp_dir = self.get_temp_dir()
self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100)
with self.assertRaises(Exception) as e:
_ = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b", "d"], 10,
10, self._model_settings())
self.assertTrue("Expected to find" in str(e.exception))
@test_util.run_deprecated_v1
def testPrepareBackgroundData(self):
tmp_dir = self.get_temp_dir()
background_dir = os.path.join(tmp_dir, "_background_noise_")
os.mkdir(background_dir)
wav_data = self._getWavData()
for i in range(10):
file_path = os.path.join(background_dir, "background_audio_%d.wav" % i)
self._saveTestWavFile(file_path, wav_data)
self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100)
audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b"],
10, 10, self._model_settings())
self.assertEqual(10, len(audio_processor.background_data))
def testLoadWavFile(self):
tmp_dir = self.get_temp_dir()
file_path = os.path.join(tmp_dir, "load_test.wav")
wav_data = self._getWavData()
self._saveTestWavFile(file_path, wav_data)
sample_data = input_data.load_wav_file(file_path)
self.assertIsNotNone(sample_data)
def testSaveWavFile(self):
tmp_dir = self.get_temp_dir()
file_path = os.path.join(tmp_dir, "load_test.wav")
save_data = np.zeros([16000, 1])
input_data.save_wav_file(file_path, save_data, 16000)
loaded_data = input_data.load_wav_file(file_path)
self.assertIsNotNone(loaded_data)
self.assertEqual(16000, len(loaded_data))
@test_util.run_deprecated_v1
def testPrepareProcessingGraph(self):
tmp_dir = self.get_temp_dir()
wav_dir = os.path.join(tmp_dir, "wavs")
os.mkdir(wav_dir)
self._saveWavFolders(wav_dir, ["a", "b", "c"], 100)
background_dir = os.path.join(wav_dir, "_background_noise_")
os.mkdir(background_dir)
wav_data = self._getWavData()
for i in range(10):
file_path = os.path.join(background_dir, "background_audio_%d.wav" % i)
self._saveTestWavFile(file_path, wav_data)
model_settings = {
"desired_samples": 160,
"fingerprint_size": 40,
"label_count": 4,
"window_size_samples": 100,
"window_stride_samples": 100,
"dct_coefficient_count": 40,
}
audio_processor = input_data.AudioProcessor("", wav_dir, 10, 10, ["a", "b"],
10, 10, model_settings)
self.assertIsNotNone(audio_processor.wav_filename_placeholder_)
self.assertIsNotNone(audio_processor.foreground_volume_placeholder_)
self.assertIsNotNone(audio_processor.time_shift_padding_placeholder_)
self.assertIsNotNone(audio_processor.time_shift_offset_placeholder_)
self.assertIsNotNone(audio_processor.background_data_placeholder_)
self.assertIsNotNone(audio_processor.background_volume_placeholder_)
self.assertIsNotNone(audio_processor.mfcc_)
@test_util.run_deprecated_v1
def testGetData(self):
tmp_dir = self.get_temp_dir()
wav_dir = os.path.join(tmp_dir, "wavs")
os.mkdir(wav_dir)
self._saveWavFolders(wav_dir, ["a", "b", "c"], 100)
background_dir = os.path.join(wav_dir, "_background_noise_")
os.mkdir(background_dir)
wav_data = self._getWavData()
for i in range(10):
file_path = os.path.join(background_dir, "background_audio_%d.wav" % i)
self._saveTestWavFile(file_path, wav_data)
model_settings = {
"desired_samples": 160,
"fingerprint_size": 40,
"label_count": 4,
"window_size_samples": 100,
"window_stride_samples": 100,
"dct_coefficient_count": 40,
}
audio_processor = input_data.AudioProcessor("", wav_dir, 10, 10, ["a", "b"],
10, 10, model_settings)
with self.cached_session() as sess:
result_data, result_labels = audio_processor.get_data(
10, 0, model_settings, 0.3, 0.1, 100, "training", sess)
self.assertEqual(10, len(result_data))
self.assertEqual(10, len(result_labels))
@test_util.run_deprecated_v1
def testGetUnprocessedData(self):
tmp_dir = self.get_temp_dir()
wav_dir = os.path.join(tmp_dir, "wavs")
os.mkdir(wav_dir)
self._saveWavFolders(wav_dir, ["a", "b", "c"], 100)
model_settings = {
"desired_samples": 160,
"fingerprint_size": 40,
"label_count": 4,
"window_size_samples": 100,
"window_stride_samples": 100,
"dct_coefficient_count": 40,
}
audio_processor = input_data.AudioProcessor("", wav_dir, 10, 10, ["a", "b"],
10, 10, model_settings)
result_data, result_labels = audio_processor.get_unprocessed_data(
10, model_settings, "training")
self.assertEqual(10, len(result_data))
self.assertEqual(10, len(result_labels))
if __name__ == "__main__":
test.main()
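# The tests above can be run directly (assuming a TF 1.x environment with
# tensorflow.examples.speech_commands_custom importable, and a file name of your
# choosing for this module):
#
#   python input_data_test.py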
|
the-stack_106_19774
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from django.http import HttpResponseRedirect, HttpResponse
from psms.models import Users
#由于这个版本没有mixin类
class MiddlewareMixin(object):
def __init__(self, get_response=None):
self.get_response = get_response
super(MiddlewareMixin, self).__init__()
def __call__(self, request):
response = None
if hasattr(self, 'process_request'):
response = self.process_request(request)
if not response:
response = self.get_response(request)
if hasattr(self, 'process_response'):
response = self.process_response(request, response)
return response
class CountAddstu(MiddlewareMixin):
def process_request(self, request):
# Centralised login check for every request
# Returning None (or returning nothing) lets the request continue; these paths need no check
if request.path == '/psms/login/' or request.path == '/psms/regist/' or request.is_ajax():
return None
ticket = request.COOKIES.get('ticket')
if not ticket:
return HttpResponseRedirect('/psms/login/')
users = Users.objects.filter(u_ticket=ticket)
if not users:
return HttpResponseRedirect('/psms/login/')
# Attach the user to request.user so later code can check it directly
# Note: Django already provides its own request.user attribute
request.user = users[0].u_name
def process_exception(self,request,exception):
# Whenever an exception is raised, log the user out and report that the server is under
# maintenance; notifying the user (e.g. by SMS) is only simulated here.
response = HttpResponse('Network error...')
# Delete the login cookie
response.delete_cookie('ticket')
# Clear the session
try:
del request.session['user']
except:
pass
return response
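# Hedged example of enabling this middleware; the dotted path
# 'psms.middleware.CountAddstu' is an assumption about where this file lives in
# the project.
#
#   MIDDLEWARE_CLASSES = [
#       ...,
#       'psms.middleware.CountAddstu',
#   ]
#
# On Django 1.10+ the same entry would go into the MIDDLEWARE setting instead.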
|
the-stack_106_19775
|
"""Base class for directed graphs."""
from copy import deepcopy
import networkx as nx
from networkx.classes.graph import Graph
from networkx.classes.coreviews import AdjacencyView
from networkx.classes.reportviews import (
OutEdgeView,
InEdgeView,
DiDegreeView,
InDegreeView,
OutDegreeView,
)
from networkx.exception import NetworkXError
import networkx.convert as convert
class DiGraph(Graph):
"""
Base class for directed graphs.
A DiGraph stores nodes and edges with optional data, or attributes.
DiGraphs hold directed edges. Self loops are allowed but multiple
(parallel) edges are not.
Nodes can be arbitrary (hashable) Python objects with optional
key/value attributes. By convention `None` is not used as a node.
Edges are represented as links between nodes with optional
key/value attributes.
Parameters
----------
incoming_graph_data : input graph (optional, default: None)
Data to initialize graph. If None (default) an empty
graph is created. The data can be any format that is supported
by the to_networkx_graph() function, currently including edge list,
dict of dicts, dict of lists, NetworkX graph, NumPy matrix
or 2d ndarray, SciPy sparse matrix, or PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
Graph
MultiGraph
MultiDiGraph
OrderedDiGraph
Examples
--------
Create an empty graph structure (a "null graph") with no nodes and
no edges.
>>> G = nx.DiGraph()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node(1)
Add the nodes from any container (a list, dict, set or
even the lines from a file or the nodes from another graph).
>>> G.add_nodes_from([2, 3])
>>> G.add_nodes_from(range(100, 110))
>>> H = nx.path_graph(10)
>>> G.add_nodes_from(H)
In addition to strings and integers any hashable Python object
(except None) can represent a node, e.g. a customized node object,
or even another Graph.
>>> G.add_node(H)
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(1, 2)
a list of edges,
>>> G.add_edges_from([(1, 2), (1, 3)])
or a collection of edges,
>>> G.add_edges_from(H.edges)
If some edges connect nodes not yet in the graph, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Attributes:**
Each graph, node, and edge can hold key/value attribute pairs
in an associated attribute dictionary (the keys must be hashable).
By default these are empty, but can be added or changed using
add_edge, add_node or direct manipulation of the attribute
dictionaries named graph, node and edge respectively.
>>> G = nx.DiGraph(day="Friday")
>>> G.graph
{'day': 'Friday'}
Add node attributes using add_node(), add_nodes_from() or G.nodes
>>> G.add_node(1, time="5pm")
>>> G.add_nodes_from([3], time="2pm")
>>> G.nodes[1]
{'time': '5pm'}
>>> G.nodes[1]["room"] = 714
>>> del G.nodes[1]["room"] # remove attribute
>>> list(G.nodes(data=True))
[(1, {'time': '5pm'}), (3, {'time': '2pm'})]
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edges.
>>> G.add_edge(1, 2, weight=4.7)
>>> G.add_edges_from([(3, 4), (4, 5)], color="red")
>>> G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})])
>>> G[1][2]["weight"] = 4.7
>>> G.edges[1, 2]["weight"] = 4
Warning: we protect the graph data structure by making `G.edges[1, 2]` a
read-only dict-like structure. However, you can assign to attributes
in e.g. `G.edges[1, 2]`. Thus, use 2 sets of brackets to add/change
data attributes: `G.edges[1, 2]['weight'] = 4`
(For multigraphs: `MG.edges[u, v, key][name] = value`).
**Shortcuts:**
Many common graph features allow python syntax to speed reporting.
>>> 1 in G # check if node in graph
True
>>> [n for n in G if n < 3] # iterate through nodes
[1, 2]
>>> len(G) # number of nodes in graph
5
Often the best way to traverse all edges of a graph is via the neighbors.
The neighbors are reported as an adjacency-dict `G.adj` or `G.adjacency()`
>>> for n, nbrsdict in G.adjacency():
... for nbr, eattr in nbrsdict.items():
... if "weight" in eattr:
... # Do something useful with the edges
... pass
But the edges reporting object is often more convenient:
>>> for u, v, weight in G.edges(data="weight"):
... if weight is not None:
... # Do something useful with the edges
... pass
**Reporting:**
Simple graph information is obtained using object-attributes and methods.
Reporting usually provides views instead of containers to reduce memory
usage. The views update as the graph is updated similarly to dict-views.
The objects `nodes`, `edges` and `adj` provide access to data attributes
via lookup (e.g. `nodes[n]`, `edges[u, v]`, `adj[u][v]`) and iteration
(e.g. `nodes.items()`, `nodes.data('color')`,
`nodes.data('color', default='blue')` and similarly for `edges`)
Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`.
For details on these and other miscellaneous methods, see below.
**Subclasses (Advanced):**
The Graph class uses a dict-of-dict-of-dict data structure.
The outer dict (node_dict) holds adjacency information keyed by node.
The next dict (adjlist_dict) represents the adjacency information and holds
edge data keyed by neighbor. The inner dict (edge_attr_dict) represents
the edge data and holds edge attribute values keyed by attribute names.
Each of these three dicts can be replaced in a subclass by a user defined
dict-like object. In general, the dict-like features should be
maintained but extra features can be added. To replace one of the
dicts create a new graph class by changing the class(!) variable
holding the factory for that dict-like structure. The variable names are
node_dict_factory, node_attr_dict_factory, adjlist_inner_dict_factory,
adjlist_outer_dict_factory, edge_attr_dict_factory and graph_attr_dict_factory.
node_dict_factory : function, (default: dict)
Factory function to be used to create the dict containing node
attributes, keyed by node id.
It should require no arguments and return a dict-like object
node_attr_dict_factory: function, (default: dict)
Factory function to be used to create the node attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object
adjlist_outer_dict_factory : function, (default: dict)
Factory function to be used to create the outer-most dict
in the data structure that holds adjacency info keyed by node.
It should require no arguments and return a dict-like object.
adjlist_inner_dict_factory : function, optional (default: dict)
Factory function to be used to create the adjacency list
dict which holds edge data keyed by neighbor.
It should require no arguments and return a dict-like object
edge_attr_dict_factory : function, optional (default: dict)
Factory function to be used to create the edge attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object.
graph_attr_dict_factory : function, (default: dict)
Factory function to be used to create the graph attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object.
Typically, if your extension doesn't impact the data structure, all
methods will be inherited without issue except `to_directed`/`to_undirected`.
By default these methods create a DiGraph/Graph class and you probably
want them to create your extension of a DiGraph/Graph. To facilitate
this we define two class variables that you can set in your subclass.
to_directed_class : callable, (default: DiGraph or MultiDiGraph)
Class to create a new graph structure in the `to_directed` method.
If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used.
to_undirected_class : callable, (default: Graph or MultiGraph)
Class to create a new graph structure in the `to_undirected` method.
If `None`, a NetworkX class (Graph or MultiGraph) is used.
**Subclassing Example**
Create a low memory graph class that effectively disallows edge
attributes by using a single attribute dict for all edges.
This reduces the memory used, but you lose edge attributes.
>>> class ThinGraph(nx.Graph):
... all_edge_dict = {"weight": 1}
...
... def single_edge_dict(self):
... return self.all_edge_dict
...
... edge_attr_dict_factory = single_edge_dict
>>> G = ThinGraph()
>>> G.add_edge(2, 1)
>>> G[2][1]
{'weight': 1}
>>> G.add_edge(2, 2)
>>> G[2][1] is G[2][2]
True
Please see :mod:`~networkx.classes.ordered` for more examples of
creating graph subclasses by overwriting the base class `dict` with
a dictionary-like object.
"""
def __init__(self, incoming_graph_data=None, **attr):
"""Initialize a graph with edges, name, or graph attributes.
Parameters
----------
incoming_graph_data : input graph (optional, default: None)
Data to initialize graph. If None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
convert
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G = nx.Graph(name="my graph")
>>> e = [(1, 2), (2, 3), (3, 4)] # list of edges
>>> G = nx.Graph(e)
Arbitrary graph attribute pairs (key=value) may be assigned
>>> G = nx.Graph(e, day="Friday")
>>> G.graph
{'day': 'Friday'}
"""
self.graph_attr_dict_factory = self.graph_attr_dict_factory
self.node_dict_factory = self.node_dict_factory
self.node_attr_dict_factory = self.node_attr_dict_factory
self.adjlist_outer_dict_factory = self.adjlist_outer_dict_factory
self.adjlist_inner_dict_factory = self.adjlist_inner_dict_factory
self.edge_attr_dict_factory = self.edge_attr_dict_factory
self.graph = self.graph_attr_dict_factory() # dictionary for graph attributes
self._node = self.node_dict_factory() # dictionary for node attr
# We store two adjacency lists:
# the predecessors of node n are stored in the dict self._pred
# the successors of node n are stored in the dict self._succ=self._adj
self._adj = self.adjlist_outer_dict_factory() # empty adjacency dict
self._pred = self.adjlist_outer_dict_factory() # predecessor
self._succ = self._adj # successor
# attempt to load graph with data
if incoming_graph_data is not None:
convert.to_networkx_graph(incoming_graph_data, create_using=self)
# load graph attributes (must be after convert)
self.graph.update(attr)
@property
def adj(self):
"""Graph adjacency object holding the neighbors of each node.
This object is a read-only dict-like structure with node keys
and neighbor-dict values. The neighbor-dict is keyed by neighbor
to the edge-data-dict. So `G.adj[3][2]['color'] = 'blue'` sets
the color of the edge `(3, 2)` to `"blue"`.
Iterating over G.adj behaves like a dict. Useful idioms include
`for nbr, datadict in G.adj[n].items():`.
The neighbor information is also provided by subscripting the graph.
So `for nbr, foovalue in G[node].data('foo', default=1):` works.
For directed graphs, `G.adj` holds outgoing (successor) info.
"""
return AdjacencyView(self._succ)
@property
def succ(self):
"""Graph adjacency object holding the successors of each node.
This object is a read-only dict-like structure with node keys
and neighbor-dict values. The neighbor-dict is keyed by neighbor
to the edge-data-dict. So `G.succ[3][2]['color'] = 'blue'` sets
the color of the edge `(3, 2)` to `"blue"`.
Iterating over G.succ behaves like a dict. Useful idioms include
`for nbr, datadict in G.succ[n].items():`. A data-view not provided
by dicts also exists: `for nbr, foovalue in G.succ[node].data('foo'):`
and a default can be set via a `default` argument to the `data` method.
The neighbor information is also provided by subscripting the graph.
So `for nbr, foovalue in G[node].data('foo', default=1):` works.
For directed graphs, `G.adj` is identical to `G.succ`.
"""
return AdjacencyView(self._succ)
@property
def pred(self):
"""Graph adjacency object holding the predecessors of each node.
This object is a read-only dict-like structure with node keys
and neighbor-dict values. The neighbor-dict is keyed by neighbor
to the edge-data-dict. So `G.pred[2][3]['color'] = 'blue'` sets
the color of the edge `(3, 2)` to `"blue"`.
Iterating over G.pred behaves like a dict. Useful idioms include
`for nbr, datadict in G.pred[n].items():`. A data-view not provided
by dicts also exists: `for nbr, foovalue in G.pred[node].data('foo'):`
A default can be set via a `default` argument to the `data` method.
"""
return AdjacencyView(self._pred)
def add_node(self, node_for_adding, **attr):
"""Add a single node `node_for_adding` and update node attributes.
Parameters
----------
node_for_adding : node
A node can be any hashable Python object except None.
attr : keyword arguments, optional
Set or change node attributes using key=value.
See Also
--------
add_nodes_from
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_node(1)
>>> G.add_node("Hello")
>>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)])
>>> G.add_node(K3)
>>> G.number_of_nodes()
3
Use keywords set/change node attributes:
>>> G.add_node(1, size=10)
>>> G.add_node(3, weight=0.4, UTM=("13S", 382871, 3972649))
Notes
-----
A hashable object is one that can be used as a key in a Python
dictionary. This includes strings, numbers, tuples of strings
and numbers, etc.
On many platforms hashable items also include mutables such as
NetworkX Graphs, though one should be careful that the hash
doesn't change on mutables.
"""
if node_for_adding not in self._succ:
self._succ[node_for_adding] = self.adjlist_inner_dict_factory()
self._pred[node_for_adding] = self.adjlist_inner_dict_factory()
attr_dict = self._node[node_for_adding] = self.node_attr_dict_factory()
attr_dict.update(attr)
else: # update attr even if node already exists
self._node[node_for_adding].update(attr)
def add_nodes_from(self, nodes_for_adding, **attr):
"""Add multiple nodes.
Parameters
----------
nodes_for_adding : iterable container
A container of nodes (list, dict, set, etc.).
OR
A container of (node, attribute dict) tuples.
Node attributes are updated using the attribute dict.
attr : keyword arguments, optional (default= no attributes)
Update attributes for all nodes in nodes.
Node attributes specified in nodes as a tuple take
precedence over attributes specified via keyword arguments.
See Also
--------
add_node
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_nodes_from("Hello")
>>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)])
>>> G.add_nodes_from(K3)
>>> sorted(G.nodes(), key=str)
[0, 1, 2, 'H', 'e', 'l', 'o']
Use keywords to update specific node attributes for every node.
>>> G.add_nodes_from([1, 2], size=10)
>>> G.add_nodes_from([3, 4], weight=0.4)
Use (node, attrdict) tuples to update attributes for specific nodes.
>>> G.add_nodes_from([(1, dict(size=11)), (2, {"color": "blue"})])
>>> G.nodes[1]["size"]
11
>>> H = nx.Graph()
>>> H.add_nodes_from(G.nodes(data=True))
>>> H.nodes[1]["size"]
11
"""
for n in nodes_for_adding:
try:
newnode = n not in self._node
newdict = attr
except TypeError:
n, ndict = n
newnode = n not in self._node
newdict = attr.copy()
newdict.update(ndict)
if newnode:
self._succ[n] = self.adjlist_inner_dict_factory()
self._pred[n] = self.adjlist_inner_dict_factory()
self._node[n] = self.node_attr_dict_factory()
self._node[n].update(newdict)
def remove_node(self, n):
"""Remove node n.
Removes the node n and all adjacent edges.
Attempting to remove a non-existent node will raise an exception.
Parameters
----------
n : node
A node in the graph
Raises
------
NetworkXError
If n is not in the graph.
See Also
--------
remove_nodes_from
Examples
--------
>>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> list(G.edges)
[(0, 1), (1, 2)]
>>> G.remove_node(1)
>>> list(G.edges)
[]
"""
try:
nbrs = self._succ[n]
del self._node[n]
except KeyError as e: # NetworkXError if n not in self
raise NetworkXError(f"The node {n} is not in the digraph.") from e
for u in nbrs:
del self._pred[u][n] # remove all edges n-u in digraph
del self._succ[n] # remove node from succ
for u in self._pred[n]:
del self._succ[u][n] # remove all edges n-u in digraph
del self._pred[n] # remove node from pred
def remove_nodes_from(self, nodes):
"""Remove multiple nodes.
Parameters
----------
nodes : iterable container
A container of nodes (list, dict, set, etc.). If a node
in the container is not in the graph it is silently ignored.
See Also
--------
remove_node
Examples
--------
>>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> e = list(G.nodes)
>>> e
[0, 1, 2]
>>> G.remove_nodes_from(e)
>>> list(G.nodes)
[]
"""
for n in nodes:
try:
succs = self._succ[n]
del self._node[n]
for u in succs:
del self._pred[u][n] # remove all edges n-u in digraph
del self._succ[n] # now remove node
for u in self._pred[n]:
del self._succ[u][n] # remove all edges n-u in digraph
del self._pred[n] # now remove node
except KeyError:
pass # silent failure on remove
def add_edge(self, u_of_edge, v_of_edge, **attr):
"""Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph.
Edge attributes can be specified with keywords or by directly
accessing the edge's attribute dictionary. See examples below.
Parameters
----------
u_of_edge, v_of_edge : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edges_from : add a collection of edges
Notes
-----
Adding an edge that already exists updates the edge data.
Many NetworkX algorithms designed for weighted graphs use
an edge attribute (by default `weight`) to hold a numerical value.
Examples
--------
The following all add the edge e=(1, 2) to graph G:
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> e = (1, 2)
>>> G.add_edge(1, 2) # explicit two-node form
>>> G.add_edge(*e) # single edge as tuple of two nodes
>>> G.add_edges_from([(1, 2)]) # add edges from iterable container
Associate data to edges using keywords:
>>> G.add_edge(1, 2, weight=3)
>>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
For non-string attribute keys, use subscript notation.
>>> G.add_edge(1, 2)
>>> G[1][2].update({0: 5})
>>> G.edges[1, 2].update({0: 5})
"""
u, v = u_of_edge, v_of_edge
# add nodes
if u not in self._succ:
self._succ[u] = self.adjlist_inner_dict_factory()
self._pred[u] = self.adjlist_inner_dict_factory()
self._node[u] = self.node_attr_dict_factory()
if v not in self._succ:
self._succ[v] = self.adjlist_inner_dict_factory()
self._pred[v] = self.adjlist_inner_dict_factory()
self._node[v] = self.node_attr_dict_factory()
# add the edge
datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
datadict.update(attr)
self._succ[u][v] = datadict
self._pred[v][u] = datadict
def add_edges_from(self, ebunch_to_add, **attr):
"""Add all the edges in ebunch_to_add.
Parameters
----------
ebunch_to_add : container of edges
Each edge given in the container will be added to the
graph. The edges must be given as 2-tuples (u, v) or
3-tuples (u, v, d) where d is a dictionary containing edge data.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edge : add a single edge
add_weighted_edges_from : convenient way to add weighted edges
Notes
-----
Adding the same edge twice has no effect but any edge data
will be updated when each duplicate edge is added.
Edge attributes specified in an ebunch take precedence over
attributes specified via keyword arguments.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples
>>> e = zip(range(0, 3), range(1, 4))
>>> G.add_edges_from(e) # Add the path graph 0-1-2-3
Associate data to edges
>>> G.add_edges_from([(1, 2), (2, 3)], weight=3)
>>> G.add_edges_from([(3, 4), (1, 4)], label="WN2898")
"""
for e in ebunch_to_add:
ne = len(e)
if ne == 3:
u, v, dd = e
elif ne == 2:
u, v = e
dd = {}
else:
raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.")
if u not in self._succ:
self._succ[u] = self.adjlist_inner_dict_factory()
self._pred[u] = self.adjlist_inner_dict_factory()
self._node[u] = self.node_attr_dict_factory()
if v not in self._succ:
self._succ[v] = self.adjlist_inner_dict_factory()
self._pred[v] = self.adjlist_inner_dict_factory()
self._node[v] = self.node_attr_dict_factory()
datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
datadict.update(attr)
datadict.update(dd)
self._succ[u][v] = datadict
self._pred[v][u] = datadict
def remove_edge(self, u, v):
"""Remove the edge between u and v.
Parameters
----------
u, v : nodes
Remove the edge between nodes u and v.
Raises
------
NetworkXError
If there is not an edge between u and v.
See Also
--------
remove_edges_from : remove a collection of edges
Examples
--------
>>> G = nx.Graph() # or DiGraph, etc
>>> nx.add_path(G, [0, 1, 2, 3])
>>> G.remove_edge(0, 1)
>>> e = (1, 2)
>>> G.remove_edge(*e) # unpacks e from an edge tuple
>>> e = (2, 3, {"weight": 7}) # an edge with attribute data
>>> G.remove_edge(*e[:2]) # select first part of edge tuple
"""
try:
del self._succ[u][v]
del self._pred[v][u]
except KeyError as e:
raise NetworkXError(f"The edge {u}-{v} not in graph.") from e
def remove_edges_from(self, ebunch):
"""Remove all edges specified in ebunch.
Parameters
----------
ebunch: list or container of edge tuples
Each edge given in the list or container will be removed
from the graph. The edges can be:
- 2-tuples (u, v) edge between u and v.
- 3-tuples (u, v, k) where k is ignored.
See Also
--------
remove_edge : remove a single edge
Notes
-----
Will fail silently if an edge in ebunch is not in the graph.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> ebunch = [(1, 2), (2, 3)]
>>> G.remove_edges_from(ebunch)
"""
for e in ebunch:
u, v = e[:2] # ignore edge data
if u in self._succ and v in self._succ[u]:
del self._succ[u][v]
del self._pred[v][u]
def has_successor(self, u, v):
"""Returns True if node u has successor v.
This is true if graph has the edge u->v.
"""
return u in self._succ and v in self._succ[u]
def has_predecessor(self, u, v):
"""Returns True if node u has predecessor v.
This is true if graph has the edge u<-v.
"""
return u in self._pred and v in self._pred[u]
def successors(self, n):
"""Returns an iterator over successor nodes of n.
A successor of n is a node m such that there exists a directed
edge from n to m.
Parameters
----------
n : node
A node in the graph
Raises
------
NetworkXError
If n is not in the graph.
See Also
--------
predecessors
Notes
-----
neighbors() and successors() are the same.
"""
try:
return iter(self._succ[n])
except KeyError as e:
raise NetworkXError(f"The node {n} is not in the digraph.") from e
# digraph definitions
neighbors = successors
def predecessors(self, n):
"""Returns an iterator over predecessor nodes of n.
A predecessor of n is a node m such that there exists a directed
edge from m to n.
Parameters
----------
n : node
A node in the graph
Raises
------
NetworkXError
If n is not in the graph.
See Also
--------
successors
"""
try:
return iter(self._pred[n])
except KeyError as e:
raise NetworkXError(f"The node {n} is not in the digraph.") from e
@property
def edges(self):
"""An OutEdgeView of the DiGraph as G.edges or G.edges().
edges(self, nbunch=None, data=False, default=None)
The OutEdgeView provides set-like operations on the edge-tuples
as well as edge attribute lookup. When called, it also provides
an EdgeDataView object which allows control of access to edge
attributes (but does not provide set-like operations).
Hence, `G.edges[u, v]['color']` provides the value of the color
attribute for edge `(u, v)` while
`for (u, v, c) in G.edges.data('color', default='red'):`
iterates through all the edges yielding the color attribute
with default `'red'` if no color attribute exists.
Parameters
----------
nbunch : single node, container, or all nodes (default= all nodes)
The view will only report edges incident to these nodes.
data : string or bool, optional (default=False)
The edge attribute returned in 3-tuple (u, v, ddict[data]).
If True, return edge attribute dict in 3-tuple (u, v, ddict).
If False, return 2-tuple (u, v).
default : value, optional (default=None)
Value used for edges that don't have the requested attribute.
Only relevant if data is not True or False.
Returns
-------
edges : OutEdgeView
A view of edge attributes, usually it iterates over (u, v)
or (u, v, d) tuples of edges, but can also be used for
attribute lookup as `edges[u, v]['foo']`.
See Also
--------
in_edges, out_edges
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-edges.
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph, etc
>>> nx.add_path(G, [0, 1, 2])
>>> G.add_edge(2, 3, weight=5)
>>> [e for e in G.edges]
[(0, 1), (1, 2), (2, 3)]
>>> G.edges.data() # default data is {} (empty dict)
OutEdgeDataView([(0, 1, {}), (1, 2, {}), (2, 3, {'weight': 5})])
>>> G.edges.data("weight", default=1)
OutEdgeDataView([(0, 1, 1), (1, 2, 1), (2, 3, 5)])
>>> G.edges([0, 2]) # only edges incident to these nodes
OutEdgeDataView([(0, 1), (2, 3)])
>>> G.edges(0) # only edges incident to a single node (use G.adj[0]?)
OutEdgeDataView([(0, 1)])
"""
return OutEdgeView(self)
# alias out_edges to edges
out_edges = edges
@property
def in_edges(self):
"""An InEdgeView of the Graph as G.in_edges or G.in_edges().
in_edges(self, nbunch=None, data=False, default=None):
Parameters
----------
nbunch : single node, container, or all nodes (default= all nodes)
The view will only report edges incident to these nodes.
data : string or bool, optional (default=False)
The edge attribute returned in 3-tuple (u, v, ddict[data]).
If True, return edge attribute dict in 3-tuple (u, v, ddict).
If False, return 2-tuple (u, v).
default : value, optional (default=None)
Value used for edges that don't have the requested attribute.
Only relevant if data is not True or False.
Returns
-------
in_edges : InEdgeView
A view of edge attributes, usually it iterates over (u, v)
or (u, v, d) tuples of edges, but can also be used for
attribute lookup as `edges[u, v]['foo']`.
See Also
--------
edges
"""
return InEdgeView(self)
@property
def degree(self):
"""A DegreeView for the Graph as G.degree or G.degree().
The node degree is the number of edges adjacent to the node.
The weighted node degree is the sum of the edge weights for
edges incident to that node.
This object provides an iterator for (node, degree) as well as
lookup for the degree for a single node.
Parameters
----------
nbunch : single node, container, or all nodes (default= all nodes)
The view will only report edges incident to these nodes.
weight : string or None, optional (default=None)
The name of an edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
If a single node is requested
deg : int
Degree of the node
OR if multiple nodes are requested
nd_iter : iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
in_degree, out_degree
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph
>>> nx.add_path(G, [0, 1, 2, 3])
>>> G.degree(0) # node 0 with degree 1
1
>>> list(G.degree([0, 1, 2]))
[(0, 1), (1, 2), (2, 2)]
"""
return DiDegreeView(self)
@property
def in_degree(self):
"""An InDegreeView for (node, in_degree) or in_degree for single node.
The node in_degree is the number of edges pointing to the node.
The weighted node degree is the sum of the edge weights for
edges incident to that node.
This object provides an iteration over (node, in_degree) as well as
lookup for the degree for a single node.
Parameters
----------
nbunch : single node, container, or all nodes (default= all nodes)
The view will only report edges incident to these nodes.
weight : string or None, optional (default=None)
The name of an edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
If a single node is requested
deg : int
In-degree of the node
OR if multiple nodes are requested
nd_iter : iterator
The iterator returns two-tuples of (node, in-degree).
See Also
--------
degree, out_degree
Examples
--------
>>> G = nx.DiGraph()
>>> nx.add_path(G, [0, 1, 2, 3])
>>> G.in_degree(0) # node 0 with degree 0
0
>>> list(G.in_degree([0, 1, 2]))
[(0, 0), (1, 1), (2, 1)]
"""
return InDegreeView(self)
@property
def out_degree(self):
"""An OutDegreeView for (node, out_degree)
The node out_degree is the number of edges pointing out of the node.
The weighted node degree is the sum of the edge weights for
edges incident to that node.
This object provides an iterator over (node, out_degree) as well as
lookup for the degree for a single node.
Parameters
----------
nbunch : single node, container, or all nodes (default= all nodes)
The view will only report edges incident to these nodes.
weight : string or None, optional (default=None)
The name of an edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
If a single node is requested
deg : int
Out-degree of the node
OR if multiple nodes are requested
nd_iter : iterator
The iterator returns two-tuples of (node, out-degree).
See Also
--------
degree, in_degree
Examples
--------
>>> G = nx.DiGraph()
>>> nx.add_path(G, [0, 1, 2, 3])
>>> G.out_degree(0) # node 0 with degree 1
1
>>> list(G.out_degree([0, 1, 2]))
[(0, 1), (1, 1), (2, 1)]
"""
return OutDegreeView(self)
def clear(self):
"""Remove all nodes and edges from the graph.
This also removes the name, and all graph, node, and edge attributes.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.clear()
>>> list(G.nodes)
[]
>>> list(G.edges)
[]
"""
self._succ.clear()
self._pred.clear()
self._node.clear()
self.graph.clear()
def clear_edges(self):
"""Remove all edges from the graph without altering nodes.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.clear_edges()
>>> list(G.nodes)
[0, 1, 2, 3]
>>> list(G.edges)
[]
"""
for predecessor_dict in self._pred.values():
predecessor_dict.clear()
for successor_dict in self._succ.values():
successor_dict.clear()
def is_multigraph(self):
"""Returns True if graph is a multigraph, False otherwise."""
return False
def is_directed(self):
"""Returns True if graph is directed, False otherwise."""
return True
def to_undirected(self, reciprocal=False, as_view=False):
"""Returns an undirected representation of the digraph.
Parameters
----------
reciprocal : bool (optional)
If True only keep edges that appear in both directions
in the original digraph.
as_view : bool (optional, default=False)
If True return an undirected view of the original directed graph.
Returns
-------
G : Graph
An undirected graph with the same name and nodes and
with edge (u, v, data) if either (u, v, data) or (v, u, data)
is in the digraph. If both edges exist in digraph and
their edge data is different, only one edge is created
with an arbitrary choice of which edge data to use.
You must check and correct for this manually if desired.
See Also
--------
Graph, copy, add_edge, add_edges_from
Notes
-----
If edges in both directions (u, v) and (v, u) exist in the
graph, attributes for the new undirected edge will be a combination of
the attributes of the directed edges. The edge data is updated
in the (arbitrary) order that the edges are encountered. For
more customized control of the edge attributes use add_edge().
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar G=DiGraph(D) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, https://docs.python.org/3/library/copy.html.
Warning: If you have subclassed DiGraph to use dict-like objects
in the data structure, those changes do not transfer to the
Graph created by this method.
Examples
--------
>>> G = nx.path_graph(2) # or MultiGraph, etc
>>> H = G.to_directed()
>>> list(H.edges)
[(0, 1), (1, 0)]
>>> G2 = H.to_undirected()
>>> list(G2.edges)
[(0, 1)]
"""
graph_class = self.to_undirected_class()
if as_view is True:
            return nx.graphviews.generic_graph_view(self, graph_class)
        # deepcopy when not a view
        G = graph_class()
G.graph.update(deepcopy(self.graph))
G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items())
if reciprocal is True:
G.add_edges_from(
(u, v, deepcopy(d))
for u, nbrs in self._adj.items()
for v, d in nbrs.items()
if v in self._pred[u]
)
else:
G.add_edges_from(
(u, v, deepcopy(d))
for u, nbrs in self._adj.items()
for v, d in nbrs.items()
)
return G
def reverse(self, copy=True):
"""Returns the reverse of the graph.
The reverse is a graph with the same nodes and edges
but with the directions of the edges reversed.
Parameters
----------
copy : bool optional (default=True)
If True, return a new DiGraph holding the reversed edges.
If False, the reverse graph is created using a view of
the original graph.
"""
if copy:
H = self.__class__()
H.graph.update(deepcopy(self.graph))
H.add_nodes_from((n, deepcopy(d)) for n, d in self.nodes.items())
H.add_edges_from((v, u, deepcopy(d)) for u, v, d in self.edges(data=True))
return H
return nx.graphviews.reverse_view(self)
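# Illustrative sketch (added for clarity, not part of the original class): how
# reverse() behaves on a small DiGraph, assuming networkx is imported as nx as
# in the docstring examples above.
# >>> G = nx.DiGraph([(0, 1), (1, 2)])
# >>> H = G.reverse()
# >>> list(H.edges)
# [(1, 0), (2, 1)]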
|
the-stack_106_19779
|
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import os
import ssl
import datetime
import time
import json
import logging
import pkg_resources
from requests import Session
from io import BufferedReader
from ixnetwork_restpy.errors import *
from ixnetwork_restpy.files import Files
try:
basestring
except NameError:
basestring = str
class Connection(object):
"""Http/Https transport"""
X_API_KEY = 'X-Api-Key'
TRACE_NONE = 'none'
TRACE_REQUEST = 'request'
TRACE_REQUEST_RESPONSE = 'request_response'
def __init__(self, hostname, rest_port=443, platform='windows', log_file_name=None, ignore_env_proxy=False):
""" Set the connection parameters to a rest server
Args:
hostname (str): hostname or ip address
rest_port (int, optional, default=443): the rest port of the server
            platform (str): 'windows' selects the http scheme, any other value uses https
            log_file_name (str): optional file that log output is also written to
            ignore_env_proxy (bool): if True, bypass any http/https proxy environment variables
"""
if sys.version < '2.7.9':
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
else:
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
self._headers = {
Connection.X_API_KEY: None
}
self._hostname = hostname
self._rest_port = rest_port
self._verify_cert = False
self._scheme = 'https'
if platform == 'windows':
self._scheme = 'http'
self._session = Session()
if ignore_env_proxy is True:
self._session.proxies.update({
'http': None,
'https': None
})
# setup logging to both console and file if requested
self._trace = Connection.TRACE_NONE
handlers = [logging.StreamHandler(sys.stdout)]
if log_file_name is not None:
handlers.append(logging.FileHandler(log_file_name, mode='w'))
formatter = logging.Formatter(fmt='%(asctime)s [%(name)s] [%(levelname)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
for handler in handlers:
handler.setFormatter(formatter)
logging.getLogger(__name__).addHandler(handler)
logging.getLogger(__name__).setLevel(logging.INFO)
logging.getLogger(__name__).info('using python version %s' % sys.version)
try:
logging.getLogger(__name__).info('using ixnetwork-restpy version %s' % pkg_resources.get_distribution("ixnetwork-restpy").version)
except Exception as e:
            logging.getLogger(__name__).warning("ixnetwork-restpy not installed using pip, unable to determine version")
@property
def trace(self):
"""str: Trace all requests and responses."""
return self._trace
@trace.setter
def trace(self, value):
if value not in [Connection.TRACE_NONE, Connection.TRACE_REQUEST, Connection.TRACE_REQUEST_RESPONSE]:
raise ValueError('the value %s is an incorrect Trace level' % value)
self._trace = value
if self._trace == Connection.TRACE_NONE:
logging.getLogger(__name__).setLevel(logging.WARNING)
if self._trace in [Connection.TRACE_REQUEST, Connection.TRACE_REQUEST_RESPONSE]:
logging.getLogger(__name__).setLevel(logging.DEBUG)
@property
def x_api_key(self):
"""str: Get/Set the x-api-key header value."""
return self._headers[Connection.X_API_KEY]
@x_api_key.setter
def x_api_key(self, value):
self._headers[Connection.X_API_KEY] = value
def _read(self, url):
return self._send_recv('GET', url)
def _create(self, url, payload):
return self._send_recv('POST', url, payload)
def _update(self, url, payload):
return self._send_recv('PATCH', url, payload)
def _delete(self, url, payload=None):
return self._send_recv('DELETE', url, payload)
def _execute(self, url, payload):
return self._send_recv('POST', url, payload)
def _options(self, url):
return self._send_recv('OPTIONS', url)
def _print_request(self, method, url, payload=None):
if self._trace in [Connection.TRACE_REQUEST, Connection.TRACE_REQUEST_RESPONSE]:
logging.getLogger(__name__).debug('%s %s %s' % (method, url, payload))
def _print_response(self, response):
if self._trace == Connection.TRACE_REQUEST_RESPONSE:
logging.getLogger(__name__).debug('%s %s %s' % (response.status_code, response.reason, response.raw.data))
def _info(self, message):
logging.getLogger(__name__).info(message)
def _warn(self, message):
        logging.getLogger(__name__).warning(message)
def _debug(self, message):
logging.getLogger(__name__).debug(message)
def _normalize_url(self, url):
connection = '%s://%s:%s' % (self._scheme, self._hostname, self._rest_port)
if url.startswith(self._scheme) == False:
url = '%s/%s' % (connection, url.strip('/'))
path_start = url.find('://') + 3
url = '%s%s' % (url[0:path_start], url[path_start:].replace('//', '/'))
return (connection, url)
def _get_file(self, url, remote_filename, remote_directory=None, local_filename=None, local_directory=None):
headers = self._headers
connection, url = self._normalize_url(url)
url = '%s/files?filename=%s' % (url, remote_filename)
if remote_directory is not None:
url = '%s&absolute=%s' % (url, remote_directory)
response = self._session.request('GET', url)
if local_filename is None:
local_filename = remote_filename
if local_directory is not None:
local_filename = os.path.join(local_directory, local_filename)
local_filename = os.path.normpath(local_filename)
with open(local_filename, 'wb') as fid:
fid.write(response.content)
return local_filename
def _send_recv(self, method, url, payload=None):
headers = self._headers
headers['Content-Type'] = 'application/octet-stream'
connection, url = self._normalize_url(url)
data = payload
if payload is not None:
if isinstance(payload, dict) or isinstance(payload, list):
headers['Content-Type'] = 'application/json'
data = json.dumps(payload)
elif isinstance(payload, Files):
headers['Content-Type'] = 'application/octet-stream'
if os.path.isfile(payload.file_path):
with open(payload.file_path, 'rb') as fid:
data = fid.read()
else:
response = self._session.request('GET', url.replace('filename=', 'filter='), headers=headers, verify=self._verify_cert, allow_redirects=False)
if response.status_code == 200:
return
data = ''
elif isinstance(payload, basestring):
headers['Content-Type'] = 'application/json'
data = payload
self._print_request(method, url, None if isinstance(payload, Files) else data)
response = self._session.request(method, url, data=data, headers=headers, verify=self._verify_cert, allow_redirects=False)
self._print_response(response)
if str(response.status_code).startswith('3'):
url = response.headers['location']
if url.find('://') != -1:
self._scheme = url[:url.find('://')]
self._hostname = url[url.find('://')+3:url.find('/', url.find('://')+3)]
if self._scheme == 'https':
self._rest_port = 443
host_pieces = self._hostname.split(':')
if len(host_pieces) > 1:
self._hostname = host_pieces[0]
self._rest_port = host_pieces[1]
else:
url = '%s://%s:%s%s' % (self._scheme, self._hostname, self._rest_port, url)
self._print_request(method, url, data)
response = self._session.request(method, url, data=data, headers=headers, verify=self._verify_cert, allow_redirects=False)
self._print_response(response)
if response.status_code == 202:
while True:
async_status = response.json()
if 'state' not in async_status.keys():
break
state = async_status['state']
if state == 'IN_PROGRESS':
time.sleep(1)
state_url = async_status['url']
if state_url.startswith(self._scheme) == False:
state_url = '%s/%s' % (connection, state_url.strip('/'))
self._print_request('GET', state_url)
response = self._session.request('GET', state_url, headers=headers, verify=self._verify_cert)
self._print_response(response)
elif state == 'SUCCESS':
if 'result' in async_status.keys():
return async_status['result']
else:
return None
elif async_status['message'] is not None and 'API CONTENTION' in async_status['message']:
raise ResourceInUseError(response)
else:
raise ServerError(response)
while(response.status_code == 409):
time.sleep(6)
response = self._session.request(method, url, data=data, headers=headers, verify=self._verify_cert)
if response.status_code == 204:
return None
elif str(response.status_code).startswith('2') is True:
if response.status_code == 201 and 'links' in response.json().keys():
href = response.json()['links'][0]['href']
return self._send_recv('GET', href)
if response.headers.get('Content-Type'):
if 'application/json' in response.headers['Content-Type']:
return response.json()
return None
elif response.status_code == 400:
raise BadRequestError(response)
elif response.status_code == 401:
raise UnauthorizedError(response)
elif response.status_code == 404:
raise NotFoundError(response)
elif response.status_code == 409:
raise ResourceInUseError(response)
else:
raise ServerError(response)
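# Illustrative usage sketch (added for clarity, not part of the original file):
# roughly how this transport is exercised; the hostname, port and URL below are
# placeholder assumptions, not values required by the class.
# connection = Connection('127.0.0.1', rest_port=11009, platform='windows',
#                         log_file_name='restpy.log')
# connection.trace = Connection.TRACE_REQUEST_RESPONSE
# sessions = connection._read('api/v1/sessions')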
|
the-stack_106_19781
|
import collections
import collections.abc
import logging
import firewall_translator.generic
# The original module calls bare info()/debug() helpers that are never defined;
# bind them to a module-level logger so those calls below resolve.
logger = logging.getLogger(__name__)
info = logger.info
debug = logger.debug
class Action(firewall_translator.generic.Action):
def __init__(self, allow=False, reply=False, log=False):
if log:
raise NotImplementedError
super(Action, self).__init__(allow, reply, log)
    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, str(self))
def __str__(self):
if self.allow:
return 'ACCEPT'
elif self.reply:
return 'REJECT'
return 'DROP'
class Rule:
match_params = None
action = None
action_params = None
def __init__(self, match_params=None, action=None, action_params=None):
self.match_params = match_params
self.action = action
self.action_params = action_params
def __repr__(self):
string = '<{}'.format(self.__class__.__name__)
if self.match_params:
string += ' Match: {!r}'.format(self.match_params)
if self.action:
string += ' Action: {}'.format(self.action)
if self.action_params:
string += ' {!r}'.format(self.action_params)
string += '>'
return string
def __str__(self):
string = []
if self.match_params:
for k, v in self.match_params.items():
string.append(' '.join([k, v]))
if self.action:
string.append('-j {}'.format(self.action))
if self.action_params:
for k, v in self.action_params.items():
string.append(' '.join([k, v]))
return ' '.join(string)
@staticmethod
def from_cli(string):
match_params, action = string.partition(' -j ')[::2]
match_params = match_params.strip().split(' ')
match_params_keys = match_params[::2]
match_params_values = match_params[1::2]
match_params = dict(zip(match_params_keys, match_params_values))
action_params = None
if action:
action = action.strip()
if ' ' in action:
info(action)
action, action_params = action.split(' ', 1)
if action_params:
action_params = action_params.strip().split(' ')
action_params_keys = action_params[::2]
action_params_values = action_params[1::2]
action_params = dict(zip(action_params_keys, action_params_values))
        rule = Rule(match_params, action, action_params)
        info('Action: {}, Params: {}, Matches: {}'.format(action, action_params, match_params))
        return rule
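# Illustrative sketch (added, not in the original source): parsing a rule
# fragment with from_cli and rendering it back; the rule text is invented.
# >>> r = Rule.from_cli('-i eth0 -p tcp --dport 22 -j ACCEPT')
# >>> str(r)
# '-i eth0 -p tcp --dport 22 -j ACCEPT'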
class Chain(collections.abc.MutableSequence):
name = None
rules = None
action = None
def __delitem__(self, index):
del(self.rules[index])
def __getitem__(self, index):
return self.rules[index]
def __init__(self, name, rules=None, action='-'):
self.name = name
self.action = action
if rules is None:
self.rules = []
else:
self.rules = rules
def __iter__(self):
for rule in self.rules:
yield rule
def __len__(self):
return len(self.rules)
def __repr__(self):
return '<{} {} {}>'.format(self.__class__.__name__, self.name, self.action)
def __setitem__(self, index, value):
self.rules[index] = value
def __str__(self):
string = [':{} {} [0:0]'.format(self.name, self.action)]
for rule in self.rules:
string.append('-A {} {}'.format(self.name, rule))
return '\n'.join(string)
def append(self, rule):
self.rules.append(rule)
def insert(self, rule, position=0):
self.rules.insert(position, rule)
def delete(self, rule):
self.rules.remove(rule)
class Table(collections.abc.MutableMapping):
name = None
chains = None
def __delitem__(self, key):
del(self.chains[key])
def __getitem__(self, key):
return self.chains[key]
def __init__(self, name, chains=None):
self.name = name
if chains is None:
self.chains = {}
else:
self.chains = chains
def __iter__(self):
for chain in self.chains.values():
yield chain
def __len__(self):
return len(self.chains)
def __repr__(self):
return '<{} {} {}>'.format(self.__class__.__name__, self.name, list(self.chains))
def __setitem__(self, key, value):
self.chains[key] = value
def __str__(self):
string = ['*{}'.format(self.name)]
for chain in self.chains.values():
string.append(str(chain))
string.append('COMMIT')
return '\n'.join(string)
def new_chain(self, name, rules=None, action='-'):
if name in self.chains:
raise KeyError
self.chains[name] = Chain(name, rules, action)
def delete_chain(self, name):
del(self.chains[name])
class RuleSet(collections.abc.MutableMapping):
tables = None
def __delitem__(self, key):
del(self.tables[key])
def __getitem__(self, key):
return self.tables[key]
def __init__(self, tables=None):
if tables is None:
self.tables = {
'filter': Table('filter', {
'FORWARD': Chain('FORWARD', action='ACCEPT'),
'INPUT': Chain('INPUT', action='ACCEPT'),
'OUTPUT': Chain('OUTPUT', action='ACCEPT'),
}),
'mangle': Table('mangle', {
'FORWARD': Chain('FORWARD', action='ACCEPT'),
'INPUT': Chain('INPUT', action='ACCEPT'),
'OUTPUT': Chain('OUTPUT', action='ACCEPT'),
'POSTROUTING': Chain('POSTROUTING', action='ACCEPT'),
'PREROUTING': Chain('PREROUTING', action='ACCEPT'),
}),
'nat': Table('nat', {
'INPUT': Chain('INPUT', action='ACCEPT'),
'OUTPUT': Chain('OUTPUT', action='ACCEPT'),
'POSTROUTING': Chain('POSTROUTING', action='ACCEPT'),
'PREROUTING': Chain('PREROUTING', action='ACCEPT'),
}),
}
else:
self.tables = tables
def __iter__(self):
for table in self.tables.values():
yield table
def __len__(self):
return len(self.tables)
def __repr__(self):
return '<{} {}>'.format(self.__class__.__name__, list(self.tables))
def __setitem__(self, key, value):
self.tables[key] = value
def __str__(self):
string = []
for table in self.tables.values():
string.append(str(table))
return '\n'.join(string)
def read(self, rule_def):
table = None
for line in rule_def.splitlines():
if not line:
continue
if line.strip().startswith('#'):
debug('Found a comment: {}'.format(line))
continue
elif line.strip().startswith('*'):
debug('Found a table definition: {}'.format(line))
table = line[1:]
info('Table: {}'.format(table))
if table not in self.tables.keys():
raise KeyError
elif line.strip().startswith(':'):
debug('Found a chain definition: {}'.format(line))
name, action, counters = line[1:].split(' ')
info('Table: {}, Chain: {}'.format(table, name))
if name not in self.tables[table].chains.keys():
self.tables[table].new_chain(name, action=action)
elif line.strip() == 'COMMIT':
debug('Finished reading table {}'.format(table))
table = None
else:
debug('Found a rule definition: {}'.format(line))
operation, rule = line.split(' ', 1)
if operation != '-A':
raise RuntimeError
chain, rule = rule.split(' ', 1)
if '-j' in rule:
match_params, action = rule.split('-j')
else:
match_params = rule
action = None
match_params = match_params.strip().split(' ')
match_params_keys = match_params[::2]
match_params_values = match_params[1::2]
match_params = dict(zip(match_params_keys, match_params_values))
action_params = None
if action:
action = action.strip()
if ' ' in action:
info(action)
action, action_params = action.split(' ', 1)
if action_params:
action_params = action_params.strip().split(' ')
action_params_keys = action_params[::2]
action_params_values = action_params[1::2]
action_params = dict(zip(action_params_keys, action_params_values))
rule = Rule(match_params, action, action_params)
info('Table: {}, Chain: {}, Action: {}, Params: {}, Matches: {}'.format(table, chain, action, action_params, match_params))
self.tables[table].chains[chain].append(rule)
def read_from_file(self, file):
with open(file) as r:
self.read(r.read())
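# Illustrative sketch (added, not part of the original module): loading a
# minimal iptables-save style dump into a RuleSet; the rule content is
# invented for the example.
# rs = RuleSet()
# rs.read('*filter\n-A INPUT -p tcp -j ACCEPT\nCOMMIT')
# print(rs['filter']['INPUT'][0])   # -> -p tcp -j ACCEPT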
|
the-stack_106_19782
|
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from genomic_neuralnet.config import MAX_EPOCHS, CONTINUE_EPOCHS, TRY_CONVERGENCE, USE_ARAC
from genomic_neuralnet.common.in_temp_dir import in_temp_dir
from fann2 import libfann
LEARNING_RATE = 0.01
_ITERATIONS_BETWEEN_REPORTS = 1000
_DESIRED_ERROR = 0 # If 0, always train until max epochs.
_TRAIN_FILE = './train.data'
_NETWORK_FILE = './train.net'
def _get_nn(inputs, hidden):
"""
Construct a neural network.
"""
ann = libfann.neural_net()
ann.create_standard_array((inputs, hidden[0], 1))
ann.set_learning_rate(LEARNING_RATE)
ann.set_activation_function_hidden(libfann.SIGMOID_SYMMETRIC)
ann.set_activation_function_output(libfann.LINEAR_PIECE_SYMMETRIC)
ann.set_training_algorithm(libfann.TRAIN_RPROP)
ann.set_rprop_delta_zero(1e-6)
return ann
def _train_nn(neuralnet, train_data, train_truth, weight_decay):
"""
A stateful method that trains the network
on a dataset.
"""
neuralnet.set_quickprop_decay(-1 * weight_decay)
_write_training_file(train_data, train_truth)
neuralnet.train_on_file(_TRAIN_FILE, MAX_EPOCHS, _ITERATIONS_BETWEEN_REPORTS, _DESIRED_ERROR)
neuralnet.save(_NETWORK_FILE)
return neuralnet
def _write_training_file(train_data, train_truth):
lines = []
# Write header
lines.append('{} {} {}'.format(train_data.shape[0], train_data.shape[1], 1))
# Write data
for idx in range(train_data.shape[0]):
input = ' '.join(map(str, tuple(train_data[idx,:])))
output = str(train_truth[idx])
lines.append(input)
lines.append(output)
with open(_TRAIN_FILE, 'w') as f:
f.write('\n'.join(lines))
def _convert_to_individual_alleles(array):
"""
Convert SNPs to individual copies so neuralnet can learn dominance relationships.
[-1, 0, 1] => [(0, 0), (0, 1), (1, 1)] => [0, 0, 0, 1, 1, 1]
"""
    array = np.asarray(array) # We don't want a pandas series anymore.
# Set non-integer values to 0 (het)
array = np.trunc(array)
incr = array + 1 # Now we have 0, 1, and 2
incr = incr[:,:,np.newaxis] # Add another dimension.
pairs = np.pad(incr, ((0,0), (0,0), (0,1)), mode='constant') # Append one extra 0 value to final axis.
twos = np.sum(pairs, axis=2) == 2
pairs[twos] = [1,1]
x, y, z = pairs.shape
pairs = pairs.reshape((x, y*z)) # Merge pairs to one axis.
return pairs
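# Quick illustration (added, not in the original file) of the allele expansion:
# each SNP column becomes a pair of 0/1 columns.
# >>> _convert_to_individual_alleles(np.array([[-1.0, 1.0]]))
# array([[0., 0., 1., 1.]])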
@in_temp_dir
def get_fast_nn_dom_prediction(train_data, train_truth, test_data, test_truth, hidden=(5,), weight_decay=0.0):
# Convert data to individual alleles to capture dominance.
train_data, test_data = tuple(map(_convert_to_individual_alleles, [train_data, test_data]))
scaler = MinMaxScaler(feature_range = (-1, 1))
    # MinMaxScaler expects 2D input, so reshape the 1D truth vectors before scaling
    train_truth = scaler.fit_transform(np.asarray(train_truth).reshape(-1, 1)).ravel()
    test_truth = scaler.transform(np.asarray(test_truth).reshape(-1, 1)).ravel()
net = _get_nn(train_data.shape[1], hidden)
_train_nn(net, train_data, train_truth, weight_decay)
out = []
for i in range(test_data.shape[0]):
sample = test_data[i,:]
res = net.run(sample)
out.append(res)
predicted = scaler.inverse_transform(np.array(out))
return predicted.ravel()
|
the-stack_106_19784
|
import re
import numpy as np
import pytest
import qcodes as qc
from qcodes.dataset.descriptions.dependencies import InterDependencies_
from qcodes.dataset.descriptions.param_spec import ParamSpecBase
from qcodes.dataset.measurements import DataSaver
CALLBACK_COUNT = 0
CALLBACK_RUN_ID = None
CALLBACK_SNAPSHOT = None
def callback(result_list, data_set_len, state, run_id, snapshot):
"""
default_callback example function implemented in the Web UI.
"""
global CALLBACK_COUNT, CALLBACK_RUN_ID, CALLBACK_SNAPSHOT
CALLBACK_COUNT += 1
CALLBACK_RUN_ID = run_id
CALLBACK_SNAPSHOT = snapshot
def reset_callback_globals():
global CALLBACK_COUNT, CALLBACK_RUN_ID, CALLBACK_SNAPSHOT
CALLBACK_COUNT = 0
CALLBACK_RUN_ID = None
CALLBACK_SNAPSHOT = None
@pytest.mark.usefixtures("experiment")
@pytest.mark.parametrize("bg_writing", [True, False])
def test_default_callback(bg_writing):
"""
The Web UI needs to know the results of an experiment with the metadata.
So a default_callback class variable is set by the Web UI with a callback
to introspect the data.
"""
test_set = None
reset_callback_globals()
try:
DataSaver.default_callback = {
'run_tables_subscription_callback': callback,
'run_tables_subscription_min_wait': 1,
'run_tables_subscription_min_count': 2}
test_set = qc.new_data_set("test-dataset")
test_set.add_metadata('snapshot', 'reasonable_snapshot')
DataSaver(dataset=test_set,
write_period=0,
                  interdeps=InterDependencies_())
test_set.mark_started(start_bg_writer=bg_writing)
test_set.mark_completed()
assert CALLBACK_SNAPSHOT == 'reasonable_snapshot'
assert CALLBACK_RUN_ID > 0
assert CALLBACK_COUNT > 0
finally:
DataSaver.default_callback = None
if test_set is not None:
test_set.conn.close()
@pytest.mark.usefixtures("experiment")
@pytest.mark.parametrize("bg_writing", [True, False])
def test_numpy_types(bg_writing):
"""
Test that we can save numpy types in the data set
"""
p = ParamSpecBase(name="p", paramtype="numeric")
test_set = qc.new_data_set("test-dataset")
test_set.set_interdependencies(InterDependencies_(standalones=(p,)))
test_set.mark_started(start_bg_writer=bg_writing)
idps = InterDependencies_(standalones=(p,))
data_saver = DataSaver(
dataset=test_set, write_period=0, interdeps=idps)
dtypes = [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32,
np.float64]
for dtype in dtypes:
data_saver.add_result(("p", dtype(2)))
data_saver.flush_data_to_database()
test_set.mark_completed()
data = test_set.get_parameter_data("p")["p"]["p"]
expected_data = np.ones(len(dtypes))
expected_data[:] = 2
np.testing.assert_array_equal(data, expected_data)
@pytest.mark.usefixtures("experiment")
@pytest.mark.parametrize('numeric_type',
[int, float, np.int8, np.int16, np.int32, np.int64,
np.float16, np.float32, np.float64])
@pytest.mark.parametrize("bg_writing", [True, False])
def test_saving_numeric_values_as_text(numeric_type, bg_writing):
"""
    Test that saving numeric values into a 'text' parameter raises an exception
"""
p = ParamSpecBase("p", "text")
test_set = qc.new_data_set("test-dataset")
test_set.set_interdependencies(InterDependencies_(standalones=(p,)))
test_set.mark_started(start_bg_writer=bg_writing)
idps = InterDependencies_(standalones=(p,))
data_saver = DataSaver(
dataset=test_set, write_period=0, interdeps=idps)
try:
value = numeric_type(2)
gottype = np.array(value).dtype
msg = re.escape(f'Parameter {p.name} is of type '
f'"{p.type}", but got a result of '
f'type {gottype} ({value}).')
with pytest.raises(ValueError, match=msg):
data_saver.add_result((p.name, value))
finally:
data_saver.dataset.mark_completed()
data_saver.dataset.conn.close()
@pytest.mark.usefixtures("experiment")
def test_duplicated_parameter_raises():
"""
    Test that passing the same parameter multiple times to ``add_result`` raises an exception
"""
p = ParamSpecBase("p", "text")
test_set = qc.new_data_set("test-dataset")
test_set.set_interdependencies(InterDependencies_(standalones=(p,)))
test_set.mark_started()
idps = InterDependencies_(standalones=(p,))
data_saver = DataSaver(dataset=test_set, write_period=0, interdeps=idps)
try:
msg = re.escape(
"Not all parameter names are unique. Got multiple values for ['p']"
)
with pytest.raises(ValueError, match=msg):
data_saver.add_result((p.name, 1), (p.name, 1))
finally:
data_saver.dataset.mark_completed()
data_saver.dataset.conn.close()
|
the-stack_106_19788
|
#!/usr/bin/env python
# Skeleton for python-based regression tests using
# JSON-RPC
# Add python-qbitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-qbitcoinrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from qbitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def run_test(nodes):
# Replace this as appropriate
for node in nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*50)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave qbitcoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing qbitcoind/qbitcoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
nodes = start_nodes(2, options.tmpdir)
connect_nodes(nodes[1], 0)
sync_blocks(nodes)
run_test(nodes)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
stack = traceback.extract_tb(sys.exc_info()[2])
print(stack[-1])
if not options.nocleanup:
print("Cleaning up")
stop_nodes()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
|
the-stack_106_19790
|
"""
Author: Francis Chan
Data Transformation for building classification models
"""
from sklearn.base import TransformerMixin, BaseEstimator
import random
import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import label_binarize
from sklearn.impute import SimpleImputer
from datetime import datetime
import explore as ex
import utility as ut
# ----------------------------------------------------------------------------
# Imputer
# ----------------------------------------------------------------------------
class DataFrameImputer(TransformerMixin):
"""
Impute missing values
categorical: fill with most frequent
numerical: fill with mean value
"""
    def __init__(self, categorical_strategy='most_frequent', numerical_strategy='mean',
                 numeric_const=0, categorical_const='Unknown'):
        self.c_strategy = categorical_strategy
self.n_strategy = numerical_strategy
self.categorical_const = categorical_const
self.numerical_const = numeric_const
def fit(self, X, y=None):
"""Preparation steps for transformation
        Columns of categorical (object) dtype are imputed with the most
        frequent value in the column.
        Columns of numerical data are imputed with the mean of the column.
Parameters
----------
X : pandas data frame
"""
# all of these return a single value for all missing
def fill_most_frequent(x):
"return the most freq value in this column"
try:
value = (x.value_counts().index[0])
except:
value = self.categorical_const
return value
def fill_mean(x):
"return the mean value in this column"
return(x.mean())
def fill_median(x):
"return the median value in this column"
return(x.median())
def fill_categorical_const(x):
"return the constant categorical value"
return self.categorical_const
def fill_numerical_const(x):
"return the constant numerical value"
return self.numerical_const
if self.c_strategy == 'constant':
self.c_func = fill_categorical_const
else:
self.c_func = fill_most_frequent
if self.n_strategy == 'most_frequent':
self.n_func = fill_most_frequent
elif self.n_strategy == 'median':
self.n_func = fill_median
elif self.n_strategy == 'constant':
self.n_func = fill_numerical_const
else:
self.n_func = fill_mean
# X is data frame
categorical_columns = ex.get_categorical_column(X)
non_categorical_columns = ex.get_non_categorical_column(X)
# find the values to impute for each column
self.fill = pd.Series([self.c_func(X[c])
if c in categorical_columns
else self.n_func(X[c]) for c in X],
index=X.columns)
return self
def transform(self, X, y=None):
return X.fillna(self.fill)
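# Illustrative sketch (added for clarity, not part of the original module);
# the column names are invented and it is assumed that ex.get_categorical_column
# flags object-dtype columns as categorical.
# >>> df = pd.DataFrame({'colour': ['red', None, 'red'], 'size': [1.0, np.nan, 3.0]})
# >>> DataFrameImputer().fit_transform(df)
# 'colour' is filled with 'red' (most frequent), 'size' with 2.0 (column mean)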
# class Imputer(TransformerMixin):
# def fit(self, X, y=None):
# return self
# def transform(self, X):
# self.categoricalCols = [col for col in X.dtypes.index if X.dtypes.get(col) == 'object' ]
# do_transform = lambda x: x.fillna('NA') \
# if x.name in self.categoricalCols else x.fillna(0)
# result = X.copy()
# result = result.apply(do_transform)
# return result
class ConstantImputer(TransformerMixin):
"""
Imputer Data Frame with contant values
Parameters
----------
    numerical_columns: names of the numerical columns to impute
    categorical_columns: names of the categorical columns to impute
Returns
-------
numerical columns are filled with 0
categorical columns are filled with 'missing_value"
"""
def __init__(self, numerical_columns=None, categorical_columns=None):
self.num_cols = numerical_columns
self.cat_cols = categorical_columns
def fit(self, X, y=None):
"X is a data frame"
# self.type_dict = ex.get_type_dict(X)
self.column_types = ex.get_column_type(X)
if self.cat_cols is None:
self.cat_cols = self.column_types['categorical'] + self.column_types['binary']
if self.num_cols is None:
self.num_cols = self.column_types['numerical']
return self
def transform(self, X,):
num_imputer = SimpleImputer(strategy="constant", fill_value=0)
cat_imputer = SimpleImputer(strategy="constant")
df = X.copy()
df[self.num_cols] = num_imputer.fit_transform(df[self.num_cols])
df[self.cat_cols] = cat_imputer.fit_transform(df[self.cat_cols])
return df
# ----------------------------------------------------------------------------
# Extractor
# ----------------------------------------------------------------------------
class DateExtractor(TransformerMixin):
"""
Extract Day, Month and Year of the given date columns
Parameters
----------
datacolumns: list of date columns
fmt : strptime date format, e.g. "%d-%m-%Y"
Returns
-------
    _Day: day of month
_Month: month
_Year: year
_Weekday: Monday is 1
"""
def __init__(self, datecolumns, fmt):
self.cols = datecolumns
self.fmt = fmt
def fit(self, X, y=None):
return self
def transform(self, X):
"expand date columns into weekday, day, month, year"
df = X.copy()
to_day = lambda x: datetime.strptime(x, self.fmt).day
to_month = lambda x: datetime.strptime(x, self.fmt).month
to_year = lambda x: datetime.strptime(x, self.fmt).year
to_weekday = lambda x: datetime.strptime(x, self.fmt).isoweekday()
funcs = [to_day, to_month, to_year, to_weekday]
date_val = ['_Day', '_Month', '_Year', "_Weekday"]
for col in self.cols:
names = [col + d for d in date_val]
combo = zip(names, funcs)
for (field, f) in combo:
df[field] = df[col].apply(f)
return df
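# Illustrative sketch (added, not in the original module): expanding a single
# date column; the column name and format below are assumptions for the example.
# >>> df = pd.DataFrame({'start': ['25-12-2020']})
# >>> DateExtractor(['start'], '%d-%m-%Y').fit_transform(df).iloc[0].to_dict()
# {'start': '25-12-2020', 'start_Day': 25, 'start_Month': 12,
#  'start_Year': 2020, 'start_Weekday': 5}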
class ValueRemover(TransformerMixin):
"""
Remove rows for a given list of values
Parameters
----------
cols: list of columns
values: list of values
Returns
-------
dataframe filter by the values
"""
def __init__(self, cols, values):
# both cols and values are lists
self.cols = cols
self.values = values
def fit(self, X, y=None):
self.X = X
return self
def transform(self, X):
rmidx = self.X[self.cols].isin(self.values)
return self.X[~rmidx.values]
class ColumnRemover(TransformerMixin):
"""Remove the given list of columns"""
def __init__(self, cols):
# cols to drop
self.cols = cols
def fit(self, X, y=None):
return self
def transform(self, X):
return X.drop(self.cols, axis=1)
class CreateDataFrame(TransformerMixin):
"""
Create Data Frame from ndarray or list
Parameters
----------
col : list of column names
X : ndarry or list
"""
def __init__(self, col):
self.col = col
def fit(self, X, y=None):
return self
def transform(self, X):
df = pd.DataFrame(X)
df.columns = self.col
return df
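# Illustrative sketch (added): wrapping an ndarray back into a labelled frame.
# >>> CreateDataFrame(['a', 'b']).fit_transform(np.array([[1, 2], [3, 4]]))
#    a  b
# 0  1  2
# 1  3  4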
# ----------------------------------------------------------------------------
# Encoder
# ----------------------------------------------------------------------------
# Encode all categorical columns of data frame
# class DataFrameLabelEncoder(TransformerMixin):
# class DummyEncoder(BaseEstimator, TransformerMixin):
# "One Hot Encoder"
# def __init__(self):
# self.encoder = OneHotEncoder(handle_unknown=='ignore')
# def fit(self, X, y=None):
# self.encoder.fit(X.ravel())
# return self
# def transform(self, X):
# shape = X.shape
# result = self.encoder.transform(X.ravel())
# return result.reshape(shape)
class DataFrameCategoricalEncoder(TransformerMixin):
def __init__(self, cols=None, onehot=True, exclusion=[]):
# cols: categorical column (if None, it is determined automatically)
self.reset(cols)
self.exclusion = exclusion
def get_transformed_index(self, col, x):
"Return the transformed index of the give value in the column"
d = self.get_transform_map(col)
return d[x]
def single_transform(self, col, x):
"""
transform the given column
col : column name in string
x : pdf series or list to be transformed
"""
if col in self.fit_dict.keys():
rule = self.fit_dict[col]
return (rule.fit_transform(x))
else:
return None
def get_transform_map(self, col):
"""Return the transformed dictionary of the given column"""
if col in self.fit_dict.keys():
rule = self.fit_dict[col]
return (dict(zip(rule.classes_, range(len(rule.classes_)))))
else:
return None
def single_inverse_transform(self, col, x):
"""Inverse transformed column to original"""
if col in self.fit_dict.keys():
rule = self.fit_dict[col]
return (rule.inverse_transform(x))
else:
return None
def get_all_transform_map(self):
"""Return the transform map of all columns"""
result = defaultdict(np.array)
for col in self.fit_dict.keys():
rule = self.fit_dict[col]
result[col] = dict(zip(rule.classes_, range(len(rule.classes_))))
return (dict(result))
def reset(self, cols=None):
"reset the state"
# label encoder (a default dict is used to assoicate an encoder for that column)
# TODO: stacked encoder with oneHot
# TODO: one hot encoding
self.fit_dict = defaultdict(LabelEncoder)
self.categorical_cols = cols
def fit(self, X, y=None):
"dummy fit"
return self
# TODO: inverse_transform
def transform(self, X, y=None):
'''
Call LabelEncoder() for each column in data frame X
Parameters
----------
X : pandas data frame
'''
# lambda function either transform or a columns or return the same column
do_transform = lambda x: self.fit_dict[x.name].fit_transform(x) \
if x.name in self.categorical_cols else x
result = X.copy()
# get categorical variables
if self.categorical_cols is None:
self.categorical_cols = ex.get_categorical_column(X)
# apply exclusions
self.categorical_cols = list(set(self.categorical_cols) - set(self.exclusion))
# Encoding and return results
result = result.apply(do_transform)
return result
# def fit_transform(self, X):
# "fit and transformataion"
# return (self.transform(X))
class Encoder(TransformerMixin):
"""
Apply Label Encoder to categorical columns
Note: mixed type returns errors
Parameters
----------
df: data frame
X: list of column names to be encoded
Returns
-------
"""
def __init__(self, todf=True):
self.todf = todf
def fit(self, X, y=None):
return self
def transform(self, X):
self.categoricalCols = [col for col in X.dtypes.index if X.dtypes.get(col) == 'object' ]
self.fit_dict = defaultdict(LabelEncoder)
# lambda function either transform columns or return the same column
do_transform = lambda x: self.fit_dict[x.name].fit_transform(x) \
if x.name in self.categoricalCols else x
result = X.copy()
# Encoding and return results
result = result.apply(do_transform)
if self.todf:
return result
else:
return result.values
# Deprecated: use label encoder
# class CategoricalEncoder(BaseEstimator, TransformerMixin):
# "Label encoder"
# def __init__(self):
# self.encoder = LabelEncoder()
# def fit(self, X, y=None):
# self.encoder.fit(X.ravel())
# return self
# def transform(self, X):
# shape = X.shape
# result = self.encoder.transform(X.ravel())
# return result.reshape(shape)
# class Factorizer(BaseEstimator, TransformerMixin):
# "Label encoder"
# def __init__(self):
# pass
# def fit(self, X, y=None):
# return self
# def transform(self, X):
# codedX, categories = X.factorize()
# return codedX
# class DataFrameSelector(BaseEstimator, TransformerMixin):
class TypeSelector(BaseEstimator, TransformerMixin):
"""
Select columns with specific types:
Parameters
----------
X : data frame
    column_type: 'binary', 'categorical' or 'numerical'
"""
def __init__(self, column_type, todf=True):
self.column_type = column_type
self.todf = todf
def fit(self, X, y=None):
return self
def transform(self, X):
dftype = ex.get_column_type(X)
attribute_names = dftype[self.column_type]
# if self.column_type == "numerical":
# self.attribute_names =
# elif self.column_type == "categorical":
# self.attribute_names = ex.get_numerical_column(X)
# else:
# self.attribute_names = ex.get_categorical_column(X)
if self.todf:
return X[attribute_names]
else:
return X[attribute_names].values
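# Illustrative sketch (added, not part of the original file): selecting only the
# numerical columns of a frame; relies on ex.get_column_type() returning a dict
# with 'numerical'/'categorical'/'binary' keys, as assumed above.
# >>> df = pd.DataFrame({'city': ['a', 'b'], 'income': [1.0, 2.0]})
# >>> TypeSelector('numerical').fit_transform(df).columns.tolist()
# ['income']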
class FunctionTransformer(TransformerMixin):
"""
Apply a function to the columns of df
e.g.
t = tr.FunctionTransformer(ex.get_distinct_value, 'Cl_Toilets', 'Cl_Scenic_View')
Parameters
----------
X : dataframe
fun: function object
args: argument to the function
"""
def __init__(self, fun, *args):
self.transformer = fun
self.args = args
def fit(self, X, y=None):
return self
def transform(self, X):
        result = self.transformer(X, *self.args)
return result
class TransformDebugger(TransformerMixin):
"""
Transformer Wrapper
Embed a transformer and print debug information
Parameters
----------
transformer: any transformer
debug_func : print function for debug (see utility lib)
cols : columns to show when debug
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
def __init__(self, transformer, debug_func, cols=None):
self.transformer = transformer
self.debug_func = debug_func
self.cols = cols
def set_col(self, cols):
self.cols = cols
def fit(self, X, y=None):
return self
def _debug_print(self, X):
"print debug information"
print(self.transformer.__class__.__name__)
# select a random row of data
idx = random.randrange(0, len(X))
# show only specific columns
# fill in all columns if not specified
if isinstance(X, pd.core.frame.DataFrame):
if self.cols is None:
cols = X.columns
else:
cols = self.cols
XX = X[cols]
elif isinstance(X, np.ndarray):
if self.cols is None:
                cols = list(range(X.shape[1]))
else:
cols = self.cols
XX = X[:, cols]
else:
raise ValueError("input is neither a data frame nor numpy array")
# call the debug function
self.debug_func(XX, idx, "Before")
XX = self.transformer.transform(XX)
self.debug_func(XX, idx, "After")
def transform(self, X):
"transform routine for pipeline"
self._debug_print(X)
return X
# ----------------------------------------------------------------------------
# DEBUG
# ----------------------------------------------------------------------------
# class DataFrameOneHotEncoder(TransformerMixin):
# def __init__(self, cols=None, exclusion=[]):
# self.reset(cols)
# self.exclusion = exclusion
# def reset(self, cols=None):
# self.fit_dict = defaultdict(OneHotEncoder)
# # self.encoder = defaultdict(OneHotEncoder)
# self.categorical_cols = cols
# self.shape_dict = None
# self.index = None
# self.column_name = None
# def record_metadata(self, X):
# self.shape_dict = X.shape
# self.index = X.index
# self.column_name = X.columns
# def fit(self, X, y=None):
# return self
# def inverse_transform(self, X):
# pass
# def transform(self, X):
# """
# X : a data frame
# """
# # x is a series
# do_transform = lambda x: self.fit_dict[x.name].fit_transform(x.values.reshape(-1, 1)) \
# if x.name in self.categorical_cols else x
# self.record_metadata(X)
# result = X.copy()
# # get categorical variables
# if self.categorical_cols is None:
# self.categorical_cols = ex.get_categorical_column(X)
# # apply exclusions
# self.categorical_cols = list(set(self.categorical_cols) - set(self.exclusion))
# # Encoding and return results
# result = result.apply(do_transform)
# return result
# class DummyEncoder(TransformerMixin):
# '''
# Convert the columns to dummy variables
# '''
# def __init__(self, excluded_cols=None, cols=None):
# '''
# excluded_cols : columns to be excluded (higher priority)
# cols : columns to transform
# '''
# self.reset(excluded_cols, cols)
# def fit(self, X, y=None):
# """needed for pipeline"""
# # must return self
# return self
# def set_col(self, cols):
# self.cols = cols
# def reset(self, excluded_cols, cols=None):
# "reset the state"
# # dictionary of label encoder
# self.categorical_cols = cols
# if isinstance(excluded_cols, list):
# self.excluded_cols = excluded_cols
# else:
# self.excluded_cols = [excluded_cols]
# def transform(self, X):
# """
# X : pd data frame
# """
# # get cols for transformation
# if self.categorical_cols is None:
# if self.excluded_cols is not None:
# cols = list(set(X.columns) - set(self.excluded_cols))
# else:
# cols = X.columns
# else:
# if self.excluded_cols is not None:
# cols = list(set(self.categorical_cols) - set(self.excluded_cols))
# else:
# cols = self.categorical_cols
# dummy_df = pd.get_dummies(X[cols])
# # columns not for transformation
# other_cols = list(set(X.columns) - set(cols))
# if other_cols is not None:
# df = pd.concat([dummy_df, X[other_cols]], axis=1)
# else:
# df = dummy_df
# return df
|
the-stack_106_19791
|
import numpy as np
def _ensure_matrix(x):
"""
Ensures the vector/matrix `x` is in matrix format.
Parameters
----------
x : array_like, shape (n,)
Vector or matrix.
Returns
-------
x : array_like, shape (m, p)
Matrix.
"""
x = np.array(x)
if x.ndim == 1:
x = np.reshape(x, (x.size, 1))
elif x.ndim > 2:
raise ValueError('`x` must be of dimension 1 or 2.')
return x
|
the-stack_106_19792
|
# -*- coding: utf-8 -*-
#
# sports-betting documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 18 14:44:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'numpydoc',
'sphinx_gallery.gen_gallery',
]
# this is needed for some reason...
# see https://github.com/numpy/numpydoc/issues/69
numpydoc_show_class_members = False
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sports-betting'
copyright = u'2019, Georgios Douzas'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from sportsbet import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_templates']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Custom style
html_style = 'css/sports-betting.css'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sports-bettingdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'sports-betting.tex', u'sports-betting Documentation',
u'Georgios Douzas', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'sports-betting', u'sports-betting Documentation',
[u'Georgios Douzas'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sports-betting', u'sports-betting Documentation',
u'Georgios Douzas', 'sports-betting', 'Sports betting toolbox.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# intersphinx configuration: cross-reference the Python standard library and related projects.
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(
sys.version_info), None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org/', None),
'sklearn': ('http://scikit-learn.org/stable', None)
}
# sphinx-gallery configuration
sphinx_gallery_conf = {
'doc_module': 'sportsbet',
'backreferences_dir': os.path.join('generated'),
'reference_url': {
'sportsbet': None},
'filename_pattern': '/*'
}
def setup(app):
    # Add a copy button for copying code snippets from the documentation
app.add_javascript('js/copybutton.js')
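# Editor's note (sketch, not part of the original conf.py): Sphinx deprecated
# Application.add_javascript() in 1.8 and removed it in 4.0 in favour of
# add_js_file(). A version-tolerant setup(), assuming the same copybutton.js
# asset, could look like this:
#     def setup(app):
#         if hasattr(app, 'add_js_file'):
#             app.add_js_file('js/copybutton.js')
#         else:
#             app.add_javascript('js/copybutton.js')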
|
the-stack_106_19793
|
from django.test import TestCase
from django.contrib.auth.models import User
from dwitter.models import Dweet
from dwitter.models import Comment
from django.utils import timezone
from datetime import timedelta
class DweetTestCase(TestCase):
def setUp(self):
user1 = User.objects.create(id=1, username="user1", password="")
user2 = User.objects.create(id=2, username="user2", password="")
now = timezone.now()
dweet1 = Dweet.objects.create(id=1,
code="dweet1 code",
posted=now - timedelta(minutes=1),
author=user1)
dweet2 = Dweet.objects.create(id=2,
code="dweet2 code",
posted=now,
reply_to=dweet1,
author=user2)
Comment.objects.create(id=1,
text="comment1 text",
posted=now - timedelta(minutes=1),
reply_to=dweet2,
author=user1)
Comment.objects.create(id=2,
text="comment2 text",
posted=now,
reply_to=dweet1,
author=user2)
def test_comment_renders_to_string_correctly(self):
self.assertEqual(Comment.objects.get(id=1).__str__(),
"c/1 (user1) to d/2 (user2)")
self.assertEqual(Comment.objects.get(id=2).__str__(),
"c/2 (user2) to d/1 (user1)")
def test_comment_reply_to_do_nothing_on_soft_delete(self):
Dweet.objects.get(id=2).delete()
self.assertTrue(Comment.objects.get(id=1).reply_to.deleted)
self.assertEqual(Comment.objects.get(id=2).reply_to,
Dweet.objects.get(id=1))
def test_comment_author_cascade_on_delete(self):
User.objects.get(username="user1").delete()
with self.assertRaises(Comment.DoesNotExist):
Comment.objects.get(id=1)
self.assertEqual(Comment.objects.get(id=2).author,
User.objects.get(id=2))
|
the-stack_106_19794
|
from torch import nn
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from core import resnet, densenet, resnext, mobilenet
import numpy as np
from core.anchors import generate_default_anchor_maps, hard_nms
from config import CAT_NUM, PROPOSAL_NUM
class ProposalNet(nn.Module):
def __init__(self, in_features=1280):
super(ProposalNet, self).__init__()
# self.down1 = nn.Conv2d(2048, 128, 3, 1, 1)
self.down1 = nn.Conv2d(in_features, 128, 3, 1, 1)
self.down2 = nn.Conv2d(128, 128, 3, 2, 1)
self.down3 = nn.Conv2d(128, 128, 3, 2, 1)
self.ReLU = nn.ReLU()
self.tidy1 = nn.Conv2d(128, 6, 1, 1, 0)
self.tidy2 = nn.Conv2d(128, 6, 1, 1, 0)
self.tidy3 = nn.Conv2d(128, 9, 1, 1, 0)
def forward(self, x):
batch_size = x.size(0)
d1 = self.ReLU(self.down1(x))
d2 = self.ReLU(self.down2(d1))
d3 = self.ReLU(self.down3(d2))
t1 = self.tidy1(d1).view(batch_size, -1)
t2 = self.tidy2(d2).view(batch_size, -1)
t3 = self.tidy3(d3).view(batch_size, -1)
return torch.cat((t1, t2, t3), dim=1)
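# Illustrative shape check (editor's sketch, not in the original file): with the
# default in_features=1280 and a 14x14 input feature map, the three "tidy" heads
# emit 6*14*14 + 6*7*7 + 9*4*4 = 1176 + 294 + 144 = 1614 anchor scores per image:
#     rpn = ProposalNet()
#     scores = rpn(torch.randn(2, 1280, 14, 14))  # -> torch.Size([2, 1614])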
class attention_net(nn.Module):
def __init__(self, topN=4, classNum=200):
super(attention_net, self).__init__()
# self.pretrained_model = resnet.resnet50(pretrained=True)
# self.pretrained_model = resnet.resnet152(pretrained=True)
# self.pretrained_model = resnet.resnet101(pretrained=True)
# self.pretrained_model = densenet.densenet121(pretrained=False,num_classes=classNum)
# self.pretrained_model = densenet.densenet169(pretrained=True,num_classes=classNum)
# self.pretrained_model = densenet.densenet161(pretrained=True,num_classes=classNum)
# self.pretrained_model = resnext.resnext101_32x8d(pretrained=True,num_classes=classNum)
self.pretrained_model = mobilenet.mobilenet_v2(pretrained=True,num_classes=classNum)
# self.pretrained_model.avgpool = nn.AdaptiveAvgPool2d(1)
# print("classNum: ",classNum) # classNum: 20 Fish
# num_ftrs = self.pretrained_model.fc.in_features
# self.pretrained_model.fc = nn.Linear(num_ftrs, classNum)
# self.pretrained_model.fc = nn.Linear(512 * 4, classNum)
self.proposal_net = ProposalNet()
self.topN = topN
self.concat_net = nn.Linear(6400, classNum)
self.partcls_net = nn.Linear(1280, classNum)
_, edge_anchors, _ = generate_default_anchor_maps()
self.pad_side = 224
        self.edge_anchors = (edge_anchors + 224).astype(int)  # np.int was removed in NumPy 1.24; plain int is equivalent
def forward(self, x):
resnet_out, rpn_feature, feature = self.pretrained_model(x)
# print("resnet_out {} rpn_feature {} feature {}".format(resnet_out.shape, rpn_feature.shape, feature.shape))
# resnet_out torch.Size([3, 1000])
# rpn_feature torch.Size([3, 1024, 14, 14]) feature torch.Size([3, 1024])
x_pad = F.pad(x, (self.pad_side, self.pad_side, self.pad_side, self.pad_side), mode='constant', value=0)
batch = x.size(0)
# we will reshape rpn to shape: batch * nb_anchor
rpn_score = self.proposal_net(rpn_feature.detach())
all_cdds = [
np.concatenate((x.reshape(-1, 1), self.edge_anchors.copy(), np.arange(0, len(x)).reshape(-1, 1)), axis=1)
for x in rpn_score.data.cpu().numpy()]
top_n_cdds = [hard_nms(x, topn=self.topN, iou_thresh=0.25) for x in all_cdds]
top_n_cdds = np.array(top_n_cdds)
        top_n_index = top_n_cdds[:, :, -1].astype(int)
top_n_index = torch.from_numpy(top_n_index).cuda()
top_n_prob = torch.gather(rpn_score, dim=1, index=top_n_index)
part_imgs = torch.zeros([batch, self.topN, 3, 224, 224]).cuda()
for i in range(batch):
for j in range(self.topN):
                [y0, x0, y1, x1] = top_n_cdds[i][j, 1:5].astype(int)
part_imgs[i:i + 1, j] = F.interpolate(x_pad[i:i + 1, :, y0:y1, x0:x1], size=(224, 224), mode='bilinear',
align_corners=True)
part_imgs = part_imgs.view(batch * self.topN, 3, 224, 224)
_, _, part_features = self.pretrained_model(part_imgs.detach())
part_feature = part_features.view(batch, self.topN, -1)
part_feature = part_feature[:, :CAT_NUM, ...].contiguous()
part_feature = part_feature.view(batch, -1)
# concat_logits have the shape: B*200
# print("part_feature {}".format(part_feature.shape))
concat_out = torch.cat([part_feature, feature], dim=1)
concat_logits = self.concat_net(concat_out)
raw_logits = resnet_out
# part_logits have the shape: B*N*200
part_logits = self.partcls_net(part_features).view(batch, self.topN, -1)
return [raw_logits, concat_logits, part_logits, top_n_index, top_n_prob]
def list_loss(logits, targets):
temp = F.log_softmax(logits, -1)
loss = [-temp[i][targets[i].item()] for i in range(logits.size(0))]
return torch.stack(loss)
def ranking_loss(score, targets, proposal_num=PROPOSAL_NUM):
loss = Variable(torch.zeros(1).cuda())
batch_size = score.size(0)
for i in range(proposal_num):
targets_p = (targets > targets[:, i].unsqueeze(1)).type(torch.cuda.FloatTensor)
pivot = score[:, i].unsqueeze(1)
loss_p = (1 - pivot + score) * targets_p
loss_p = torch.sum(F.relu(loss_p))
loss += loss_p
return loss / batch_size
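# Typical training-time wiring of the two losses above (editor's sketch; the
# variable names are illustrative and a CUDA device is required because
# ranking_loss allocates its accumulator on the GPU):
#     raw_logits, concat_logits, part_logits, _, top_n_prob = net(img)
#     part_loss = list_loss(part_logits.view(batch_size * PROPOSAL_NUM, -1),
#                           label.unsqueeze(1).repeat(1, PROPOSAL_NUM).view(-1)
#                           ).view(batch_size, PROPOSAL_NUM)
#     rank_loss = ranking_loss(top_n_prob, part_loss, proposal_num=PROPOSAL_NUM)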
|
the-stack_106_19795
|
"""
Integration tests for the Cornflow client.
Base, admin and service users are tested.
The integration between Airflow and Cornflow, through the Airflow and Cornflow clients, is tested as well.
"""
# Full imports
import json
import os
import pulp as pl
import time
# Partial imports
from unittest import TestCase
# Internal imports
from cornflow_client import CornFlow
from cornflow_client.constants import STATUS_OPTIMAL, STATUS_NOT_SOLVED
from cornflow_client.schema.tools import get_pulp_jsonschema
from cornflow_client.tests.const import PUBLIC_DAGS, PULP_EXAMPLE
# Constants
path_to_tests_dir = os.path.dirname(os.path.abspath(__file__))
# Helper functions
def _load_file(_file):
with open(_file) as f:
temp = json.load(f)
return temp
def _get_file(relative_path):
return os.path.join(path_to_tests_dir, relative_path)
# Testing suit classes
class TestCornflowClientUser(TestCase):
def setUp(self):
self.client = CornFlow(url="http://127.0.0.1:5050/")
login_result = self.client.login("user", "UserPassword1!")
self.assertIn("id", login_result.keys())
self.assertIn("token", login_result.keys())
self.user_id = login_result["id"]
def tearDown(self):
pass
def test_health_endpoint(self):
response = self.client.is_alive()
self.assertEqual(response["cornflow_status"], "healthy")
self.assertEqual(response["airflow_status"], "healthy")
def test_sign_up(self):
response = self.client.sign_up(
"test_username", "[email protected]", "TestPassword2!"
)
self.assertIn("id", response.json().keys())
self.assertIn("token", response.json().keys())
self.assertEqual(201, response.status_code)
def test_create_instance(self):
data = _load_file(PULP_EXAMPLE)
response = self.client.create_instance(data, "test_example", "test_description")
items = [
"id",
"name",
"description",
"created_at",
"user_id",
"data_hash",
"schema",
"executions",
]
for item in items:
self.assertIn(item, response.keys())
self.assertEqual("test_example", response["name"])
self.assertEqual("solve_model_dag", response["schema"])
self.assertEqual("test_description", response["description"])
return response
def test_create_case(self):
data = _load_file(PULP_EXAMPLE)
response = self.client.create_case(
name="test_case",
schema="solve_model_dag",
data=data,
description="test_description",
)
items = [
"id",
"name",
"description",
"created_at",
"user_id",
"data_hash",
"schema",
"solution_hash",
"path",
"updated_at",
"is_dir",
]
for item in items:
self.assertIn(item, response.keys())
self.assertEqual("test_case", response["name"])
self.assertEqual("solve_model_dag", response["schema"])
self.assertEqual("test_description", response["description"])
return response
def test_create_instance_file(self):
response = self.client.create_instance_file(
_get_file("../data/test_mps.mps"),
name="test_filename",
description="filename_description",
)
items = [
"id",
"name",
"description",
"created_at",
"user_id",
"data_hash",
"schema",
"executions",
]
for item in items:
self.assertIn(item, response.keys())
self.assertEqual("test_filename", response["name"])
self.assertEqual("solve_model_dag", response["schema"])
self.assertEqual("filename_description", response["description"])
def test_create_execution(self):
instance = self.test_create_instance()
response = self.client.create_execution(
instance_id=instance["id"],
config={"solver": "PULP_CBC_CMD", "timeLimit": 60},
name="test_execution",
description="execution_description",
schema="solve_model_dag",
)
items = [
"id",
"name",
"description",
"created_at",
"user_id",
"data_hash",
"schema",
"config",
"instance_id",
"state",
"message",
]
for item in items:
self.assertIn(item, response.keys())
self.assertEqual(instance["id"], response["instance_id"])
self.assertEqual("test_execution", response["name"])
self.assertEqual("execution_description", response["description"])
self.assertEqual(
{"solver": "PULP_CBC_CMD", "timeLimit": 60}, response["config"]
)
self.assertEqual(STATUS_NOT_SOLVED, response["state"])
return response
def test_execution_results(self):
execution = self.test_create_execution()
time.sleep(10)
response = self.client.get_results(execution["id"])
items = [
"id",
"name",
"description",
"created_at",
"user_id",
"data_hash",
"schema",
"config",
"instance_id",
"state",
"message",
]
for item in items:
self.assertIn(item, response.keys())
self.assertEqual(execution["id"], response["id"])
self.assertEqual(STATUS_OPTIMAL, response["state"])
def test_execution_status(self):
execution = self.test_create_execution()
response = self.client.get_status(execution["id"])
items = ["id", "state", "message", "data_hash"]
for item in items:
self.assertIn(item, response.keys())
self.assertEqual(STATUS_NOT_SOLVED, response["state"])
time.sleep(10)
response = self.client.get_status(execution["id"])
for item in items:
self.assertIn(item, response.keys())
self.assertEqual(STATUS_OPTIMAL, response["state"])
def test_stop_execution(self):
execution = self.test_create_execution()
response = self.client.stop_execution(execution["id"])
self.assertEqual(response["message"], "The execution has been stopped")
def test_get_execution_log(self):
execution = self.test_create_execution()
response = self.client.get_log(execution["id"])
items = [
"id",
"name",
"description",
"created_at",
"user_id",
"data_hash",
"schema",
"config",
"instance_id",
"state",
"message",
"log",
]
for item in items:
self.assertIn(item, response.keys())
self.assertEqual(execution["id"], response["id"])
def test_get_execution_solution(self):
execution = self.test_create_execution()
time.sleep(10)
response = self.client.get_solution(execution["id"])
items = [
"id",
"name",
"description",
"created_at",
"user_id",
"data_hash",
"schema",
"config",
"instance_id",
"state",
"message",
"data",
"checks",
]
for item in items:
self.assertIn(item, response.keys())
self.assertEqual(execution["id"], response["id"])
self.assertEqual(STATUS_OPTIMAL, response["state"])
return response
def test_create_case_execution(self):
execution = self.test_get_execution_solution()
response = self.client.create_case(
name="case_from_solution",
schema="solve_model_dag",
description="case_from_solution_description",
solution=execution["data"],
)
items = [
"id",
"name",
"description",
"created_at",
"user_id",
"data_hash",
"schema",
"solution_hash",
"path",
"updated_at",
"is_dir",
]
for item in items:
self.assertIn(item, response.keys())
self.assertEqual("case_from_solution", response["name"])
self.assertEqual("solve_model_dag", response["schema"])
self.assertEqual("case_from_solution_description", response["description"])
return response
def test_get_all_instances(self):
self.test_create_instance()
self.test_create_instance()
instances = self.client.get_all_instances()
self.assertGreaterEqual(len(instances), 2)
def test_get_all_executions(self):
self.test_stop_execution()
self.test_stop_execution()
executions = self.client.get_all_executions()
self.assertGreaterEqual(len(executions), 2)
def test_get_all_cases(self):
self.test_create_case()
self.test_create_case()
cases = self.client.get_all_cases()
self.assertGreaterEqual(len(cases), 2)
def test_get_one_user(self):
response = self.client.get_one_user(self.user_id)
self.assertEqual(response.status_code, 200)
items = ["id", "first_name", "last_name", "username", "email"]
for item in items:
self.assertIn(item, response.json().keys())
self.assertEqual(self.user_id, response.json()["id"])
self.assertEqual("user", response.json()["username"])
self.assertEqual("[email protected]", response.json()["email"])
def test_get_one_instance(self):
instance = self.test_create_instance()
response = self.client.get_one_instance(instance["id"])
items = [
"id",
"name",
"description",
"created_at",
"user_id",
"data_hash",
"schema",
"executions",
]
for item in items:
self.assertIn(item, response.keys())
self.assertEqual(instance[item], response[item])
def test_get_one_case(self):
case = self.test_create_case()
response = self.client.get_one_case(case["id"])
items = [
"id",
"name",
"description",
"created_at",
"user_id",
"data_hash",
"schema",
"solution_hash",
"path",
"updated_at",
"is_dir",
]
for item in items:
self.assertIn(item, response.keys())
self.assertEqual(case[item], response[item])
def test_get_one_case_execution(self):
case = self.test_create_case_execution()
response = self.client.get_one_case(case["id"])
items = [
"id",
"name",
"description",
"created_at",
"user_id",
"data_hash",
"schema",
"solution_hash",
"path",
"updated_at",
"is_dir",
]
for item in items:
self.assertIn(item, response.keys())
self.assertEqual(case[item], response[item])
def test_delete_one_case(self):
case = self.test_create_case()
response = self.client.delete_one_case(case["id"])
self.assertEqual("The object has been deleted", response["message"])
def test_put_one_case(self):
case = self.test_create_case()
response = self.client.put_one_case(case["id"], {"name": "new_case_name"})
self.assertEqual("Updated correctly", response["message"])
def test_patch_one_case(self):
# TODO: Get example of data patch for the tests
pass
def test_delete_one_instance(self):
instance = self.test_create_instance()
response = self.client.delete_one_instance(instance["id"])
self.assertEqual(200, response.status_code)
self.assertEqual("The object has been deleted", response.json()["message"])
def test_get_schema(self):
response = self.client.get_schema("solve_model_dag")
schema = {
"config": get_pulp_jsonschema("solver_config.json"),
"instance": get_pulp_jsonschema(),
"solution": get_pulp_jsonschema(),
}
schema["config"]["properties"]["solver"]["enum"] = pl.listSolvers()
schema["config"]["properties"]["solver"]["default"] = "PULP_CBC_CMD"
for (key, value) in schema.items():
self.assertDictEqual(value, response[key])
def test_get_all_schemas(self):
response = self.client.get_all_schemas()
read_schemas = [dag for v in response for (_, dag) in v.items()]
for schema in PUBLIC_DAGS:
self.assertIn(schema, read_schemas)
class TestCornflowClientAdmin(TestCase):
def setUp(self):
self.client = CornFlow(url="http://127.0.0.1:5050/")
login_result = self.client.login("admin", "Adminpassword1!")
self.assertIn("id", login_result.keys())
self.assertIn("token", login_result.keys())
self.base_user_id = CornFlow(url="http://127.0.0.1:5050/").login(
"user", "UserPassword1!"
)["id"]
def tearDown(self):
pass
def test_get_all_users(self):
response = self.client.get_all_users()
self.assertGreaterEqual(len(response), 3)
def test_get_one_user(self):
response = self.client.get_one_user(self.base_user_id)
items = ["id", "first_name", "last_name", "username", "email"]
for item in items:
self.assertIn(item, response.json().keys())
self.assertEqual(self.base_user_id, response.json()["id"])
self.assertEqual("user", response.json()["username"])
self.assertEqual("[email protected]", response.json()["email"])
class TestCornflowClientService(TestCase):
def setUp(self):
self.client = CornFlow(url="http://127.0.0.1:5050/")
login_result = self.client.login("airflow", "Airflow_test_password1")
self.assertIn("id", login_result.keys())
self.assertIn("token", login_result.keys())
def tearDown(self):
pass
def test_get_execution_data(self):
client = CornFlow(url="http://127.0.0.1:5050/")
_ = client.login("user", "UserPassword1!")
data = _load_file(PULP_EXAMPLE)
instance = client.create_instance(data, "test_example", "test_description")
execution = client.create_execution(
instance_id=instance["id"],
config={"solver": "PULP_CBC_CMD", "timeLimit": 60},
name="test_execution",
description="execution_description",
schema="solve_model_dag",
)
response = self.client.get_data(execution["id"])
items = ["id", "data", "config"]
for item in items:
self.assertIn(item, response.keys())
self.assertEqual(instance["id"], response["id"])
self.assertEqual(data, response["data"])
self.assertEqual(execution["config"], response["config"])
def test_write_execution_solution(self):
client = CornFlow(url="http://127.0.0.1:5050/")
_ = client.login("user", "UserPassword1!")
data = _load_file(PULP_EXAMPLE)
instance = client.create_instance(data, "test_example", "test_description")
execution = client.create_execution(
instance_id=instance["id"],
config={"solver": "PULP_CBC_CMD", "timeLimit": 60},
name="test_execution",
description="execution_description",
schema="solve_model_dag",
)
time.sleep(15)
solution = client.get_solution(execution["id"])
payload = dict(
state=1, log_json={}, log_text="", solution_schema="solve_model_dag"
)
payload["data"] = solution["data"]
response = self.client.write_solution(execution["id"], **payload)
self.assertEqual("results successfully saved", response["message"])
def test_get_deployed_dags(self):
response = self.client.get_deployed_dags()
items = ["id", "description"]
for item in items:
self.assertIn(item, response[0].keys())
deployed_dags = [v["id"] for v in response]
for dag in PUBLIC_DAGS:
self.assertIn(dag, deployed_dags)
def test_post_deployed_dag(self):
response = self.client.create_deployed_dag(
name="test_dag", description="test_dag_description"
)
items = ["id", "description"]
for item in items:
self.assertIn(item, response.keys())
self.assertEqual("test_dag", response["id"])
self.assertEqual("test_dag_description", response["description"])
|
the-stack_106_19796
|
from __future__ import unicode_literals
from io import open
import os
import re
import tempfile
import ttfw_idf
@ttfw_idf.idf_example_test(env_tag="test_jtag_arm")
def test_examples_sysview_tracing_heap_log(env, extra_data):
rel_project_path = os.path.join('examples', 'system', 'sysview_tracing_heap_log')
dut = env.get_dut('sysview_tracing_heap_log', rel_project_path)
idf_path = dut.app.get_sdk_path()
proj_path = os.path.join(idf_path, rel_project_path)
elf_path = os.path.join(dut.app.get_binary_path(rel_project_path), 'sysview_tracing_heap_log.elf')
def get_temp_file():
with tempfile.NamedTemporaryFile(delete=False) as f:
return f.name
try:
tempfiles = [get_temp_file(), get_temp_file()]
with open(os.path.join(proj_path, 'gdbinit')) as f_in, open(tempfiles[0], 'w') as f_out:
new_content = f_in.read()
            # inside Docker the localhost connection fails unless the address is made explicit:
new_content = new_content.replace(':3333', '127.0.0.1:3333', 1)
new_content = new_content.replace('file:///tmp/heap_log.svdat', 'file://{}'.format(tempfiles[1]), 1)
f_out.write(new_content)
with ttfw_idf.OCDProcess(os.path.join(proj_path, 'openocd.log')):
dut.start_app()
dut.expect('esp_apptrace: Initialized TRAX on CPU0')
gdb_args = '-x {} --directory={}'.format(tempfiles[0], os.path.join(proj_path, 'main'))
with ttfw_idf.GDBProcess(os.path.join(proj_path, 'gdb.log'), elf_path, dut.app.target, gdb_args) as gdb:
gdb.pexpect_proc.expect_exact('Thread 1 hit Temporary breakpoint 2, heap_trace_stop ()')
gdb.pexpect_proc.expect_exact('(gdb)')
# dut has been restarted by gdb since the last dut.expect()
dut.expect('esp_apptrace: Initialized TRAX on CPU0')
with ttfw_idf.CustomProcess(' '.join([os.path.join(idf_path, 'tools/esp_app_trace/sysviewtrace_proc.py'),
'-p',
'-b', elf_path,
tempfiles[1]]),
logfile='sysviewtrace_proc.log') as sysviewtrace:
sysviewtrace.pexpect_proc.expect(re.compile(r'Found \d+ leaked bytes in \d+ blocks.'), timeout=120)
finally:
for x in tempfiles:
try:
os.unlink(x)
except Exception:
pass
if __name__ == '__main__':
test_examples_sysview_tracing_heap_log()
|
the-stack_106_19797
|
# 2. Add Two Numbers [Medium]
# You are given two non-empty linked lists representing two non-negative
# integers. The digits are stored in reverse order and each of their nodes
# contains a single digit. Add the two numbers and return the sum as a linked list.
# You may assume the two numbers do not contain any leading zero, except the
# number 0 itself.
# Example:
# Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
# Output: 7 -> 0 -> 8
# Explanation: 342 + 465 = 807.
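# LeetCode injects ListNode for you; this minimal stand-in (editor's addition,
# not part of the submitted solution) lets the snippet run on its own:
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next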
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
prehead = node = ListNode(0)
carry = 0
while l1 or l2 or carry:
# a C/C++ programmer may tend to write:
# node = node.next = ListNode(carry)
# which is wrong, see discussion below
node.next = ListNode(carry)
node = node.next
if l1:
node.val += l1.val
l1 = l1.next
if l2:
node.val += l2.val
l2 = l2.next
if node.val >= 10:
node.val -= 10
carry = 1
else:
carry = 0
return prehead.next
"""
Note for C/C++ programmers
In Python,
x = y = foo()
is equivalent to
temp = foo()
x = temp
y = temp
Be careful of the order, which is the opposite of C. As the official documentation puts it:
... assigns the single resulting object to each of the target lists, from left
to right.
"""
|
the-stack_106_19799
|
# Imports
import pyttsx3
from time import time, sleep
import unicodedata
# Retrieve the WPM speed from the user
rate = int(input('WPM : '))
while not(0 < rate <= 50):
rate = int(input('Please enter a speed between 0 and 50 WPM : '))
# Read the user's text from the text.txt file
with open('text.txt', 'r') as f:
string = f.read()
def preprocess(string, punctuation):
"""Transform the raw txt into a list of word for TTS
Parameters
----------
string : str
Raw text file
punctuation : dict
        Dictionary with the punctuation to pronounce
Returns
-------
list
List of preprocessed words
"""
    string = string.replace('\n', ' ')  # Remove all newlines (line breaks)
for key, value in punctuation.items():
string = string.replace(key, value) # Replace punctuation
string = unicodedata.normalize('NFD', string)\
.encode('ascii', 'ignore')\
.decode("utf-8") # Normalize special characters
string = string.split(' ') # Split the string into a list
txt = []
for word in string:
if word not in [' ', '']:
            txt.append(word)  # Skip spaces and empty strings
return txt
def need(word, rate, len_word=5):
"""Computes the time needed to write word
Parameters
----------
word : str
The word
rate : int
Word per minute
len_word : int, optional
Usually, a word is defined as 5 characters
Returns
-------
float
Time needed (s)
"""
pause = len(word)/len_word*60/rate
return pause
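# Worked example (illustrative): at rate=20 WPM the 7-letter word "bonjour"
# counts as 7/5 = 1.4 "standard words", so need("bonjour", 20) returns
# 1.4 * 60 / 20 = 4.2 seconds of pause.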
# French common punctuation
punctuation = {
',' : ' virgule ',
'?' : " point d'interrogation ",
'!' : " point d'exclamation ",
':' : ' deux points ',
'.' : ' point ',
'...' : ' trois petits points ',
'=' : ' egal '
}
# Reverse punctuation
reverse = {value[1:-1]: key for key, value in punctuation.items()}
# Preprocess the txt file and init TTS
txt = preprocess(string, punctuation)
engine = pyttsx3.init()
# Track wpm speed
length = 0
start = time()
sleep(need(txt[0], rate))  # Avoid division by zero in the first loop iteration
# Loop through each word
for word in txt:
    if any(word == value[1:-1] for value in punctuation.values()):  # The word spells out a punctuation mark
length += 1
print(reverse[word])
# TTS
engine.say(word)
engine.runAndWait()
else: # If it is a word
length += len(word)
wpm = (length/5)/((time()-start)/60)
print(word)
# Compute the needed time and save when TTS started
pause = need(word, rate)
t = time()
# TTS
engine.say(word)
engine.runAndWait()
# Pause
sleep(max(0, t+pause-time()))
print(f'{(length/5)/((time()-start)/60)}wpm') # Print wpm speed
|
the-stack_106_19803
|
from data import data_info
import numpy as np
import pandas as pd
import sys
import matplotlib.pyplot as plt
import plotly.graph_objects as go
def csv_to_df(csv_file):
data = pd.read_csv(csv_file)
data.rename(columns={data.columns[0]: "Number", data.columns[1]: "x", data.columns[2]: "y"}, inplace=True)
data.set_index("Number", inplace=True)
data.index = data.rename(index=str).index
#print(data)
#print(data.index)
return data
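# Assumed input layout (illustrative; real exports may use other header names,
# which csv_to_df simply relabels to Number / x / y):
#     Number,w1(ppm),w2(ppm)
#     1,8.25,121.40
#     2,7.98,119.73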
data_path = data_info.dundee_data_path
experiments_dundee = [
['Ube2T_ref_final.csv', 'ube2t_em02_5mM_final.csv'],
['Ube2T_ref_final.csv', 'ube_2t_em11_3mM_final.csv'],
['Ube2T_ref_final.csv', 'ube2t_em04_3mM_final.csv'],
['Ube2T_ref_final.csv', 'ube2t_em09_3mM_final.csv'],
['Ube2T_ref_final.csv', 'ube2t_em17_3mM_final.csv'],
['Ube2T_ref_final.csv', 'ube2t_em29_3mM_final.csv'],
['baz2b_phd_ref_renumbered.csv', 'baz2b_vs_5-mer_20_1_renumbered.csv'],
['baz2a_phd_ref_renumbered.csv', 'baz2a_phd_vs_5mer_64_1_renumbered.csv'],
['baz2a_phd_ref_renumbered.csv', 'baz2a_phd_vs_10mer_8_1_renumbered.csv'],
]
for free_prot, prot_with_ligand in experiments_dundee:
peaks = csv_to_df(data_path.joinpath(free_prot))
new_peaks = csv_to_df(data_path.joinpath(prot_with_ligand))
print(peaks)
print(new_peaks)
peak_distances = pd.DataFrame(columns=['shift'])
for free_pk_idx in peaks.index:
if free_pk_idx in new_peaks.index:
free_xy = peaks.loc[free_pk_idx].to_numpy()
with_ligand_xy = new_peaks.loc[free_pk_idx].to_numpy()
free_prot_name = free_prot.split('.csv')[0]
with_ligand_name = prot_with_ligand.split('.csv')[0]
print(free_pk_idx, free_xy, with_ligand_xy,
free_xy - with_ligand_xy,
np.square(free_xy - with_ligand_xy),
np.square(free_xy - with_ligand_xy).sum(),
np.sqrt( np.square(free_xy - with_ligand_xy).sum()))
dist = np.sqrt( np.square(free_xy - with_ligand_xy).sum() )
peak_distances.loc[free_pk_idx, 'shift'] = dist
#print(dist)
print(np.linalg.norm(free_xy - with_ligand_xy))
fig2 = go.Figure(layout=go.Layout(
title= free_prot_name + " - " + with_ligand_name,
font=dict(size=10)))
fig2.add_trace(go.Bar(
# x=df1.index,
x=peak_distances.index.tolist(),
y=peak_distances['shift'].tolist(),
name='Real Shift Distance',
marker_color='lightgray'
))
fig2.show()
#peak_distances.plot.bar()
#plt.title(free_prot+" * "+prot_with_ligand)
#ax1 = plt.axes()
#x_axis = ax1.axes.get_xaxis()
#x_axis.set_visible(False)
#plt.show()
print(peak_distances)
peak_distances.to_csv(free_prot_name+"____"+with_ligand_name+".csv")
sys.exit()
|
the-stack_106_19804
|
from django.contrib.contenttypes.models import ContentType
from django.db.models import Count, Model
from modelcluster.fields import ParentalKey
from taggit.models import Tag
# The edit_handlers module extends Page with some additional attributes required by
# wagtail admin (namely, base_form_class and get_edit_handler). Importing this within
# wagtail.admin.models ensures that this happens in advance of running wagtail.admin's
# system checks.
from wagtail.admin import edit_handlers # NOQA
from wagtail.core.models import Page
# A dummy model that exists purely to attach the access_admin permission type to, so that it
# doesn't get identified as a stale content type and removed by the remove_stale_contenttypes
# management command.
class Admin(Model):
class Meta:
default_permissions = [] # don't create the default add / change / delete / view perms
permissions = [
('access_admin', "Can access Wagtail admin"),
]
def get_object_usage(obj):
"""Returns a queryset of pages that link to a particular object"""
pages = Page.objects.none()
# get all the relation objects for obj
relations = [f for f in type(obj)._meta.get_fields(include_hidden=True)
if (f.one_to_many or f.one_to_one) and f.auto_created]
for relation in relations:
related_model = relation.related_model
# if the relation is between obj and a page, get the page
if issubclass(related_model, Page):
pages |= Page.objects.filter(
id__in=related_model._base_manager.filter(**{
relation.field.name: obj.id
}).values_list('id', flat=True)
)
else:
# if the relation is between obj and an object that has a page as a
# property, return the page
for f in related_model._meta.fields:
if isinstance(f, ParentalKey) and issubclass(f.remote_field.model, Page):
pages |= Page.objects.filter(
id__in=related_model._base_manager.filter(
**{
relation.field.name: obj.id
}).values_list(f.attname, flat=True)
)
return pages
def popular_tags_for_model(model, count=10):
"""Return a queryset of the most frequently used tags used on this model class"""
content_type = ContentType.objects.get_for_model(model)
return Tag.objects.filter(
taggit_taggeditem_items__content_type=content_type
).annotate(
item_count=Count('taggit_taggeditem_items')
).order_by('-item_count')[:count]
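# Illustrative usage (editor's sketch; BlogPage and author_snippet are
# hypothetical objects, not part of this module):
#     top_tags = popular_tags_for_model(BlogPage, count=5)
#     pages_linking_to_author = get_object_usage(author_snippet)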
|
the-stack_106_19805
|
from selenium import webdriver
import os, time
# Load the Chrome startup options
option = webdriver.ChromeOptions()
option.add_argument('headless')
# 更换头部
option.add_argument('user-agent=Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Mobile Safari/537.36')
# Define the screenshot path & image file format
screen_path = os.path.dirname(os.getcwd()) + '/report/Screenshots/'
rq = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))
screen_name = screen_path + rq + '.png'
# Open the Chrome browser
# driver = webdriver.Chrome(chrome_options=option)
# Newer Selenium versions require options= instead of chrome_options=
driver = webdriver.Chrome(options=option)
# Define the target URL
url = 'https://www.toutiao.com/i6827512913318642187/'
driver.get(url=url)
time.sleep(2)
print(driver.page_source)
# Take a screenshot
# print(screen_name)
# driver.save_screenshot(screen_name)
# time.sleep(3)
# Quit and close the browser
driver.quit()
"""
只要你执行navigator.webdriver返回值是true就是浏览器内核访问
如果不是返回值是undefined
selenium为了解决这个需进行js 注入
from selenium import webdriver
browser = webdriver.Chrome()
script='''Object.defineProperties(navigator, {webdriver:{get:()=>undefined}})'''
#js1 = '''() =>{ Object.defineProperties(navigator,{ webdriver:{ get: () => false } }) }'''
browser.execute_script(script)
"""
|
the-stack_106_19806
|
# gunicorn config
# gunicorn -c config/gunicorn.py --worker-class sanic.worker.GunicornWorker server:app
bind = '0.0.0.0:8001'
backlog = 2048
workers = 2
worker_connections = 1000
timeout = 30
keepalive = 2
spew = False
daemon = False
umask = 0
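# A common sizing rule from the Gunicorn docs (editor's sketch, not part of this
# deployment's config): scale workers with the host CPU count instead of hard-coding 2.
#     import multiprocessing
#     workers = multiprocessing.cpu_count() * 2 + 1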
|