Dataset schema (column | dtype | range; one data row per file below):

| column | dtype | range |
| --- | --- | --- |
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
4a24a3239fb3a7b865abb2a6196355773dd66502 | 5,951 | py | Python | tools/InterfaceGenerator/MsgVersionGenerate.py | shoamano83/sdl_core | ea5960280585d11ee02542b0ab183d4400ed691d | ["BSD-3-Clause"] | null | null | null | tools/InterfaceGenerator/MsgVersionGenerate.py | shoamano83/sdl_core | ea5960280585d11ee02542b0ab183d4400ed691d | ["BSD-3-Clause"] | 2 | 2017-12-25T19:40:16.000Z | 2017-12-25T23:34:25.000Z | tools/InterfaceGenerator/MsgVersionGenerate.py | vkushnirenko-luxoft/sdl_core | 946e25fa31411a4a00b547cee2d0f1dd12b94a7d | ["BSD-3-Clause"] | 1 | 2020-04-22T07:17:49.000Z | 2020-04-22T07:17:49.000Z |
"""
Generate file with major and minor msg_version.
"""
import xml.etree.ElementTree
from string import Template
import re
from generator.parsers import RPCBase
def generate_msg_version(file_name, path_to_storage):
"""Parses MOBILE_API.xml in order to
receive major_version, minor_version, and patch_version
"""
tree = xml.etree.ElementTree.parse(file_name)
root = tree.getroot()
if (root.tag == "interface" and "version" and "minVersion" in root.attrib):
check_version_format(root.attrib["version"])
array = (root.attrib["version"]).split(".")
major_version = array[0]
minor_version = array[1]
patch_version = array[2]
check_minimum_version_format(root.attrib["minVersion"])
minimum_version_array = (root.attrib["minVersion"]).split(".")
if (len(minimum_version_array) == 2):
minimum_version_array.append("0")
minimum_major_version = minimum_version_array[0]
minimum_minor_version = minimum_version_array[1]
minimum_patch_version = minimum_version_array[2]
if (major_version.isdigit() and minor_version.isdigit() and patch_version.isdigit() and
minimum_major_version.isdigit() and minimum_minor_version.isdigit() and minimum_patch_version.isdigit()):
data_for_storage = prepare_data_for_storage(major_version, minor_version, patch_version,
minimum_major_version, minimum_minor_version, minimum_patch_version)
store_data_to_file(path_to_storage, data_for_storage)
else:
raise RPCBase.ParseError("Attribute version has incorect value in MOBILE_API.xml")
else:
raise RPCBase.ParseError("Check MOBILE_API.xml file, parser can not find first element "
" with tag interface or atribute version")
def store_data_to_file(path_to_storage, data_for_storage):
"""Stores data with major and minor version
to file generated_msg_version.h
"""
path_to_storage = path_to_storage + "/generated_msg_version.h"
fh = open(path_to_storage, 'w')
fh.write(data_for_storage)
fh.close()
def check_version_format(version):
"""Checks correctness of format of version
"""
p = re.compile('\d+\\.\d+\\.\d+')
result = p.match(version)
if result == None or (result.end() != len(version)):
raise RPCBase.ParseError("Incorrect format of version please check MOBILE_API.xml. "
"Need format of version major_version.minor_version.patch_version")
def check_minimum_version_format(version):
"""Checks correctness of format of version
"""
p = re.compile('\d+\\.\d+\\.\d+|\d+\\.\d+')
result = p.match(version)
if result == None or (result.end() != len(version)):
raise RPCBase.ParseError("Incorrect format of version please check MOBILE_API.xml. "
"Need format of minVersion major_version.minor_version or major_version.minor_version.patch_version")
def prepare_data_for_storage(major_version, minor_version, patch_version, minimum_major_version, minimum_minor_version, minimum_patch_version):
"""Prepares data to store to file.
"""
temp = Template(
u'''/*Copyright (c) 2016, Ford Motor Company\n'''
u'''All rights reserved.\n'''
u'''Redistribution and use in source and binary forms, with or without\n'''
u'''modification, are permitted provided that the following conditions are met:\n'''
u'''Redistributions of source code must retain the above copyright notice, this\n'''
u'''list of conditions and the following disclaimer.\n'''
u'''Redistributions in binary form must reproduce the above copyright notice,\n'''
u'''this list of conditions and the following\n'''
u'''disclaimer in the documentation and/or other materials provided with the\n'''
u'''distribution.\n'''
u'''Neither the name of the Ford Motor Company nor the names of its contributors\n'''
u'''may be used to endorse or promote products derived from this software\n'''
u'''without specific prior written permission.\n'''
u'''THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n'''
u'''AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n'''
u'''IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n'''
u'''ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n'''
u'''LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n'''
u'''CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n'''
u'''SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n'''
u'''INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n'''
u'''CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n'''
u'''ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n'''
u'''POSSIBILITY OF SUCH DAMAGE.\n'''
u'''*/\n'''
u'''#ifndef GENERATED_MSG_VERSION_H\n'''
u'''#define GENERATED_MSG_VERSION_H\n\n'''
u'''namespace application_manager {\n\n'''
u'''const uint16_t major_version = $m_version;\n'''
u'''const uint16_t minor_version = $min_version;\n'''
u'''const uint16_t patch_version = $p_version;\n'''
u'''const uint16_t minimum_major_version = $min_major_version;\n'''
u'''const uint16_t minimum_minor_version = $min_minor_version;\n'''
u'''const uint16_t minimum_patch_version = $min_patch_version;\n'''
u'''} // namespace application_manager\n'''
u'''#endif // GENERATED_MSG_VERSION_H''')
data_to_file = temp.substitute(m_version = major_version, min_version = minor_version, p_version = patch_version,
min_major_version = minimum_major_version, min_minor_version = minimum_minor_version, min_patch_version = minimum_patch_version)
return data_to_file
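# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original tool).
# It shows how the generator above could be invoked, assuming a MOBILE_API.xml
# whose root <interface> element carries `version` and `minVersion` attributes;
# the paths are hypothetical placeholders.
#
#     generate_msg_version("MOBILE_API.xml", "/tmp/generated")
#     # -> writes /tmp/generated/generated_msg_version.h with the six uint16_t
#     #    version constants rendered by prepare_data_for_storage()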
| 53.133929 | 143 | 0.696858 |
4a24a3949e4c52171a24159a1dea6e427362a2ae | 8,380 | py | Python | celeriteflow/cpp_extension.py | mirca/celeriteflow | ed09a178df05856097552a9081b6eb6d537216ee | ["MIT"] | 38 | 2018-05-18T14:51:39.000Z | 2022-03-15T20:11:21.000Z | celeriteflow/cpp_extension.py | mirca/celeriteflow | ed09a178df05856097552a9081b6eb6d537216ee | ["MIT"] | 5 | 2019-02-23T13:40:00.000Z | 2022-02-02T06:20:40.000Z | celeriteflow/cpp_extension.py | mirca/celeriteflow | ed09a178df05856097552a9081b6eb6d537216ee | ["MIT"] | 9 | 2018-10-28T14:18:05.000Z | 2022-02-27T22:40:20.000Z |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["BuildExtension"]
import os
import re
import sys
import glob
import copy
import subprocess
import setuptools
from setuptools.command.build_ext import build_ext
import tensorflow as tf
def _find_cuda_home():
'''Finds the CUDA install path.'''
# Guess #1
cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')
if cuda_home is None:
# Guess #2
if sys.platform == 'win32':
cuda_home = glob.glob(
'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v*.*')
else:
cuda_home = '/usr/local/cuda'
if not os.path.exists(cuda_home):
# Guess #3
try:
which = 'where' if sys.platform == 'win32' else 'which'
nvcc = subprocess.check_output(
[which, 'nvcc']).decode().rstrip('\r\n')
cuda_home = os.path.dirname(os.path.dirname(nvcc))
except Exception:
cuda_home = None
if cuda_home and not tf.test.is_built_with_cuda():
print("No CUDA runtime is found, using CUDA_HOME='{}'"
.format(cuda_home))
return cuda_home
CUDA_HOME = _find_cuda_home()
class BuildExtension(build_ext):
def build_extensions(self):
# Register .cu and .cuh as valid source extensions.
self.compiler.src_extensions += ['.cu', '.cuh']
# Save the original _compile method for later.
if self.compiler.compiler_type == 'msvc':
self.compiler._cpp_extensions += ['.cu', '.cuh']
original_compile = self.compiler.compile
original_spawn = self.compiler.spawn
else:
original_compile = self.compiler._compile
def unix_wrap_compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
# Copy before we make any modifications.
cflags = copy.deepcopy(extra_postargs)
try:
original_compiler = self.compiler.compiler_so
if _is_cuda_file(src):
nvcc = _join_cuda_home('bin', 'nvcc')
self.compiler.set_executable('compiler_so', nvcc)
if isinstance(cflags, dict):
cflags = cflags['nvcc']
cflags += ['--compiler-options', "'-fPIC'"]
elif isinstance(cflags, dict):
cflags = cflags['cxx']
# NVCC does not allow multiple -std to be passed, so we avoid
# overriding the option if the user explicitly passed it.
if not any(flag.startswith('-std=') for flag in cflags):
cflags.append('-std=c++11')
original_compile(obj, src, ext, cc_args, cflags, pp_opts)
finally:
# Put the original compiler back in place.
self.compiler.set_executable('compiler_so', original_compiler)
def win_wrap_compile(sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
depends=None):
self.cflags = copy.deepcopy(extra_postargs)
extra_postargs = None
def spawn(cmd):
# Using regex to match src, obj and include files
src_regex = re.compile('/T(p|c)(.*)')
src_list = [
m.group(2) for m in (src_regex.match(elem) for elem in cmd)
if m
]
obj_regex = re.compile('/Fo(.*)')
obj_list = [
m.group(1) for m in (obj_regex.match(elem) for elem in cmd)
if m
]
include_regex = re.compile(r'((\-|\/)I.*)')
include_list = [
m.group(1)
for m in (include_regex.match(elem) for elem in cmd) if m
]
if len(src_list) >= 1 and len(obj_list) >= 1:
src = src_list[0]
obj = obj_list[0]
if _is_cuda_file(src):
nvcc = _join_cuda_home('bin', 'nvcc')
if isinstance(self.cflags, dict):
cflags = self.cflags['nvcc']
elif isinstance(self.cflags, list):
cflags = self.cflags
else:
cflags = []
cmd = [
nvcc, '-c', src, '-o', obj, '-Xcompiler',
'/wd4819', '-Xcompiler', '/MD'
] + include_list + cflags
elif isinstance(self.cflags, dict):
cflags = self.cflags['cxx']
cmd += cflags
elif isinstance(self.cflags, list):
cflags = self.cflags
cmd += cflags
return original_spawn(cmd)
try:
self.compiler.spawn = spawn
return original_compile(sources, output_dir, macros,
include_dirs, debug, extra_preargs,
extra_postargs, depends)
finally:
self.compiler.spawn = original_spawn
# Monkey-patch the _compile method.
if self.compiler.compiler_type == 'msvc':
self.compiler.compile = win_wrap_compile
else:
self.compiler._compile = unix_wrap_compile
build_ext.build_extensions(self)
def CppExtension(name, sources, *args, **kwargs):
kwargs['include_dirs'] = kwargs.get('include_dirs', []) + include_paths()
kwargs = add_tf_flags(kwargs)
kwargs['language'] = 'c++'
return setuptools.Extension(name, sources, *args, **kwargs)
def CUDAExtension(name, sources, *args, **kwargs):
kwargs['include_dirs'] = kwargs.get('include_dirs', []) \
+ include_paths(True)
kwargs['library_dirs'] = kwargs.get('library_dirs', []) \
+ library_paths(True)
kwargs['libraries'] = kwargs.get('libraries', []) + ['cudart']
kwargs = add_tf_flags(kwargs)
kwargs['language'] = 'c++'
return setuptools.Extension(name, sources, *args, **kwargs)
def add_tf_flags(kwargs):
flags = copy.deepcopy(kwargs.get('extra_compile_args', []))
if isinstance(flags, dict):
for k in flags:
flags[k] += tf.sysconfig.get_compile_flags()
else:
flags += tf.sysconfig.get_compile_flags()
kwargs['extra_compile_args'] = flags
flags = copy.deepcopy(kwargs.get('extra_link_args', []))
if isinstance(flags, dict):
for k in flags:
flags[k] += tf.sysconfig.get_link_flags()
else:
flags += tf.sysconfig.get_link_flags()
kwargs['extra_link_args'] = flags
return kwargs
def include_paths(cuda=False):
here = os.path.abspath(__file__)
torch_path = os.path.dirname(os.path.dirname(here))
lib_include = os.path.join(torch_path, 'lib', 'include')
paths = [lib_include]
if cuda:
paths.append(_join_cuda_home('include'))
return paths
def library_paths(cuda=False):
paths = []
if sys.platform == 'win32':
here = os.path.abspath(__file__)
torch_path = os.path.dirname(os.path.dirname(here))
lib_path = os.path.join(torch_path, 'lib')
paths.append(lib_path)
if cuda:
lib_dir = 'lib/x64' if sys.platform == 'win32' else 'lib64'
paths.append(_join_cuda_home(lib_dir))
return paths
def _join_cuda_home(*paths):
'''
Joins paths with CUDA_HOME, or raises an error if CUDA_HOME is not set.
This is basically a lazy way of raising an error for missing $CUDA_HOME
only once we need to get any CUDA-specific path.
'''
if CUDA_HOME is None:
raise EnvironmentError('CUDA_HOME environment variable is not set. '
'Please set it to your CUDA install root.')
return os.path.join(CUDA_HOME, *paths)
def _is_cuda_file(path):
return os.path.splitext(path)[1] in ['.cu', '.cuh']
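# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original module): a
# minimal setup.py wiring these helpers into setuptools to build a TensorFlow
# custom op. The package and source names are hypothetical placeholders.
#
#     from setuptools import setup
#     from celeriteflow.cpp_extension import BuildExtension, CppExtension
#
#     setup(
#         name="my_tf_ops",
#         ext_modules=[CppExtension("my_tf_ops.ops", ["ops.cc"])],
#         cmdclass={"build_ext": BuildExtension},
#     )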
| 35.659574 | 79 | 0.540811 |
4a24a3e48375fa88148fa7fdf924e6f93b1a556f | 2,818 | py | Python | integrationtest/vm/monitor/alert_vm_cpu_util.py | bgerxx/woodpecker | fdc51245945cc9be4d1f028988079213eb99b2ad | ["Apache-2.0"] | null | null | null | integrationtest/vm/monitor/alert_vm_cpu_util.py | bgerxx/woodpecker | fdc51245945cc9be4d1f028988079213eb99b2ad | ["Apache-2.0"] | null | null | null | integrationtest/vm/monitor/alert_vm_cpu_util.py | bgerxx/woodpecker | fdc51245945cc9be4d1f028988079213eb99b2ad | ["Apache-2.0"] | null | null | null |
'''
Test about monitor trigger on vm cpu utilization in one minute
@author: Songtao,Haochen
'''
import os
import test_stub
import random
import zstacklib.utils.ssh as ssh
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.monitor_operations as mon_ops
def test():
global vm
global trigger
global media
global trigger_action
vm = test_stub.create_vm()
vm.check()
vm_ip = vm.get_vm().vmNics[0].ip
vm_uuid = vm.get_vm().uuid
vm_username = os.environ.get('Vm_Username')
vm_password = os.environ.get('Vm_Password')
vm_port = os.environ.get('Vm_Sshport')
test_item = "vm.cpu.util"
resource_type = "VmInstanceVO"
vm_monitor_item = test_stub.get_monitor_item(resource_type)
if test_item not in vm_monitor_item:
test_util.test_fail('%s is not available for monitor' % test_item)
duration = 60
expression = "vm.cpu.util{}>80.0"
monitor_trigger = mon_ops.create_monitor_trigger(vm_uuid, duration, expression)
send_email = test_stub.create_email_media()
media = send_email.uuid
trigger_action_name = "trigger"+ ''.join(map(lambda xx:(hex(ord(xx))[2:]),os.urandom(8)))
trigger = monitor_trigger.uuid
receive_email = os.environ.get('receive_email')
monitor_trigger_action = mon_ops.create_email_monitor_trigger_action(trigger_action_name, send_email.uuid, trigger.split(), receive_email)
trigger_action = monitor_trigger_action.uuid
ssh_cmd = test_stub.ssh_cmd_line(vm_ip, vm_username, vm_password, vm_port)
test_stub.yum_install_stress_tool(ssh_cmd)
test_stub.run_cpu_load(ssh_cmd, 0, 1)
status_problem, status_ok = test_stub.query_trigger_in_loop(trigger,50)
test_util.action_logger('Trigger old status: %s triggered. Trigger new status: %s recovered' % (status_problem, status_ok ))
if status_problem != 1 or status_ok != 1:
test_util.test_fail('%s Monitor Test failed, expected Problem or OK status not triggered' % test_item)
mail_list = test_stub.receive_email()
keywords = "fired"
mail_flag = test_stub.check_email(mail_list, keywords, trigger, vm_uuid)
if mail_flag == 0:
test_util.test_fail('Failed to Get Target: %s for: %s Trigger Mail' % (vm_uuid, test_item))
mon_ops.delete_monitor_trigger_action(trigger_action)
mon_ops.delete_monitor_trigger(trigger)
mon_ops.delete_email_media(media)
vm.destroy()
def error_cleanup():
global trigger
global media
global trigger_action
global vm
mon_ops.delete_monitor_trigger_action(trigger_action)
mon_ops.delete_monitor_trigger(trigger)
mon_ops.delete_email_media(media)
vm.destroy()
| 36.597403 | 143 | 0.724627 |
4a24a4205e0af3856bce704b1dda27d1e9aa90ad | 3,107 | py | Python | docker-jans-certmanager/scripts/oxshibboleth_handler.py | duttarnab/jans | b4ae02f9cb60433a44a2b889268525532d82a247 | ["Apache-2.0"] | 18 | 2022-01-13T13:45:13.000Z | 2022-03-30T04:41:18.000Z | docker-jans-certmanager/scripts/oxshibboleth_handler.py | duttarnab/jans | b4ae02f9cb60433a44a2b889268525532d82a247 | ["Apache-2.0"] | 604 | 2022-01-13T12:32:50.000Z | 2022-03-31T20:27:36.000Z | docker-jans-certmanager/scripts/oxshibboleth_handler.py | duttarnab/jans | b4ae02f9cb60433a44a2b889268525532d82a247 | ["Apache-2.0"] | 8 | 2022-01-28T00:23:25.000Z | 2022-03-16T05:12:12.000Z |
import logging.config
from jans.pycloudlib.utils import exec_cmd
from base_handler import BaseHandler
from settings import LOGGING_CONFIG
logging.config.dictConfig(LOGGING_CONFIG)
logger = logging.getLogger("certmanager")
class OxshibbolethHandler(BaseHandler):
@classmethod
def gen_idp3_key(cls, storepass):
cmd = (
"java -classpath '/app/javalibs/*' "
"net.shibboleth.utilities.java.support.security.BasicKeystoreKeyStrategyTool "
"--storefile /etc/certs/sealer.jks "
"--versionfile /etc/certs/sealer.kver "
"--alias secret "
f"--storepass {storepass}"
)
return exec_cmd(cmd)
def _patch_shib_sealer(self, passwd):
sealer_jks = "/etc/certs/sealer.jks"
sealer_kver = "/etc/certs/sealer.kver"
logger.info(f"Generating new {sealer_jks} and {sealer_kver} files")
self.gen_idp3_key(passwd)
return sealer_jks, sealer_kver
def patch(self):
passwd = self.manager.secret.get("shibJksPass")
# shibIDP
cert_fn, key_fn = self._patch_cert_key("shibIDP", passwd)
if not self.dry_run:
if cert_fn:
self.manager.secret.from_file(
"shibIDP_cert", cert_fn, encode=True,
)
if key_fn:
self.manager.secret.from_file(
"shibIDP_cert", key_fn, encode=True,
)
keystore_fn = self._patch_keystore(
"shibIDP", self.manager.config.get("hostname"), passwd,
)
if not self.dry_run:
if keystore_fn:
self.manager.secret.from_file(
"shibIDP_jks_base64",
keystore_fn,
encode=True,
binary_mode=True,
)
sealer_jks_fn, sealer_kver_fn = self._patch_shib_sealer(passwd)
if not self.dry_run:
if sealer_jks_fn:
self.manager.secret.from_file(
"sealer_jks_base64",
sealer_jks_fn,
encode=True,
binary_mode=True,
)
if sealer_kver_fn:
self.manager.secret.from_file(
"sealer_kver_base64", sealer_kver_fn, encode=True,
)
# IDP signing
cert_fn, key_fn = self._patch_cert_key("idp-signing", passwd)
if not self.dry_run:
if cert_fn:
self.manager.secret.from_file(
"idp3SigningCertificateText", cert_fn,
)
if key_fn:
self.manager.secret.from_file("idp3SigningKeyText", key_fn)
# IDP encryption
cert_fn, key_fn = self._patch_cert_key("idp-encryption", passwd)
if not self.dry_run:
if cert_fn:
self.manager.secret.from_file(
"idp3EncryptionCertificateText", cert_fn,
)
if key_fn:
self.manager.secret.from_file("idp3EncryptionKeyText", key_fn)
| 33.771739 | 90 | 0.55906 |
4a24a5841fdd67233b8b6825023ceffdbe28dd63 | 6,324 | py | Python | paper/ProbCox/scripts/simulation/largescale_case.py | alexwjung/ProbCox | 6582ab30a4368283e779329d3df3fdeab1c48d32 | ["MIT"] | 3 | 2021-06-21T17:40:46.000Z | 2021-12-17T17:19:09.000Z | paper/ProbCox/scripts/simulation/largescale_case.py | alexwjung/ProbCox | 6582ab30a4368283e779329d3df3fdeab1c48d32 | ["MIT"] | null | null | null | paper/ProbCox/scripts/simulation/largescale_case.py | alexwjung/ProbCox | 6582ab30a4368283e779329d3df3fdeab1c48d32 | ["MIT"] | 1 | 2021-06-21T13:53:49.000Z | 2021-06-21T13:53:49.000Z |
'''
Large-Scale Case Simulation:
Large-scale simulation with N >> I >> P
individuals: 4,000,000
covariates: 5 binary (0.2), 5 Normal(0, 1)
theta: drawn from Normal(0, 0.75)
censoring: ~ 0.74
runs: 200 - Seed = 1, 2, ..., 200
'''
# Modules
# =======================================================================================================================
import os
import sys
import shutil
import subprocess
import tqdm
import numpy as np
import pandas as pd
from multiprocessing import Pool
import torch
from torch.distributions import constraints
import pyro
import pyro.distributions as dist
from pyro.infer import SVI, Trace_ELBO
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
import probcox as pcox
dtype = torch.FloatTensor
np.random.seed(2309)
torch.manual_seed(945)
sim_name = 'sim_ls'
os.chdir('/nfs/nobackup/gerstung/awj/projects/ProbCox/paper/ProbCox')
# cluster variable
try:
run_id = int(sys.argv[1])
except:
run_id = 0
if run_id == 0:
try:
shutil.rmtree('./out/simulation/' + sim_name)
except:
pass
try:
os.mkdir('./out/simulation/' + sim_name)
except:
pass
# Simulation Settings
# =======================================================================================================================
I = 4000000 # Number of Individuals
P_binary = 5
P_continuous = 5
P = P_binary + P_continuous
theta = np.random.normal(0, 0.75, (10, 1))
scale = 25 # Scaling factor for Baseline Hazard
# Simulation
# =======================================================================================================================
# save theta
if run_id == 0:
np.savetxt('./out/simulation/' + sim_name + '/theta.txt', np.round(theta, 5))
# Rough distribution for the corresponding linear effect size
X = np.concatenate((np.random.binomial(1, 0.2, (1000, P_binary)), np.random.normal(0, 1, (1000, P_continuous))), axis=1)
plt.hist(np.matmul(X, theta))
plt.show()
plt.close()
# Class for simulation
TVC = pcox.TVC(theta=theta, P_binary=P_binary, P_continuous=P_continuous, dtype=dtype)
# Sample baseline hazard - scale is set to define censorship/events
TVC.make_lambda0(scale=scale)
# Return the underlying shape of the baseline hazard and plot
if run_id == 0:
t_l, ll = TVC.return_lambda0()
plt.step(t_l, ll)
plt.show()
plt.close()
np.savetxt('./out/simulation/' + sim_name + '/lambda0.txt', np.concatenate((t_l[:, None], ll), axis=1))
# Sample Data
np.random.seed(run_id+100)
torch.manual_seed(run_id+100)
surv = []
X = []
def f(i):
a, b = TVC.sample()
return([a, b])
surv = []
X = []
with Pool(processes=8) as pool:
for i in pool.imap_unordered(f, tqdm.tqdm(range(I))):
a, b = i
surv.extend(a.tolist())
X.extend(b.tolist())
surv = torch.tensor(surv).type(dtype)
X = torch.tensor(X).type(dtype)
if run_id == 0:
plt.hist(surv[surv[:, -1]==1, 1])
plt.show()
plt.close()
total_obs = surv.shape[0]
total_events = torch.sum(surv[:, -1] == 1).numpy().tolist()
# Save information on interval observations and number of events
if run_id != 0:
with open('./out/simulation/' + sim_name + '/N_obs.txt', 'a') as write_out:
write_out.write(str(run_id) + '; ' + str(surv.shape[0]) + '; ' + str(torch.sum(surv[:, -1]).detach().numpy().tolist()))
write_out.write('\n')
# Inference Setup
# =======================================================================================================================
def predictor(data):
theta = pyro.sample("theta", dist.StudentT(1, loc=0, scale=0.001).expand([data[1].shape[1], 1])).type(dtype)
pred = torch.mm(data[1], theta)
return(pred)
def evaluate(surv, X, rank, batchsize, sampling_proportion, iter_, run_suffix, predictor=predictor, sim_name=sim_name, run_id=run_id):
sampling_proportion[1] = batchsize
eta=0.1 # parameter for optimization
run = True # repeat initialization if NaN encountered while training - gauge correct optimization settings
while run:
run = False
pyro.clear_param_store()
m = pcox.PCox(sampling_proportion=sampling_proportion, predictor=predictor)
m.initialize(eta=eta, rank=rank, num_particles=5)
loss=[0]
locat = np.where(surv[:, -1]==1)[0]
for ii in tqdm.tqdm(range((iter_))):
idx = np.unique(np.concatenate((np.random.choice(locat, 1, replace=False), np.random.randint(surv.shape[0], size=int(batchsize*1.5)))))[:batchsize] # random sample of data - force at least one event (no evaluation otherwise)
data=[surv[idx], X[idx]] # subsampled data
loss.append(m.infer(data=data))
# divergence check
if loss[-1] != loss[-1]:
eta = eta * 0.1
run=True
break
g = m.return_guide()
out = g.quantiles([0.025, 0.5, 0.975])
with open('./out/simulation/' + sim_name + '/probcox' + run_suffix + '_theta_lower.txt', 'a') as write_out:
write_out.write(str(run_id) + '; ')
write_out.write(''.join([str(ii) + '; ' for ii in out['theta'][0].detach().numpy()[:, 0].tolist()]))
write_out.write('\n')
with open('./out/simulation/' + sim_name + '/probcox' + run_suffix + '_theta.txt', 'a') as write_out:
write_out.write(str(run_id) + '; ')
write_out.write(''.join([str(ii) + '; ' for ii in out['theta'][1].detach().numpy()[:, 0].tolist()]))
write_out.write('\n')
with open('./out/simulation/' + sim_name + '/probcox' + run_suffix + '_theta_upper.txt', 'a') as write_out:
write_out.write(str(run_id) + '; ')
write_out.write(''.join([str(ii) + '; ' for ii in out['theta'][2].detach().numpy()[:, 0].tolist()]))
write_out.write('\n')
# Run
# =======================================================================================================================
if run_id != 0:
pyro.clear_param_store()
out = evaluate(run_suffix='b1000', rank=5, batchsize=1000, iter_=100000, surv=surv, X=X, sampling_proportion=[total_obs, None, total_events, None])
print('finished')
#for i in 15 21; do bsub -env "VAR1=$i" -n 16 -M 52000 -R "rusage[mem=16000]" './largescale_case.sh'; sleep 1; done
| 31.939394 | 236 | 0.573213 |
4a24a5ad382e5aa2dda445bb8f83e404746d750b | 3,768 | py | Python | sqlcollection/utils.py | knlambert/sqlcollection | bd5408c00e62c5284de8a70743a28032bbfaf9ba | ["MIT"] | null | null | null | sqlcollection/utils.py | knlambert/sqlcollection | bd5408c00e62c5284de8a70743a28032bbfaf9ba | ["MIT"] | null | null | null | sqlcollection/utils.py | knlambert/sqlcollection | bd5408c00e62c5284de8a70743a28032bbfaf9ba | ["MIT"] | null | null | null |
# coding: utf-8
"""
This module contains various utility functions for general use.
"""
import sys
try:
import urlparse
from urllib import urlencode
except ImportError:
import urllib.parse as urlparse
from urllib.parse import urlencode
from .compatibility import UNICODE_TYPE
def json_set(item, path, value):
"""
Set the value corresponding to the path in a dict.
Arguments:
item (dict): The object where we want to put a field.
path (unicode): The path separated with dots to the field.
value: The value to set on the field.
Return:
(dict): The updated object.
"""
tab = path.split(u".")
if tab[0] not in item and len(tab) > 1:
item[tab[0]] = {}
if len(tab) == 1:
item[tab[0]] = value
else:
item[tab[0]] = json_set(item[tab[0]], u".".join(tab[1:]), value)
return item
def json_del(item, path):
"""
Delete the item corresponding to path of the field in a dict.
Arguments:
item (dict): The object where we want to delete a field.
path (unicode): The path separated with dots to the field.
Return:
The value.
"""
tab = path.split(u".")
if tab[0] in item:
if len(tab) > 1:
return json_del(item[tab[0]], u".".join(tab[1:]))
else:
del item[tab[0]]
return item
def json_get(item, path, default=None):
"""
Return the value of the field at the given path in a dict.
Arguments:
item (dict): The object from which we want to read a field.
path (unicode): The path separated with dots to the field.
default: default value if path not found.
Return:
The value.
"""
tab = path.split(u".")
if isinstance(item, dict) and tab[0] in item:
if len(tab) > 1:
return json_get(item[tab[0]], u".".join(tab[1:]), default=default)
return item[tab[0]]
return default
def json_to_one_level(obj, parent=None):
"""
Take a dict and flatten all paths to one level.
Arguments:
obj (dict): The dict to process.
parent (unicode): The parent key. Used only with recursion.
Return:
dict: The updated obj.
"""
output = {}
for key, value in obj.items():
if isinstance(value, dict):
if parent is None:
output.update(json_to_one_level(value, key))
else:
output.update(json_to_one_level(value, u".".join([parent, key])))
elif isinstance(value, list):
for index, item in enumerate(value):
item = {
UNICODE_TYPE(index): item
}
if parent is None:
output.update(json_to_one_level(item, u".".join([key])))
else:
output.update(json_to_one_level(item, u".".join([parent, key])))
else:
if parent is not None:
output[u".".join([parent, key])] = value
else:
output[key] = value
return output
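# Hedged usage sketch (illustration only, not part of the original module):
# expected behaviour of the helpers above on a small nested dict.
#
#     >>> doc = {}
#     >>> json_set(doc, u"a.b", 1)
#     {'a': {'b': 1}}
#     >>> json_get(doc, u"a.b")
#     1
#     >>> json_get(doc, u"a.c", default=0)
#     0
#     >>> json_to_one_level({u"a": {u"b": 1}, u"c": [u"x", u"y"]})
#     {'a.b': 1, 'c.0': 'x', 'c.1': 'y'}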
def parse_url_and_add_param(url, param_key, param_value):
"""
Take a string url and add a param into it.
Args:
url (string): The URL to process.
param_key (string): The key of the argument to add.
param_value (any): The value of the argument.
Returns:
(string): The resulting url with the added parameter.
"""
if param_value is not None:
url_parts = list(urlparse.urlparse(url))
query = dict(urlparse.parse_qsl(url_parts[4]))
query.update({
param_key: param_value
})
url_parts[4] = urlencode(query)
return urlparse.unquote(urlparse.urlunparse(url_parts))
else:
return url
| 28.984615 | 84 | 0.576699 |
4a24a5af0838567b5d3a74c1b11712074c7bcd02 | 4,646 | py | Python | regression/main_repulsive.py | maxwab/denn-ijcai | 6431f699b7d9b4e4fbb9ca71f41dbdecfd34378c | ["MIT"] | null | null | null | regression/main_repulsive.py | maxwab/denn-ijcai | 6431f699b7d9b4e4fbb9ca71f41dbdecfd34378c | ["MIT"] | 3 | 2021-09-08T02:07:17.000Z | 2022-03-12T00:33:04.000Z | regression/main_repulsive.py | maxwab/denn-ijcai | 6431f699b7d9b4e4fbb9ca71f41dbdecfd34378c | ["MIT"] | 2 | 2021-02-04T14:58:24.000Z | 2021-10-20T19:36:14.000Z |
from comet_ml import Experiment
import argparse as ap
import torch
import numpy as np
import random
from tools import f, optimize
import model
from dataset import RegressionDataset
from model import MLP
from tqdm import tqdm
import os
import json
from pathlib import Path
from functools import partial
from sampler import repulsiveSampler
from torch.utils.data import DataLoader
import torch.optim as optim
import torch.nn as nn
parser = ap.ArgumentParser()
parser.add_argument('--type', type=str)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--dataset_seed', type=int, default=2020)
parser.add_argument('--n_epochs', type=int, default=5000)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--wd', type=float, default=0)
parser.add_argument('--repulsive', type=str)
parser.add_argument('--lambda_repulsive', type=float, default=3e-3)
parser.add_argument('--batch_size_repulsive', type=int, default=20)
parser.add_argument('--dropout_rate', type=float, default=0.0)
parser.add_argument('--batch_size', type=int, default=10)
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--comet', action='store_true')
parser.add_argument('--save_folder', type=str, default='log/repulsive')
parser.add_argument('--id', type=int)
args = parser.parse_args()
# Logging
experiment = Experiment(api_key="XXX", project_name="final_regression", workspace="XXXX",
disabled=not args.comet)
experiment.log_parameters(vars(args))
model_name = 'repulsive_lambda:{}'.format(args.lambda_repulsive)
if args.id is not None:
model_name = model_name + '_{}'.format(args.id)
savepath = Path(args.save_folder)
try:
if not Path.exists(savepath):
os.makedirs(savepath)
except:
pass
if not Path.exists(savepath / 'config.json'): # Only create json if it does not exist
with open(savepath / 'config.json', 'w') as fd:
json.dump(vars(args), fd)
# Generate data and create dataset
torch.manual_seed(args.dataset_seed)
np.random.seed(args.dataset_seed)
random.seed(args.dataset_seed)
X = (np.random.rand(10).reshape(-1, 1) - 1) / 2 # x between -0.5 and 0.
Y = f(X)
X = torch.from_numpy(X).type(torch.FloatTensor)
Y = torch.from_numpy(Y).type(torch.FloatTensor)
dataset = RegressionDataset(X, Y)
# Reproducibility
if args.seed is not None:
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
net = MLP()
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=args.lr, weight_decay=args.wd)
# Load reference net if defined
if args.repulsive is not None:
reference_net = model.MLP(dropout_rate=args.dropout_rate)
reference_net.load_state_dict(torch.load(Path(args.repulsive)))
# Update of the network parameters
train_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False)
# Sampling a repulsive bandwidth parameter
alpha = -3
beta = -0.5
bandwidth_repulsive = float(10 ** (alpha + (beta - alpha) * np.random.rand()))
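# Added note: with alpha = -3 and beta = -0.5 the line above draws the kernel
# bandwidth log-uniformly between 10**-3 = 0.001 and 10**-0.5 (about 0.316).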
# Preparation of the optimization
if args.repulsive is not None:
_optimize = partial(optimize, bandwidth_repulsive=bandwidth_repulsive, lambda_repulsive=args.lambda_repulsive)
else:
_optimize = optimize
repulsive_sampler = repulsiveSampler(X, batch_size=args.batch_size_repulsive)
step = 0 # Number of batches seen
net.train()
# ----------------------------------------------------------------------
# Actual training
for epoch in tqdm(np.arange(args.n_epochs), disable=not args.verbose):
experiment.log_current_epoch(epoch)
for batch_idx, (data, target) in enumerate(train_loader):
# Sample repulsive batch if required
if args.repulsive is not None:
br = repulsive_sampler.sample_batch()
kwargs = {'reference_net': reference_net, 'batch_repulsive': br, 'bandwidth_repulsive': bandwidth_repulsive, 'lambda_repulsive':args.lambda_repulsive}
else:
kwargs = {}
data, target = data.cpu(), target.cpu()
info_batch = optimize(net, optimizer, batch=(data, target), add_repulsive_constraint=args.repulsive is not None,
**kwargs)
step += 1
for k, v in info_batch.items():
experiment.log_metric('train_{}'.format(k), v, step=step)
# Save the model
if not Path.exists(savepath / 'models'):
os.makedirs(savepath / 'models')
model_path = savepath / 'models' / '{}_{}epochs.pt'.format(model_name, epoch + 1)
if not Path.exists(model_path):
torch.save(net.state_dict(), model_path)
else:
raise ValueError('Error trying to save file at location {}: File already exists'.format(model_path))
| 34.414815 | 162 | 0.715239 |
4a24a872880dded888276612f0b1fb2abbca0c1e | 796 | py | Python | ptr/params.py | Wall-Facer-liuyu/slot_attention | a927960396011a108358f7b43c0f8e061e432564 | ["Apache-2.0"] | null | null | null | ptr/params.py | Wall-Facer-liuyu/slot_attention | a927960396011a108358f7b43c0f8e061e432564 | ["Apache-2.0"] | null | null | null | ptr/params.py | Wall-Facer-liuyu/slot_attention | a927960396011a108358f7b43c0f8e061e432564 | ["Apache-2.0"] | null | null | null |
from typing import Optional
from typing import Tuple
import attr
@attr.s(auto_attribs=True)
class SlotAttentionParams:
lr: float = 0.0004
batch_size: int = 32
val_batch_size: int = 64
resolution: Tuple[int, int] = (128, 128)
num_slots: int = 20
num_iterations: int = 3
data_root: str = "/home/liuyu/data/ptr/"
gpus: int = 1
max_epochs: int = 100
num_sanity_val_steps: int = 1
scheduler_gamma: float = 0.5
weight_decay: float = 0.0
num_train_images: Optional[int] = None
num_val_images: Optional[int] = None
empty_cache: bool = True
is_logger_enabled: bool = True
is_verbose: bool = True
num_workers: int = 4
n_samples: int = 5
warmup_steps_pct: float = 0.02
decay_steps_pct: float = 0.2
max_n_objects = 3
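# Hedged usage sketch (illustration only, not part of the original file): since this
# is an attrs class with auto_attribs, individual hyperparameters can be overridden
# at construction time, e.g.
#
#     params = SlotAttentionParams(batch_size=16, num_slots=10)
#     print(params.lr, params.resolution)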
| 25.677419 | 44 | 0.668342 |
4a24a877cea283a6072e8cd2293b7a56085eb3b2 | 1,553 | py | Python | setup.py | camerondurham/piazza-api | 095ad2fcac5aa90674faba09cbc72205337a536b | ["MIT"] | 171 | 2015-01-05T13:33:22.000Z | 2022-03-05T13:42:14.000Z | setup.py | camerondurham/piazza-api | 095ad2fcac5aa90674faba09cbc72205337a536b | ["MIT"] | 27 | 2015-01-11T08:30:52.000Z | 2021-09-15T03:36:28.000Z | setup.py | camerondurham/piazza-api | 095ad2fcac5aa90674faba09cbc72205337a536b | ["MIT"] | 52 | 2015-02-01T04:19:41.000Z | 2022-02-02T20:18:46.000Z |
from __future__ import print_function
import codecs
import os
import re
from setuptools import setup
def read(filename):
"""Read and return `filename` in root dir of project and return string"""
here = os.path.abspath(os.path.dirname(__file__))
return codecs.open(os.path.join(here, filename), 'r').read()
# https://github.com/kennethreitz/requests/blob/master/setup.py#L32
with open('piazza_api/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
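# Added note: the regex above expects piazza_api/__init__.py to contain a line of
# the form  __version__ = "x.y.z"  (single or double quotes); the captured group
# becomes the version string passed to setup() below.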
install_requires = read("requirements.txt").split()
long_description = read('README.md')
setup(
name='piazza-api',
version=version,
url='http://github.com/hfaran/piazza-api/',
license='MIT License',
author='Hamza Faran',
install_requires=install_requires,
description="Unofficial Client for Piazza's Internal API",
long_description=long_description,
long_description_content_type='text/markdown',
packages=['piazza_api'],
platforms='any',
classifiers = [
'Programming Language :: Python',
'Development Status :: 3 - Alpha',
'Natural Language :: English',
'Environment :: Web Environment',
'Intended Audience :: Developers',
"License :: OSI Approved :: MIT License",
'Operating System :: OS Independent',
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| 32.354167 | 77 | 0.650998 |
4a24a8d17d57f99d6cdfa445d87ed290c0deb0a8 | 17,451 | py | Python | haystack/preprocessor/utils.py | peterdemin/haystack | 9ec2406a05aac3dc8afab68945a6afc2871bd2a3 | ["Apache-2.0"] | 1 | 2021-01-19T00:34:03.000Z | 2021-01-19T00:34:03.000Z | haystack/preprocessor/utils.py | peterdemin/haystack | 9ec2406a05aac3dc8afab68945a6afc2871bd2a3 | ["Apache-2.0"] | null | null | null | haystack/preprocessor/utils.py | peterdemin/haystack | 9ec2406a05aac3dc8afab68945a6afc2871bd2a3 | ["Apache-2.0"] | null | null | null |
import re
import logging
import tarfile
import tempfile
import zipfile
import gzip
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple, Union, Generator
import json
from farm.data_handler.utils import http_get
from haystack.file_converter.base import BaseConverter
from haystack.file_converter.docx import DocxToTextConverter
from haystack.file_converter.pdf import PDFToTextConverter
from haystack.file_converter.tika import TikaConverter
from haystack import Document, Label
from haystack.file_converter.txt import TextConverter
from haystack.preprocessor.preprocessor import PreProcessor
logger = logging.getLogger(__name__)
def eval_data_from_json(filename: str, max_docs: Union[int, bool] = None, preprocessor: PreProcessor = None) -> Tuple[List[Document], List[Label]]:
"""
Read Documents + Labels from a SQuAD-style file.
Document and Labels can then be indexed to the DocumentStore and be used for evaluation.
:param filename: Path to file in SQuAD format
:param max_docs: This sets the number of documents that will be loaded. By default, this is set to None, thus reading in all available eval documents.
:return: (List of Documents, List of Labels)
"""
docs: List[Document] = []
labels = []
problematic_ids = []
with open(filename, "r", encoding='utf-8') as file:
data = json.load(file)
if "title" not in data["data"][0]:
logger.warning(f"No title information found for documents in QA file: {filename}")
for document in data["data"]:
if max_docs:
if len(docs) > max_docs:
break
# Extracting paragraphs and their labels from a SQuAD document dict
cur_docs, cur_labels, cur_problematic_ids = _extract_docs_and_labels_from_dict(document, preprocessor)
docs.extend(cur_docs)
labels.extend(cur_labels)
problematic_ids.extend(cur_problematic_ids)
if len(problematic_ids) > 0:
logger.warning(f"Could not convert an answer for {len(problematic_ids)} questions.\n"
f"There were conversion errors for question ids: {problematic_ids}")
return docs, labels
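# Hedged usage sketch (illustration only, not part of the original module); the file
# path is a placeholder and `document_store` is assumed to be an initialized Haystack
# DocumentStore:
#
#     docs, labels = eval_data_from_json("data/squad_style_eval.json", max_docs=10)
#     document_store.write_documents(docs)
#     document_store.write_labels(labels)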
def eval_data_from_jsonl(filename: str, batch_size: Optional[int] = None,
max_docs: Union[int, bool] = None, preprocessor: PreProcessor = None) -> Generator[Tuple[List[Document], List[Label]], None, None]:
"""
Read Documents + Labels from a SQuAD-style file in jsonl format, i.e. one document per line.
Document and Labels can then be indexed to the DocumentStore and be used for evaluation.
This is a generator which will yield one tuple per iteration containing a list
of batch_size documents and a list with the documents' labels.
If batch_size is set to None, this method will yield all documents and labels.
:param filename: Path to file in SQuAD format
:param max_docs: This sets the number of documents that will be loaded. By default, this is set to None, thus reading in all available eval documents.
:return: (List of Documents, List of Labels)
"""
docs: List[Document] = []
labels = []
problematic_ids = []
with open(filename, "r", encoding='utf-8') as file:
for document in file:
if max_docs:
if len(docs) > max_docs:
break
# Extracting paragraphs and their labels from a SQuAD document dict
document_dict = json.loads(document)
cur_docs, cur_labels, cur_problematic_ids = _extract_docs_and_labels_from_dict(document_dict, preprocessor)
docs.extend(cur_docs)
labels.extend(cur_labels)
problematic_ids.extend(cur_problematic_ids)
if batch_size is not None:
if len(docs) >= batch_size:
if len(problematic_ids) > 0:
logger.warning(f"Could not convert an answer for {len(problematic_ids)} questions.\n"
f"There were conversion errors for question ids: {problematic_ids}")
yield docs, labels
docs = []
labels = []
problematic_ids = []
yield docs, labels
def _extract_docs_and_labels_from_dict(document_dict: Dict, preprocessor: PreProcessor = None):
docs = []
labels = []
problematic_ids = []
# get all extra fields from document level (e.g. title)
meta_doc = {k: v for k, v in document_dict.items() if k not in ("paragraphs", "title")}
for paragraph in document_dict["paragraphs"]:
## Create Metadata
cur_meta = {"name": document_dict.get("title", None)}
# all other fields from paragraph level
meta_paragraph = {k: v for k, v in paragraph.items() if k not in ("qas", "context")}
cur_meta.update(meta_paragraph)
# meta from parent document
cur_meta.update(meta_doc)
## Create Document
cur_doc = Document(text=paragraph["context"], meta=cur_meta)
if preprocessor is not None:
splits_dicts = preprocessor.process(cur_doc.to_dict())
# we need to pull in _split_id into the document id for unique reference in labels
# todo: PreProcessor should work on Documents instead of dicts
splits = []
offset = 0
for d in splits_dicts:
id = f"{d['id']}-{d['meta']['_split_id']}"
d["meta"]["_split_offset"] = offset
offset += len(d["text"])
# offset correction based on splitting method
if preprocessor.split_by == "word":
offset += 1
elif preprocessor.split_by == "passage":
offset += 2
else:
raise NotImplementedError
mydoc = Document(text=d["text"],
id=id,
meta=d["meta"])
splits.append(mydoc)
else:
splits = [cur_doc]
docs.extend(splits)
## Assign Labels to corresponding documents
for qa in paragraph["qas"]:
if not qa.get("is_impossible", False):
for answer in qa["answers"]:
ans = answer["text"]
ans_position = cur_doc.text[answer["answer_start"]:answer["answer_start"]+len(ans)]
if ans != ans_position:
# do not use answer
problematic_ids.append(qa.get("id","missing"))
break
# find corresponding document or split
if len(splits) == 1:
cur_id = splits[0].id
cur_ans_start = answer["answer_start"]
else:
for s in splits:
# If answer start offset is contained in passage we assign the label to that passage
if (answer["answer_start"] >= s.meta["_split_offset"]) and (answer["answer_start"] < (s.meta["_split_offset"] + len(s.text))):
cur_id = s.id
cur_ans_start = answer["answer_start"] - s.meta["_split_offset"]
# If a document is splitting an answer we add the whole answer text to the document
if s.text[cur_ans_start:cur_ans_start+len(ans)] != ans:
s.text = s.text[:cur_ans_start] + ans
break
label = Label(
question=qa["question"],
answer=ans,
is_correct_answer=True,
is_correct_document=True,
document_id=cur_id,
offset_start_in_doc=cur_ans_start,
no_answer=qa.get("is_impossible", False),
origin="gold_label",
)
labels.append(label)
else:
# for no_answer we need to assign each split as not fitting to the question
for s in splits:
label = Label(
question=qa["question"],
answer="",
is_correct_answer=True,
is_correct_document=True,
document_id=s.id,
offset_start_in_doc=0,
no_answer=qa.get("is_impossible", False),
origin="gold_label",
)
labels.append(label)
return docs, labels, problematic_ids
def convert_files_to_dicts(dir_path: str, clean_func: Optional[Callable] = None, split_paragraphs: bool = False) -> \
List[dict]:
"""
Convert all files (.txt, .pdf, .docx) in the sub-directories of the given path to Python dicts that can be written to a
Document Store.
:param dir_path: path for the documents to be written to the DocumentStore
:param clean_func: a custom cleaning function that gets applied to each doc (input: str, output:str)
:param split_paragraphs: split text in paragraphs.
:return: list of dicts, one per document (or one per paragraph if split_paragraphs is True)
"""
file_paths = [p for p in Path(dir_path).glob("**/*")]
allowed_suffixes = [".pdf", ".txt", ".docx"]
suffix2converter: Dict[str, BaseConverter] = {}
suffix2paths: Dict[str, List[Path]] = {}
for path in file_paths:
file_suffix = path.suffix.lower()
if file_suffix in allowed_suffixes:
if file_suffix not in suffix2paths:
suffix2paths[file_suffix] = []
suffix2paths[file_suffix].append(path)
elif not path.is_dir():
logger.warning('Skipped file {0} as type {1} is not supported here. '
'See haystack.file_converter for support of more file types'.format(path, file_suffix))
# No need to initialize converter if file type not present
for file_suffix in suffix2paths.keys():
if file_suffix == ".pdf":
suffix2converter[file_suffix] = PDFToTextConverter()
if file_suffix == ".txt":
suffix2converter[file_suffix] = TextConverter()
if file_suffix == ".docx":
suffix2converter[file_suffix] = DocxToTextConverter()
documents = []
for suffix, paths in suffix2paths.items():
for path in paths:
logger.info('Converting {}'.format(path))
document = suffix2converter[suffix].convert(file_path=path, meta=None)
text = document["text"]
if clean_func:
text = clean_func(text)
if split_paragraphs:
for para in text.split("\n\n"):
if not para.strip(): # skip empty paragraphs
continue
documents.append({"text": para, "meta": {"name": path.name}})
else:
documents.append({"text": text, "meta": {"name": path.name}})
return documents
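# Hedged usage sketch (illustration only): convert a folder of .txt/.pdf/.docx files
# and index the resulting dicts; "data/docs" and `document_store` are assumptions.
#
#     dicts = convert_files_to_dicts(dir_path="data/docs", split_paragraphs=True)
#     document_store.write_documents(dicts)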
def tika_convert_files_to_dicts(
dir_path: str,
clean_func: Optional[Callable] = None,
split_paragraphs: bool = False,
merge_short: bool = True,
merge_lowercase: bool = True
) -> List[dict]:
"""
Convert all files (.txt, .pdf) in the sub-directories of the given path to Python dicts that can be written to a
Document Store.
:param merge_lowercase: merge a paragraph that starts with a lowercase letter into the previous paragraph
:param merge_short: allow merging of short paragraphs
:param dir_path: path for the documents to be written to the DocumentStore
:param clean_func: a custom cleaning function that gets applied to each doc (input: str, output:str)
:param split_paragraphs: split text in paragraphs.
:return: list of dicts, one per document (or one per paragraph if split_paragraphs is True)
"""
converter = TikaConverter()
paths = [p for p in Path(dir_path).glob("**/*")]
allowed_suffixes = [".pdf", ".txt"]
file_paths: List[Path] = []
for path in paths:
file_suffix = path.suffix.lower()
if file_suffix in allowed_suffixes:
file_paths.append(path)
elif not path.is_dir():
logger.warning('Skipped file {0} as type {1} is not supported here. '
'See haystack.file_converter for support of more file types'.format(path, file_suffix))
documents = []
for path in file_paths:
logger.info('Converting {}'.format(path))
document = converter.convert(path)
meta = document["meta"] or {}
meta["name"] = path.name
text = document["text"]
pages = text.split("\f")
if split_paragraphs:
if pages:
paras = pages[0].split("\n\n")
# pop the last paragraph from the first page
last_para = paras.pop(-1) if paras else ''
for page in pages[1:]:
page_paras = page.split("\n\n")
# merge the last paragraph in previous page to the first paragraph in this page
if page_paras:
page_paras[0] = last_para + ' ' + page_paras[0]
last_para = page_paras.pop(-1)
paras += page_paras
if last_para:
paras.append(last_para)
if paras:
last_para = ''
for para in paras:
para = para.strip()
if not para:
continue
# merge paragraphs to improve qa
# merge this paragraph if less than 10 characters or 2 words
# or this paragraph starts with a lower case and last paragraph does not end with a punctuation
if merge_short and len(para) < 10 or len(re.findall(r'\s+', para)) < 2 \
or merge_lowercase and para and para[0].islower() and last_para \
and last_para[-1] not in r'.?!"\'\]\)':
last_para += ' ' + para
else:
if last_para:
documents.append({"text": last_para, "meta": meta})
last_para = para
# don't forget the last one
if last_para:
documents.append({"text": last_para, "meta": meta})
else:
if clean_func:
text = clean_func(text)
documents.append({"text": text, "meta": meta})
return documents
def fetch_archive_from_http(url: str, output_dir: str, proxies: Optional[dict] = None):
"""
Fetch an archive (zip or tar.gz) from a url via http and extract content to an output directory.
:param url: http address
:type url: str
:param output_dir: local path
:type output_dir: str
:param proxies: proxies details as required by requests library
:type proxies: dict
:return: bool if anything got fetched
"""
# verify & prepare local directory
path = Path(output_dir)
if not path.exists():
path.mkdir(parents=True)
is_not_empty = len(list(Path(path).rglob("*"))) > 0
if is_not_empty:
logger.info(
f"Found data stored in `{output_dir}`. Delete this first if you really want to fetch new data."
)
return False
else:
logger.info(f"Fetching from {url} to `{output_dir}`")
# download & extract
with tempfile.NamedTemporaryFile() as temp_file:
http_get(url, temp_file, proxies=proxies)
temp_file.flush()
temp_file.seek(0) # making tempfile accessible
# extract
if url[-4:] == ".zip":
zip_archive = zipfile.ZipFile(temp_file.name)
zip_archive.extractall(output_dir)
elif url[-7:] == ".tar.gz":
tar_archive = tarfile.open(temp_file.name)
tar_archive.extractall(output_dir)
elif url[-3:] == ".gz":
filename = url.split("/")[-1].replace(".gz", "")
output_filename = Path(output_dir) / filename
with gzip.open(temp_file.name) as f, open(output_filename, "wb") as output:
for line in f:
output.write(line)
else:
logger.warning('Skipped url {0} as file type is not supported here. '
'See haystack documentation for support of more file types'.format(url))
# temp_file gets deleted here
return True
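# Hedged usage sketch (illustration only; the URL is a placeholder, not a real endpoint):
#
#     fetch_archive_from_http(url="https://example.com/corpus.tar.gz", output_dir="data/corpus")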
def squad_json_to_jsonl(squad_file: str, output_file: str):
"""
Converts a SQuAD-json-file into jsonl format with one document per line.
:param squad_file: SQuAD-file in json format.
:type squad_file: str
:param output_file: Name of output file (SQuAD in jsonl format)
:type output_file: str
"""
with open(squad_file, encoding='utf-8') as json_file, open(output_file, "w", encoding='utf-8') as jsonl_file:
squad_json = json.load(json_file)
for doc in squad_json["data"]:
json.dump(doc, jsonl_file)
jsonl_file.write("\n")
| 42.87715 | 156 | 0.571944 |
4a24a90b4cf0d180008feeaa35be1cd02b62f3b2 | 347 | py | Python | presqt/utilities/utils/list_intersection.py | djordjetrajkovic/presqt | 8424b61b1c5b8d29de74c7a333889d9e9eb7aee8 | ["Apache-2.0"] | 3 | 2019-01-29T19:45:25.000Z | 2020-12-01T18:24:51.000Z | presqt/utilities/utils/list_intersection.py | djordjetrajkovic/presqt | 8424b61b1c5b8d29de74c7a333889d9e9eb7aee8 | ["Apache-2.0"] | 419 | 2018-09-13T23:11:15.000Z | 2021-09-22T17:49:00.000Z | presqt/utilities/utils/list_intersection.py | djordjetrajkovic/presqt | 8424b61b1c5b8d29de74c7a333889d9e9eb7aee8 | ["Apache-2.0"] | 2 | 2020-04-10T08:19:41.000Z | 2021-01-04T15:29:42.000Z |
def list_intersection(list_one, list_two):
"""
Compares two lists and returns a list of shared values between them.
Parameters
----------
list_one: list
list_two: list
Returns
-------
A list of matching items between the two given lists.
"""
return [entry for entry in list_one if entry in list_two]
| 21.6875 | 72 | 0.639769 |
4a24a984151c9e1682063447e1b0c07fe75d1054 | 21,832 | py | Python | detectron2/data/transforms/augmentation_impl.py | makman7/detectron2 | c8322e53fc61dacec7ce461886e66cf1a4545dae | ["Apache-2.0"] | null | null | null | detectron2/data/transforms/augmentation_impl.py | makman7/detectron2 | c8322e53fc61dacec7ce461886e66cf1a4545dae | ["Apache-2.0"] | null | null | null | detectron2/data/transforms/augmentation_impl.py | makman7/detectron2 | c8322e53fc61dacec7ce461886e66cf1a4545dae | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Implement many useful :class:`Augmentation`.
"""
import numpy as np
import sys
from typing import Tuple
from fvcore.transforms.transform import (
BlendTransform,
CropTransform,
HFlipTransform,
NoOpTransform,
PadTransform,
Transform,
TransformList,
VFlipTransform,
)
from PIL import Image
from .augmentation import Augmentation, _transform_to_aug
from .transform import ExtentTransform, ResizeTransform, RotationTransform
__all__ = [
"FixedSizeCrop",
"RandomApply",
"RandomBrightness",
"RandomContrast",
"RandomCrop",
"RandomExtent",
"RandomFlip",
"RandomSaturation",
"RandomLighting",
"RandomRotation",
"Resize",
"ResizeScale",
"ResizeShortestEdge",
"RandomCrop_CategoryAreaConstraint",
]
class RandomApply(Augmentation):
"""
Randomly apply an augmentation with a given probability.
"""
def __init__(self, tfm_or_aug, prob=0.5):
"""
Args:
tfm_or_aug (Transform, Augmentation): the transform or augmentation
to be applied. It can either be a `Transform` or `Augmentation`
instance.
prob (float): probability between 0.0 and 1.0 that
the wrapper transformation is applied
"""
super().__init__()
self.aug = _transform_to_aug(tfm_or_aug)
assert 0.0 <= prob <= 1.0, f"Probablity must be between 0.0 and 1.0 (given: {prob})"
self.prob = prob
def get_transform(self, *args):
do = self._rand_range() < self.prob
if do:
return self.aug.get_transform(*args)
else:
return NoOpTransform()
def __call__(self, aug_input):
do = self._rand_range() < self.prob
if do:
return self.aug(aug_input)
else:
return NoOpTransform()
class RandomFlip(Augmentation):
"""
Flip the image horizontally or vertically with the given probability.
"""
def __init__(self, prob=0.5, *, horizontal=True, vertical=False):
"""
Args:
prob (float): probability of flip.
horizontal (boolean): whether to apply horizontal flipping
vertical (boolean): whether to apply vertical flipping
"""
super().__init__()
if horizontal and vertical:
raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
if not horizontal and not vertical:
raise ValueError("At least one of horiz or vert has to be True!")
self._init(locals())
def get_transform(self, image):
h, w = image.shape[:2]
do = self._rand_range() < self.prob
if do:
if self.horizontal:
return HFlipTransform(w)
elif self.vertical:
return VFlipTransform(h)
else:
return NoOpTransform()
class Resize(Augmentation):
"""Resize image to a fixed target size"""
def __init__(self, shape, interp=Image.BILINEAR):
"""
Args:
shape: (h, w) tuple or a int
interp: PIL interpolation method
"""
if isinstance(shape, int):
shape = (shape, shape)
shape = tuple(shape)
self._init(locals())
def get_transform(self, image):
return ResizeTransform(
image.shape[0], image.shape[1], self.shape[0], self.shape[1], self.interp
)
class ResizeShortestEdge(Augmentation):
"""
Scale the shorter edge to the given size, with a limit of `max_size` on the longer edge.
If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.
"""
def __init__(
self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR
):
"""
Args:
short_edge_length (list[int]): If ``sample_style=="range"``,
a [min, max] interval from which to sample the shortest edge length.
If ``sample_style=="choice"``, a list of shortest edge lengths to sample from.
max_size (int): maximum allowed longest edge length.
sample_style (str): either "range" or "choice".
"""
super().__init__()
assert sample_style in ["range", "choice"], sample_style
self.is_range = sample_style == "range"
if isinstance(short_edge_length, int):
short_edge_length = (short_edge_length, short_edge_length)
if self.is_range:
assert len(short_edge_length) == 2, (
"short_edge_length must be two values using 'range' sample style."
f" Got {short_edge_length}!"
)
self._init(locals())
def get_transform(self, image):
h, w = image.shape[:2]
if self.is_range:
size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
else:
size = np.random.choice(self.short_edge_length)
if size == 0:
return NoOpTransform()
scale = size * 1.0 / min(h, w)
if h < w:
newh, neww = size, scale * w
else:
newh, neww = scale * h, size
if max(newh, neww) > self.max_size:
scale = self.max_size * 1.0 / max(newh, neww)
newh = newh * scale
neww = neww * scale
neww = int(neww + 0.5)
newh = int(newh + 0.5)
return ResizeTransform(h, w, newh, neww, self.interp)
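# Worked example (added annotation; not part of the original file): with
# short_edge_length=640, max_size=1000 and a 480x1280 input, the shorter edge is scaled
# to 640 (scale = 4/3), giving 640x1707; that exceeds max_size, so everything is rescaled
# by 1000/1707, yielding an output of roughly 375x1000.
def _example_resize_shortest_edge():
    aug = ResizeShortestEdge(short_edge_length=640, max_size=1000, sample_style="choice")
    image = np.zeros((480, 1280, 3), dtype=np.uint8)
    return aug.get_transform(image).apply_image(image).shape  # ~(375, 1000, 3)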
class ResizeScale(Augmentation):
"""
Takes target size as input and randomly scales the given target size between `min_scale`
and `max_scale`. It then scales the input image such that it fits inside the scaled target
box, keeping the aspect ratio constant.
This implements the resize part of the Google's 'resize_and_crop' data augmentation:
https://github.com/tensorflow/tpu/blob/master/models/official/detection/utils/input_utils.py#L127
"""
def __init__(
self,
min_scale: float,
max_scale: float,
target_height: int,
target_width: int,
interp: int = Image.BILINEAR,
):
"""
Args:
min_scale: minimum image scale range.
max_scale: maximum image scale range.
target_height: target image height.
target_width: target image width.
interp: image interpolation method.
"""
super().__init__()
self._init(locals())
def get_transform(self, image: np.ndarray) -> Transform:
# Compute the image scale and scaled size.
input_size = image.shape[:2]
output_size = (self.target_height, self.target_width)
random_scale = np.random.uniform(self.min_scale, self.max_scale)
random_scale_size = np.multiply(output_size, random_scale)
scale = np.minimum(
random_scale_size[0] / input_size[0], random_scale_size[1] / input_size[1]
)
scaled_size = np.round(np.multiply(input_size, scale)).astype(int)
return ResizeTransform(
input_size[0], input_size[1], scaled_size[0], scaled_size[1], self.interp
)
class RandomRotation(Augmentation):
"""
    Rotate the image counterclockwise by a randomly sampled angle (in degrees)
    around the given center.
"""
def __init__(self, angle, expand=True, center=None, sample_style="range", interp=None):
"""
Args:
angle (list[float]): If ``sample_style=="range"``,
a [min, max] interval from which to sample the angle (in degrees).
If ``sample_style=="choice"``, a list of angles to sample from
expand (bool): choose if the image should be resized to fit the whole
rotated image (default), or simply cropped
center (list[[float, float]]): If ``sample_style=="range"``,
a [[minx, miny], [maxx, maxy]] relative interval from which to sample the center,
[0, 0] being the top left of the image and [1, 1] the bottom right.
If ``sample_style=="choice"``, a list of centers to sample from
Default: None, which means that the center of rotation is the center of the image
center has no effect if expand=True because it only affects shifting
"""
super().__init__()
assert sample_style in ["range", "choice"], sample_style
self.is_range = sample_style == "range"
if isinstance(angle, (float, int)):
angle = (angle, angle)
if center is not None and isinstance(center[0], (float, int)):
center = (center, center)
self._init(locals())
def get_transform(self, image):
h, w = image.shape[:2]
center = None
if self.is_range:
angle = np.random.uniform(self.angle[0], self.angle[1])
if self.center is not None:
center = (
np.random.uniform(self.center[0][0], self.center[1][0]),
np.random.uniform(self.center[0][1], self.center[1][1]),
)
else:
angle = np.random.choice(self.angle)
if self.center is not None:
center = np.random.choice(self.center)
if center is not None:
center = (w * center[0], h * center[1]) # Convert to absolute coordinates
if angle % 360 == 0:
return NoOpTransform()
return RotationTransform(h, w, angle, expand=self.expand, center=center, interp=self.interp)
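# Minimal sketch (added; not part of the original file): sample a rotation uniformly in
# [-30, 30] degrees and apply it, letting the output expand to fit the rotated image.
def _example_random_rotation():
    aug = RandomRotation(angle=[-30, 30], expand=True, sample_style="range")
    image = np.zeros((200, 300, 3), dtype=np.uint8)
    return aug.get_transform(image).apply_image(image)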
class FixedSizeCrop(Augmentation):
"""
If `crop_size` is smaller than the input image size, then it uses a random crop of
the crop size. If `crop_size` is larger than the input image size, then it pads
the right and the bottom of the image to the crop size.
"""
def __init__(self, crop_size: Tuple[int], pad_value: float = 128.0):
"""
Args:
crop_size: target image (height, width).
pad_value: the padding value.
"""
super().__init__()
self._init(locals())
def get_transform(self, image: np.ndarray) -> TransformList:
# Compute the image scale and scaled size.
input_size = image.shape[:2]
output_size = self.crop_size
# Add random crop if the image is scaled up.
max_offset = np.subtract(input_size, output_size)
max_offset = np.maximum(max_offset, 0)
offset = np.multiply(max_offset, np.random.uniform(0.0, 1.0))
offset = np.round(offset).astype(int)
crop_transform = CropTransform(
offset[1], offset[0], output_size[1], output_size[0], input_size[1], input_size[0]
)
# Add padding if the image is scaled down.
pad_size = np.subtract(output_size, input_size)
pad_size = np.maximum(pad_size, 0)
original_size = np.minimum(input_size, output_size)
pad_transform = PadTransform(
0, 0, pad_size[1], pad_size[0], original_size[1], original_size[0], self.pad_value
)
return TransformList([crop_transform, pad_transform])
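# Illustrative sketch (added) of the "resize_and_crop" recipe referenced in ResizeScale:
# jitter the scale of a fixed target box, then crop/pad back to that fixed size. The
# 1024x1024 target and the scale range are arbitrary example values.
def _example_resize_and_crop(image):
    resize = ResizeScale(min_scale=0.1, max_scale=2.0, target_height=1024, target_width=1024)
    crop = FixedSizeCrop(crop_size=(1024, 1024))
    image = resize.get_transform(image).apply_image(image)
    return crop.get_transform(image).apply_image(image)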
class RandomCrop(Augmentation):
"""
Randomly crop a rectangle region out of an image.
"""
def __init__(self, crop_type: str, crop_size):
"""
Args:
crop_type (str): one of "relative_range", "relative", "absolute", "absolute_range".
crop_size (tuple[float, float]): two floats, explained below.
- "relative": crop a (H * crop_size[0], W * crop_size[1]) region from an input image of
size (H, W). crop size should be in (0, 1]
- "relative_range": uniformly sample two values from [crop_size[0], 1]
          and [crop_size[1], 1], and use them as in "relative" crop type.
- "absolute" crop a (crop_size[0], crop_size[1]) region from input image.
crop_size must be smaller than the input image size.
- "absolute_range", for an input of size (H, W), uniformly sample H_crop in
[crop_size[0], min(H, crop_size[1])] and W_crop in [crop_size[0], min(W, crop_size[1])].
Then crop a region (H_crop, W_crop).
"""
# TODO style of relative_range and absolute_range are not consistent:
# one takes (h, w) but another takes (min, max)
super().__init__()
assert crop_type in ["relative_range", "relative", "absolute", "absolute_range"]
self._init(locals())
def get_transform(self, image):
h, w = image.shape[:2]
croph, cropw = self.get_crop_size((h, w))
assert h >= croph and w >= cropw, "Shape computation in {} has bugs.".format(self)
h0 = np.random.randint(h - croph + 1)
w0 = np.random.randint(w - cropw + 1)
return CropTransform(w0, h0, cropw, croph)
def get_crop_size(self, image_size):
"""
Args:
image_size (tuple): height, width
Returns:
crop_size (tuple): height, width in absolute pixels
"""
h, w = image_size
if self.crop_type == "relative":
ch, cw = self.crop_size
return int(h * ch + 0.5), int(w * cw + 0.5)
elif self.crop_type == "relative_range":
crop_size = np.asarray(self.crop_size, dtype=np.float32)
ch, cw = crop_size + np.random.rand(2) * (1 - crop_size)
return int(h * ch + 0.5), int(w * cw + 0.5)
elif self.crop_type == "absolute":
return (min(self.crop_size[0], h), min(self.crop_size[1], w))
elif self.crop_type == "absolute_range":
assert self.crop_size[0] <= self.crop_size[1]
ch = np.random.randint(min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1)
cw = np.random.randint(min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1)
return ch, cw
else:
            raise NotImplementedError("Unknown crop type {}".format(self.crop_type))
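# Worked examples of get_crop_size (added annotation) for a 480x640 input:
#   RandomCrop("relative", (0.5, 0.5))        -> always (240, 320)
#   RandomCrop("relative_range", (0.5, 0.5))  -> heights in [240, 480], widths in [320, 640]
#   RandomCrop("absolute", (300, 300))        -> always (300, 300)
#   RandomCrop("absolute_range", (200, 400))  -> each side sampled from [200, 400]
def _example_random_crop():
    aug = RandomCrop("relative_range", (0.5, 0.5))
    image = np.zeros((480, 640, 3), dtype=np.uint8)
    return aug.get_transform(image).apply_image(image)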
class RandomCrop_CategoryAreaConstraint(Augmentation):
"""
Similar to :class:`RandomCrop`, but find a cropping window such that no single category
occupies a ratio of more than `single_category_max_area` in semantic segmentation ground
    truth, which can cause instability in training. The function attempts to find such a valid
cropping window for at most 10 times.
"""
def __init__(
self,
crop_type: str,
crop_size,
single_category_max_area: float = 1.0,
ignored_category: int = None,
):
"""
Args:
crop_type, crop_size: same as in :class:`RandomCrop`
single_category_max_area: the maximum allowed area ratio of a
category. Set to 1.0 to disable
ignored_category: allow this category in the semantic segmentation
ground truth to exceed the area ratio. Usually set to the category
that's ignored in training.
"""
self.crop_aug = RandomCrop(crop_type, crop_size)
self._init(locals())
def get_transform(self, image, sem_seg):
if self.single_category_max_area >= 1.0:
return self.crop_aug.get_transform(image)
else:
h, w = sem_seg.shape
for _ in range(10):
crop_size = self.crop_aug.get_crop_size((h, w))
y0 = np.random.randint(h - crop_size[0] + 1)
x0 = np.random.randint(w - crop_size[1] + 1)
sem_seg_temp = sem_seg[y0 : y0 + crop_size[0], x0 : x0 + crop_size[1]]
labels, cnt = np.unique(sem_seg_temp, return_counts=True)
if self.ignored_category is not None:
cnt = cnt[labels != self.ignored_category]
if len(cnt) > 1 and np.max(cnt) < np.sum(cnt) * self.single_category_max_area:
break
crop_tfm = CropTransform(x0, y0, crop_size[1], crop_size[0])
return crop_tfm
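# Minimal sketch (added): crop so that no single class covers more than 75% of the
# cropped semantic-segmentation ground truth, treating label 255 as ignored. The
# tolerance and label values are arbitrary examples.
def _example_category_area_crop(image, sem_seg):
    aug = RandomCrop_CategoryAreaConstraint(
        "relative_range", (0.5, 0.5), single_category_max_area=0.75, ignored_category=255
    )
    tfm = aug.get_transform(image, sem_seg)
    return tfm.apply_image(image), tfm.apply_segmentation(sem_seg)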
class RandomExtent(Augmentation):
"""
Outputs an image by cropping a random "subrect" of the source image.
The subrect can be parameterized to include pixels outside the source image,
in which case they will be set to zeros (i.e. black). The size of the output
image will vary with the size of the random subrect.
"""
def __init__(self, scale_range, shift_range):
"""
Args:
scale_range (l, h): Range of input-to-output size scaling factor
shift_range (x, y): Range of shifts of the cropped subrect. The rect
is shifted by [w / 2 * Uniform(-x, x), h / 2 * Uniform(-y, y)],
where (w, h) is the (width, height) of the input image. Set each
component to zero to crop at the image's center.
"""
super().__init__()
self._init(locals())
def get_transform(self, image):
img_h, img_w = image.shape[:2]
# Initialize src_rect to fit the input image.
src_rect = np.array([-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h])
# Apply a random scaling to the src_rect.
src_rect *= np.random.uniform(self.scale_range[0], self.scale_range[1])
# Apply a random shift to the coordinates origin.
src_rect[0::2] += self.shift_range[0] * img_w * (np.random.rand() - 0.5)
src_rect[1::2] += self.shift_range[1] * img_h * (np.random.rand() - 0.5)
# Map src_rect coordinates into image coordinates (center at corner).
src_rect[0::2] += 0.5 * img_w
src_rect[1::2] += 0.5 * img_h
return ExtentTransform(
src_rect=(src_rect[0], src_rect[1], src_rect[2], src_rect[3]),
output_size=(int(src_rect[3] - src_rect[1]), int(src_rect[2] - src_rect[0])),
)
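# Minimal sketch (added): zoom the source rectangle between 0.8x and 1.2x of the input
# and shift it by up to +/-5% of the image size in each direction (a shift_range of 0.1
# translates to a maximum offset of 0.05 * width/height, per the math above).
def _example_random_extent(image):
    aug = RandomExtent(scale_range=(0.8, 1.2), shift_range=(0.1, 0.1))
    return aug.get_transform(image).apply_image(image)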
class RandomContrast(Augmentation):
"""
Randomly transforms image contrast.
Contrast intensity is uniformly sampled in (intensity_min, intensity_max).
- intensity < 1 will reduce contrast
- intensity = 1 will preserve the input image
- intensity > 1 will increase contrast
See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
"""
def __init__(self, intensity_min=.8, intensity_max=1.2):
"""
Args:
intensity_min (float): Minimum augmentation
intensity_max (float): Maximum augmentation
"""
super().__init__()
self._init(locals())
def get_transform(self, image):
w = np.random.uniform(self.intensity_min, self.intensity_max)
return BlendTransform(src_image=image.mean(), src_weight=1 - w, dst_weight=w)
class RandomBrightness(Augmentation):
"""
Randomly transforms image brightness.
Brightness intensity is uniformly sampled in (intensity_min, intensity_max).
- intensity < 1 will reduce brightness
- intensity = 1 will preserve the input image
- intensity > 1 will increase brightness
See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
"""
def __init__(self, intensity_min=.8, intensity_max=1.2):
"""
Args:
intensity_min (float): Minimum augmentation
intensity_max (float): Maximum augmentation
"""
super().__init__()
self._init(locals())
def get_transform(self, image):
w = np.random.uniform(self.intensity_min, self.intensity_max)
return BlendTransform(src_image=0, src_weight=1 - w, dst_weight=w)
class RandomSaturation(Augmentation):
"""
Randomly transforms saturation of an RGB image.
Input images are assumed to have 'RGB' channel order.
Saturation intensity is uniformly sampled in (intensity_min, intensity_max).
- intensity < 1 will reduce saturation (make the image more grayscale)
- intensity = 1 will preserve the input image
- intensity > 1 will increase saturation
See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
"""
def __init__(self, intensity_min=.8, intensity_max=1.2):
"""
Args:
intensity_min (float): Minimum augmentation (1 preserves input).
intensity_max (float): Maximum augmentation (1 preserves input).
"""
super().__init__()
self._init(locals())
def get_transform(self, image):
assert image.shape[-1] == 3, "RandomSaturation only works on RGB images"
w = np.random.uniform(self.intensity_min, self.intensity_max)
grayscale = image.dot([0.299, 0.587, 0.114])[:, :, np.newaxis]
return BlendTransform(src_image=grayscale, src_weight=1 - w, dst_weight=w)
class RandomLighting(Augmentation):
"""
The "lighting" augmentation described in AlexNet, using fixed PCA over ImageNet.
Input images are assumed to have 'RGB' channel order.
The degree of color jittering is randomly sampled via a normal distribution,
with standard deviation given by the scale parameter.
"""
def __init__(self, scale=2):
"""
Args:
scale (float): Standard deviation of principal component weighting.
"""
super().__init__()
self._init(locals())
self.eigen_vecs = np.array(
[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]]
)
self.eigen_vals = np.array([0.2175, 0.0188, 0.0045])
def get_transform(self, image):
assert image.shape[-1] == 3, "RandomLighting only works on RGB images"
weights = np.random.normal(scale=self.scale, size=3)
return BlendTransform(
src_image=self.eigen_vecs.dot(weights * self.eigen_vals), src_weight=1.0, dst_weight=1.0
)
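# Illustrative sketch (added): the photometric augmentations above are commonly chained;
# each call samples its own blending weight independently. Intensity ranges are arbitrary.
def _example_color_jitter(image):
    augs = [RandomBrightness(0.9, 1.1), RandomContrast(0.9, 1.1), RandomSaturation(0.9, 1.1)]
    for aug in augs:  # RandomSaturation and RandomLighting expect RGB channel order
        image = aug.get_transform(image).apply_image(image)
    return image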
| 37.641379 | 101 | 0.609381 |
4a24a9f4ece6ba3267856a3860450b1a1cebe03a | 787 | py | Python | newsCrawl/fakeNews/actionfakeNews/migrations/0001_initial.py | ARIF-KHAN-420/Fake_News | acfbffcce454afc09c4a7b06205c1a632c11f822 | ["MIT"] | 1 | 2022-01-03T17:54:03.000Z | 2022-01-03T17:54:03.000Z | newsCrawl/fakeNews/actionfakeNews/migrations/0001_initial.py | arifkhan-silicornya/Fake_News | acfbffcce454afc09c4a7b06205c1a632c11f822 | ["MIT"] | null | null | null | newsCrawl/fakeNews/actionfakeNews/migrations/0001_initial.py | arifkhan-silicornya/Fake_News | acfbffcce454afc09c4a7b06205c1a632c11f822 | ["MIT"] | null | null | null |
# Generated by Django 3.2.8 on 2021-10-11 20:44
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Authenticate_NEWS',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('link', models.CharField(max_length=500)),
],
),
migrations.CreateModel(
name='Fake_NEWS',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('link', models.CharField(max_length=500)),
],
),
]
| 27.137931 | 117 | 0.564168 |
4a24aa64073794a890b9caf311aa130ef19b30c8 | 1,509 | py | Python | backend/machinelearning/api.py | iloveyii/sdg-project | 02a53a0b10d36659410f045e700ad1931de2ffa9 | ["MIT"] | 1 | 2020-02-12T10:44:11.000Z | 2020-02-12T10:44:11.000Z | backend/machinelearning/api.py | iloveyii/sdg-project | 02a53a0b10d36659410f045e700ad1931de2ffa9 | ["MIT"] | 6 | 2021-03-10T07:33:51.000Z | 2022-02-27T10:28:13.000Z | backend/machinelearning/api.py | iloveyii/sdg-project | 02a53a0b10d36659410f045e700ad1931de2ffa9 | ["MIT"] | null | null | null |
from flask import Flask
from flask import request
from flask_wtf.csrf import CSRFProtect
from flask_restful import Resource, Api
from ml import MachineLearning
from datetime import datetime
app = Flask(__name__)
csrf = CSRFProtect(app)
api = Api(app)
dt = datetime.now()
gl = {'server_start_ts': dt.microsecond}
class Product(Resource):
@csrf.exempt
def get(self):
global gl
plots = []
# return gl
file_id = request.args.get('id')
ch1 = request.args.get('ch1')
ch2 = request.args.get('ch2')
transformation = request.args.get('transformation')
bins = request.args.get('bins')
gl = {'server_start_ts': dt.microsecond, 'id': file_id, 'ch1': ch1, 'bins': bins,
'transformation': transformation}
if not file_id or not ch1 or not ch2:
file_id = 'default'
ch1 = 'HDR-T'
ch2 = 'FSC-A'
print('ML Default FCS and chs', ch1, ch2)
else:
print('ML RECEIVED FCS and chs', file_id, ch1, ch2)
try:
ml = MachineLearning(file_id, ch1, ch2, transformation, bins)
plots = ml.get_plots()
gl.update(plots)
except Exception as inst:
print('Err', inst)
return plots
api.add_resource(Product, '/')
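# Example request (illustrative only; the parameter values are placeholders, not
# known-good inputs for the MachineLearning class):
#   curl "http://localhost:5000/?id=default&ch1=HDR-T&ch2=FSC-A&transformation=linear&bins=100"
# Omitting id/ch1/ch2 makes the handler fall back to the default FCS file and channels.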
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
# May have error in container
# ssh to container
# pip3 uninstall bottleneck
# pip3 install bottleneck==1.2
| 28.471698 | 89 | 0.611001 |
4a24aa6fa4acd2117f66b3096e2d24335e4c7abe | 414 | py | Python | data/external/repositories/241493/Kaggle-Whats-cooking-master/jsonToCsv.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | ["MIT"] | null | null | null | data/external/repositories/241493/Kaggle-Whats-cooking-master/jsonToCsv.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | ["MIT"] | null | null | null | data/external/repositories/241493/Kaggle-Whats-cooking-master/jsonToCsv.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | ["MIT"] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z |
import json
from pprint import pprint
fw = open('traindata.csv','w')
with open('train.json') as data_file:
data = json.load(data_file)
for i in range(len(data)) :
s = str(data[i]['id']) + "\t" + str(data[i]['cuisine']) + "\t"
ingd = data[i]['ingredients']
for j in ingd :
j = str(filter(lambda x:ord(x)>31 and ord(x)<128,j))
s += str(j).strip().replace("-","") + " "
fw.write(str(s).strip()+"\n")
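# Each line written to traindata.csv has the form (illustrative):
#   <id>\t<cuisine>\t<ingredient words separated by spaces>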
| 24.352941 | 63 | 0.589372 |
4a24ab62c2b170753b6504b4d0ea460fec2f375c | 43,604 | py | Python | rmgpy/rmg/pdep.py | CanePan-cc/CanePanWorkshop | 349a4af759cf8877197772cd7eaca1e51d46eff5 | ["MIT"] | null | null | null | rmgpy/rmg/pdep.py | CanePan-cc/CanePanWorkshop | 349a4af759cf8877197772cd7eaca1e51d46eff5 | ["MIT"] | null | null | null | rmgpy/rmg/pdep.py | CanePan-cc/CanePanWorkshop | 349a4af759cf8877197772cd7eaca1e51d46eff5 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2019 Prof. William H. Green ([email protected]), #
# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected]) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
Contains classes for providing pressure-dependent kinetics estimation
functionality to RMG.
"""
import logging
import os.path
import mpmath as mp
import numpy as np
import scipy.optimize as opt
import rmgpy.pdep.network
import rmgpy.reaction
from rmgpy.constants import R
from rmgpy.data.kinetics.library import LibraryReaction
from rmgpy.exceptions import PressureDependenceError, NetworkError
from rmgpy.pdep import Configuration
from rmgpy.rmg.react import react_species
from rmgpy.statmech import Conformer
################################################################################
class PDepReaction(rmgpy.reaction.Reaction):
def __init__(self,
index=-1,
label='',
reactants=None,
products=None,
specific_collider=None,
network=None,
kinetics=None,
network_kinetics=None,
reversible=True,
transition_state=None,
duplicate=False,
degeneracy=1,
pairs=None
):
rmgpy.reaction.Reaction.__init__(self,
index=index,
label=label,
reactants=reactants,
products=products,
specific_collider=specific_collider,
kinetics=kinetics,
network_kinetics=network_kinetics,
reversible=reversible,
transition_state=transition_state,
duplicate=duplicate,
degeneracy=degeneracy,
pairs=pairs
)
self.network = network
def __reduce__(self):
"""
A helper function used when pickling an object.
"""
return (PDepReaction, (self.index,
self.label,
self.reactants,
self.products,
self.specific_collider,
self.network,
self.kinetics,
self.reversible,
self.transition_state,
self.duplicate,
self.degeneracy,
self.pairs
))
def get_source(self):
"""
Get the source of this PDepReaction
"""
return str(self.network)
################################################################################
class PDepNetwork(rmgpy.pdep.network.Network):
"""
A representation of a *partial* unimolecular reaction network. Each partial
network has a single `source` isomer or reactant channel, and is responsible
only for :math:`k(T,P)` values for net reactions with source as the
reactant. Multiple partial networks can have the same source, but networks
with the same source and any explored isomers must be combined.
=================== ======================= ================================
Attribute Type Description
=================== ======================= ================================
`source` ``list`` The isomer or reactant channel that acts as the source
`explored` ``list`` A list of the unimolecular isomers whose reactions have been fully explored
=================== ======================= ================================
"""
def __init__(self, index=-1, source=None):
rmgpy.pdep.network.Network.__init__(self, label="PDepNetwork #{0}".format(index))
self.index = index
self.source = source
self.explored = []
def __str__(self):
return "PDepNetwork #{0}".format(self.index)
def __reduce__(self):
"""
A helper function used when pickling an object.
"""
return (PDepNetwork, (self.index, self.source), self.__dict__)
def __setstate__(self, dict):
self.__dict__.update(dict)
def cleanup(self):
"""
        Delete intermediate arrays used to compute k(T,P) values.
"""
for isomer in self.isomers:
isomer.cleanup()
for reactant in self.reactants:
reactant.cleanup()
for product in self.products:
product.cleanup()
self.e_list = None
self.j_list = None
self.dens_states = None
self.coll_freq = None
self.Mcoll = None
self.Kij = None
self.Fim = None
self.Gnj = None
self.E0 = None
self.n_grains = 0
self.n_j = 0
self.K = None
self.p0 = None
def get_leak_coefficient(self, T, P):
"""
Return the pressure-dependent rate coefficient :math:`k(T,P)` describing
the total rate of "leak" from this network. This is defined as the sum
of the :math:`k(T,P)` values for all net reactions to nonexplored
unimolecular isomers.
"""
k = 0.0
if len(self.net_reactions) == 0 and len(self.path_reactions) == 1:
# The network is of the form A + B -> C* (with C* nonincluded)
# For this special case we use the high-pressure limit k(T) to
# ensure that we're estimating the total leak flux
rxn = self.path_reactions[0]
if rxn.kinetics is None:
if rxn.reverse.kinetics is not None:
rxn = rxn.reverse
else:
raise PressureDependenceError('Path reaction {0} with no high-pressure-limit kinetics encountered '
'in PDepNetwork #{1:d} while evaluating leak flux.'.format(rxn, self.index))
if rxn.products is self.source:
k = rxn.get_rate_coefficient(T, P) / rxn.get_equilibrium_constant(T)
else:
k = rxn.get_rate_coefficient(T, P)
else:
# The network has at least one included isomer, so we can calculate
# the leak flux normally
for rxn in self.net_reactions:
if len(rxn.products) == 1 and rxn.products[0] not in self.explored:
k += rxn.get_rate_coefficient(T, P)
return k
def get_maximum_leak_species(self, T, P):
"""
Get the unexplored (unimolecular) isomer with the maximum leak flux.
Note that the leak rate coefficients vary with temperature and
pressure, so you must provide these in order to get a meaningful result.
"""
# Choose species with maximum leak flux
max_k = 0.0
max_species = None
if len(self.net_reactions) == 0 and len(self.path_reactions) == 1:
max_k = self.get_leak_coefficient(T, P)
rxn = self.path_reactions[0]
if rxn.products == self.source:
assert len(rxn.reactants) == 1
max_species = rxn.reactants[0]
else:
assert len(rxn.products) == 1
max_species = rxn.products[0]
else:
for rxn in self.net_reactions:
if len(rxn.products) == 1 and rxn.products[0] not in self.explored:
k = rxn.get_rate_coefficient(T, P)
if max_species is None or k > max_k:
max_species = rxn.products[0]
max_k = k
# Make sure we've identified a species
if max_species is None:
raise NetworkError('No unimolecular isomers left to explore!')
# Return the species
return max_species
def get_leak_branching_ratios(self, T, P):
"""
Return a dict with the unexplored isomers in the partial network as the
keys and the fraction of the total leak coefficient as the values.
"""
ratios = {}
if len(self.net_reactions) == 0 and len(self.path_reactions) == 1:
rxn = self.path_reactions[0]
assert rxn.reactants == self.source or rxn.products == self.source
if rxn.products == self.source:
assert len(rxn.reactants) == 1
ratios[rxn.reactants[0]] = 1.0
else:
assert len(rxn.products) == 1
ratios[rxn.products[0]] = 1.0
else:
for rxn in self.net_reactions:
if len(rxn.products) == 1 and rxn.products[0] not in self.explored:
ratios[rxn.products[0]] = rxn.get_rate_coefficient(T, P)
kleak = sum(ratios.values())
for spec in ratios:
ratios[spec] /= kleak
return ratios
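    # --- Illustrative helper (added for clarity; not part of the original RMG source). ---
    # Shows how the leak-flux accessors above are typically combined: total leak rate,
    # its branching among unexplored isomers, and the isomer that would be explored next.
    # T is in K and P in Pa, consistent with the rest of this class.
    def _example_leak_analysis(self, T=1000.0, P=1.0e5):
        k_leak = self.get_leak_coefficient(T, P)
        ratios = self.get_leak_branching_ratios(T, P)
        next_isomer = self.get_maximum_leak_species(T, P)
        return k_leak, ratios, next_isomer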
def explore_isomer(self, isomer):
"""
Explore a previously-unexplored unimolecular `isomer` in this partial
network using the provided core-edge reaction model `reaction_model`,
returning the new reactions and new species.
"""
if isomer in self.explored:
logging.warning('Already explored isomer {0} in pressure-dependent network #{1:d}'.format(isomer,
self.index))
return []
assert isomer not in self.source, "Attempted to explore isomer {0}, but that is the source configuration for this network.".format(isomer)
for product in self.products:
if product.species == [isomer]:
break
else:
raise Exception('Attempted to explore isomer {0}, but that species not found in product channels.'.format(isomer))
logging.info('Exploring isomer {0} in pressure-dependent network #{1:d}'.format(isomer, self.index))
for mol in isomer.molecule:
mol.update()
self.explored.append(isomer)
self.isomers.append(product)
self.products.remove(product)
# Find reactions involving the found species as unimolecular
# reactant or product (e.g. A <---> products)
# Don't find reactions involving the new species as bimolecular
# reactants or products with itself (e.g. A + A <---> products)
# Don't find reactions involving the new species as bimolecular
# reactants or products with other core species (e.g. A + B <---> products)
new_reactions = react_species((isomer,))
return new_reactions
def add_path_reaction(self, newReaction):
"""
Add a path reaction to the network. If the path reaction already exists,
no action is taken.
"""
# Add this reaction to that network if not already present
found = False
for rxn in self.path_reactions:
if newReaction.reactants == rxn.reactants and newReaction.products == rxn.products:
found = True
break
elif newReaction.products == rxn.reactants and newReaction.reactants == rxn.products:
found = True
break
if not found:
self.path_reactions.append(newReaction)
self.invalidate()
def get_energy_filtered_reactions(self, T, tol):
"""
        Returns a list of path reactions whose transition state energies lie more than
        tol*R*T above the energy of the source configuration.
"""
dE = tol * R * T
for conf in self.isomers + self.products + self.reactants:
if len(conf.species) == len(self.source):
if len(self.source) == 1:
if self.source[0].is_isomorphic(conf.species[0]):
E0source = conf.E0
break
elif len(self.source) == 2:
boo00 = self.source[0].is_isomorphic(conf.species[0])
boo01 = self.source[0].is_isomorphic(conf.species[1])
if boo00 or boo01: # if we found source[0]
boo10 = self.source[1].is_isomorphic(conf.species[0])
boo11 = self.source[1].is_isomorphic(conf.species[1])
if (boo00 and boo11) or (boo01 and boo10):
E0source = conf.E0
break
else:
raise ValueError('No isomer, product or reactant channel is isomorphic to the source')
filtered_rxns = []
for rxn in self.path_reactions:
E0 = rxn.transition_state.conformer.E0.value_si
if E0 - E0source > dE:
filtered_rxns.append(rxn)
return filtered_rxns
def get_rate_filtered_products(self, T, P, tol):
"""
        Determines the products of net reactions whose steady-state fluxes are less than
        tol, treating all A => B + C reactions as irreversible and imposing a constant
        flux of 1.0 from/to the source configuration.
"""
c = self.solve_ss_network(T, P)
isomer_spcs = [iso.species[0] for iso in self.isomers]
filtered_prod = []
if c is not None:
for rxn in self.net_reactions:
val = 0.0
val2 = 0.0
if rxn.reactants[0] in isomer_spcs:
ind = isomer_spcs.index(rxn.reactants[0])
kf = rxn.get_rate_coefficient(T, P)
val = kf * c[ind]
if rxn.products[0] in isomer_spcs:
ind2 = isomer_spcs.index(rxn.products[0])
kr = rxn.get_rate_coefficient(T, P) / rxn.get_equilibrium_constant(T)
val2 = kr * c[ind2]
if max(val, val2) < tol:
filtered_prod.append(rxn.products)
return filtered_prod
else:
logging.warning("Falling back flux reduction from Steady State analysis to rate coefficient analysis")
ks = np.array([rxn.get_rate_coefficient(T, P) for rxn in self.net_reactions])
frs = ks / ks.sum()
inds = [i for i in range(len(frs)) if frs[i] < tol]
filtered_prod = [self.net_reactions[i].products for i in inds]
return filtered_prod
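    # --- Illustrative helper (added; not part of the original RMG source). ---
    # A sketch of how the two filters above can feed remove_reactions during network
    # reduction: drop path reactions lying more than tol_e*R*T above the source and
    # product channels carrying a steady-state flux below tol_f. The tolerance values
    # here are arbitrary placeholders.
    def _example_prune_network(self, reaction_model, T=1000.0, P=1.0e5, tol_e=100.0, tol_f=1e-6):
        energy_filtered_rxns = self.get_energy_filtered_reactions(T, tol_e)
        low_flux_prods = self.get_rate_filtered_products(T, P, tol_f)
        self.remove_reactions(reaction_model, rxns=energy_filtered_rxns, prods=low_flux_prods)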
def solve_ss_network(self, T, P):
"""
calculates the steady state concentrations if all A => B + C
reactions are irreversible and the flux from/to the source
configuration is 1.0
"""
A = np.zeros((len(self.isomers), len(self.isomers)))
b = np.zeros(len(self.isomers))
bimolecular = len(self.source) > 1
isomer_spcs = [iso.species[0] for iso in self.isomers]
for rxn in self.net_reactions:
if rxn.reactants[0] in isomer_spcs:
ind = isomer_spcs.index(rxn.reactants[0])
kf = rxn.get_rate_coefficient(T, P)
A[ind, ind] -= kf
else:
ind = None
if rxn.products[0] in isomer_spcs:
ind2 = isomer_spcs.index(rxn.products[0])
kr = rxn.get_rate_coefficient(T, P) / rxn.get_equilibrium_constant(T)
A[ind2, ind2] -= kr
else:
ind2 = None
if ind is not None and ind2 is not None:
A[ind, ind2] += kr
A[ind2, ind] += kf
if bimolecular:
if rxn.reactants[0] == self.source:
kf = rxn.get_rate_coefficient(T, P)
b[ind2] += kf
elif rxn.products[0] == self.source:
kr = rxn.get_rate_coefficient(T, P) / rxn.get_equilibrium_constant(T)
b[ind] += kr
if not bimolecular:
ind = isomer_spcs.index(self.source[0])
b[ind] = -1.0 # flux at source
else:
b = -b / b.sum() # 1.0 flux from source
if len(b) == 1:
return np.array([b[0] / A[0, 0]])
con = np.linalg.cond(A)
if np.log10(con) < 15:
c = np.linalg.solve(A, b)
else:
logging.warning("Matrix Ill-conditioned, attempting to use Arbitrary Precision Arithmetic")
mp.dps = 30 + int(np.log10(con))
Amp = mp.matrix(A.tolist())
bmp = mp.matrix(b.tolist())
try:
c = mp.qr_solve(Amp, bmp)
c = np.array(list(c[0]))
if any(c <= 0.0):
c, rnorm = opt.nnls(A, b)
c = c.astype(np.float64)
except: # fall back to raw flux analysis rather than solve steady state problem
return None
if np.isnan(c).any():
return None
return c
def remove_disconnected_reactions(self):
"""
gets rid of reactions/isomers/products not connected to the source by a reaction sequence
"""
kept_reactions = []
kept_products = [self.source]
incomplete = True
while incomplete:
s = len(kept_reactions)
for rxn in self.path_reactions:
if not rxn in kept_reactions:
if rxn.reactants in kept_products:
kept_products.append(rxn.products)
kept_reactions.append(rxn)
elif rxn.products in kept_products:
kept_products.append(rxn.reactants)
kept_reactions.append(rxn)
incomplete = s != len(kept_reactions)
logging.info('Removing disconnected items')
for rxn in self.path_reactions:
if rxn not in kept_reactions:
logging.info('Removing rxn: {}'.format(rxn))
self.path_reactions.remove(rxn)
nrxns = []
for nrxn in self.net_reactions:
if nrxn.products not in kept_products or nrxn.reactants not in kept_products:
logging.info('Removing net rxn: {}'.format(nrxn))
else:
logging.info('Keeping net rxn: {}'.format(nrxn))
nrxns.append(nrxn)
self.net_reactions = nrxns
prods = []
for prod in self.products:
if prod.species not in kept_products:
logging.info('Removing product: {}'.format(prod))
else:
logging.info("Keeping product: {}".format(prod))
prods.append(prod)
self.products = prods
rcts = []
for rct in self.reactants:
if rct.species not in kept_products:
                logging.info('Removing reactant: {}'.format(rct))
            else:
                logging.info("Keeping reactant: {}".format(rct))
rcts.append(rct)
self.reactants = rcts
isos = []
for iso in self.isomers:
if iso.species not in kept_products:
logging.info('Removing isomer: {}'.format(iso))
else:
logging.info("Keeping isomer: {}".format(iso))
isos.append(iso)
self.isomers = isos
self.explored = [iso.species[0] for iso in isos]
self.n_isom = len(self.isomers)
self.n_reac = len(self.reactants)
self.n_prod = len(self.products)
def remove_reactions(self, reaction_model, rxns=None, prods=None):
"""
removes a list of reactions from the network and all reactions/products
left disconnected by removing those reactions
"""
if rxns:
for rxn in rxns:
self.path_reactions.remove(rxn)
if prods:
isomers = [x.species[0] for x in self.isomers]
for prod in prods:
prod = [x for x in prod]
if prod[0] in isomers: # skip isomers
continue
for rxn in self.path_reactions:
if rxn.products == prod or rxn.reactants == prod:
self.path_reactions.remove(rxn)
prodspc = [x[0] for x in prods]
for prod in prods:
prod = [x for x in prod]
if prod[0] in isomers: # deal with isomers
for rxn in self.path_reactions:
if rxn.reactants == prod and rxn.products[0] not in isomers and rxn.products[0] not in prodspc:
break
if rxn.products == prod and rxn.reactants[0] not in isomers and rxn.reactants not in prodspc:
break
else:
for rxn in self.path_reactions:
if rxn.reactants == prod or rxn.products == prod:
self.path_reactions.remove(rxn)
self.remove_disconnected_reactions()
self.cleanup()
self.invalidate()
assert self.path_reactions != [], 'Reduction process removed all reactions, cannot update network with no reactions'
reaction_model.update_unimolecular_reaction_networks()
if reaction_model.pressure_dependence.output_file:
path = os.path.join(reaction_model.pressure_dependence.output_file, 'pdep')
for name in os.listdir(path): # remove the old reduced file
if name.endswith('reduced.py'):
os.remove(os.path.join(path, name))
for name in os.listdir(path): # find the new file and name it network_reduced.py
if not name.endswith('full.py'):
os.rename(os.path.join(path, name), os.path.join(path, 'network_reduced.py'))
def merge(self, other):
"""
Merge the partial network `other` into this network.
"""
# Make sure the two partial networks have the same source configuration
assert self.source == other.source
# Merge isomers
for isomer in other.isomers:
if isomer not in self.isomers:
self.isomers.append(isomer)
# Merge explored
for isomer in other.explored:
if isomer not in self.explored:
self.explored.append(isomer)
# Merge reactants
for reactants in other.reactants:
if reactants not in self.reactants:
self.reactants.append(reactants)
# Merge products
for products in other.products:
if products not in self.products:
self.products.append(products)
# However, products that have been explored are actually isomers
# These should be removed from the list of products!
products_to_remove = []
for products in self.products:
if len(products.species) == 1 and products.species[0] in self.isomers:
products_to_remove.append(products)
for products in products_to_remove:
self.products.remove(products)
# Merge path reactions
for reaction in other.path_reactions:
found = False
for rxn in self.path_reactions:
if reaction.reactants == rxn.reactants and reaction.products == rxn.products:
# NB the isEquivalent() method that used to be on the previous line also checked reverse direction.
# I am not sure which is appropriate
found = True
break
if not found:
self.path_reactions.append(reaction)
# Also merge net reactions (so that when we update the network in the
# future, we update the existing net reactions rather than making new ones)
# Q: What to do when a net reaction exists in both networks being merged?
for reaction in other.net_reactions:
found = False
for rxn in self.net_reactions:
if reaction.reactants == rxn.reactants and reaction.products == rxn.products:
# NB the isEquivalent() method that used to be on the previous line also checked reverse direction.
# I am not sure which is appropriate
found = True
break
if not found:
self.net_reactions.append(reaction)
# Mark this network as invalid
self.valid = False
def update_configurations(self, reaction_model):
"""
Sort the reactants and products of each of the network's path reactions
into isomers, reactant channels, and product channels. You must pass
the current `reaction_model` because some decisions on sorting are made
based on which species are in the model core.
"""
reactants = []
products = []
# All explored species are isomers
isomers = self.explored[:]
# The source configuration is an isomer (if unimolecular) or a reactant channel (if bimolecular)
if len(self.source) == 1:
# The source is a unimolecular isomer
if self.source[0] not in isomers: isomers.insert(0, self.source[0])
else:
# The source is a bimolecular reactant channel
self.source.sort()
reactants.append(self.source)
# Iterate over path reactions and make sure each set of reactants and products is classified
for rxn in self.path_reactions:
# Sort bimolecular configurations so that we always encounter them in the
# same order
# The actual order doesn't matter, as long as it is consistent
rxn.reactants.sort()
rxn.products.sort()
# Reactants of the path reaction
if len(rxn.reactants) == 1 and rxn.reactants[0] not in isomers and rxn.reactants not in products:
# We've encountered a unimolecular reactant that is not classified
# These are always product channels (since they would be in source or explored otherwise)
products.append(rxn.reactants)
elif len(rxn.reactants) > 1 and rxn.reactants not in reactants and rxn.reactants not in products:
# We've encountered bimolecular reactants that are not classified
if all([reactant in reaction_model.core.species for reactant in rxn.reactants]):
# Both reactants are in the core, so treat as reactant channel
reactants.append(rxn.reactants)
else:
# One or more reactants is an edge species, so treat as product channel
products.append(rxn.reactants)
# Products of the path reaction
if len(rxn.products) == 1 and rxn.products[0] not in isomers and rxn.products not in products:
# We've encountered a unimolecular product that is not classified
# These are always product channels (since they would be in source or explored otherwise)
products.append(rxn.products)
elif len(rxn.products) > 1 and rxn.products not in reactants and rxn.products not in products:
# We've encountered bimolecular products that are not classified
if all([product in reaction_model.core.species for product in rxn.products]):
# Both products are in the core, so treat as reactant channel
reactants.append(rxn.products)
else:
# One or more reactants is an edge species, so treat as product channel
products.append(rxn.products)
# Clear existing configurations
self.isomers = []
self.reactants = []
self.products = []
# Make a configuration object for each
for isomer in isomers:
self.isomers.append(Configuration(isomer))
for reactant in reactants:
self.reactants.append(Configuration(*reactant))
for product in products:
self.products.append(Configuration(*product))
def update(self, reaction_model, pdep_settings):
"""
Regenerate the :math:`k(T,P)` values for this partial network if the
network is marked as invalid.
"""
from rmgpy.kinetics import Arrhenius, KineticsData, MultiArrhenius
# Get the parameters for the pressure dependence calculation
job = pdep_settings
job.network = self
output_directory = pdep_settings.output_file
Tmin = job.Tmin.value_si
Tmax = job.Tmax.value_si
Pmin = job.Pmin.value_si
Pmax = job.Pmax.value_si
Tlist = job.Tlist.value_si
Plist = job.Plist.value_si
maximum_grain_size = job.maximum_grain_size.value_si if job.maximum_grain_size is not None else 0.0
minimum_grain_count = job.minimum_grain_count
method = job.method
interpolation_model = job.interpolation_model
active_j_rotor = job.active_j_rotor
active_k_rotor = job.active_k_rotor
rmgmode = job.rmgmode
# Figure out which configurations are isomers, reactant channels, and product channels
self.update_configurations(reaction_model)
# Make sure we have high-P kinetics for all path reactions
for rxn in self.path_reactions:
if rxn.kinetics is None and rxn.reverse.kinetics is None:
raise PressureDependenceError('Path reaction {0} with no high-pressure-limit kinetics encountered in '
'PDepNetwork #{1:d}.'.format(rxn, self.index))
elif rxn.kinetics is not None and rxn.kinetics.is_pressure_dependent() and rxn.network_kinetics is None:
raise PressureDependenceError('Pressure-dependent kinetics encountered for path reaction {0} in '
'PDepNetwork #{1:d}.'.format(rxn, self.index))
# Do nothing if the network is already valid
if self.valid:
return
# Do nothing if there are no explored wells
if len(self.explored) == 0 and len(self.source) > 1:
return
# Log the network being updated
logging.info("Updating {0!s}".format(self))
# Generate states data for unimolecular isomers and reactants if necessary
for isomer in self.isomers:
spec = isomer.species[0]
if not spec.has_statmech():
spec.generate_statmech()
for reactants in self.reactants:
for spec in reactants.species:
if not spec.has_statmech():
spec.generate_statmech()
# Also generate states data for any path reaction reactants, so we can
# always apply the ILT method in the direction the kinetics are known
for reaction in self.path_reactions:
for spec in reaction.reactants:
if not spec.has_statmech():
spec.generate_statmech()
# While we don't need the frequencies for product channels, we do need
# the E0, so create a conformer object with the E0 for the product
# channel species if necessary
for products in self.products:
for spec in products.species:
if spec.conformer is None:
spec.conformer = Conformer(E0=spec.get_thermo_data().E0)
# Determine transition state energies on potential energy surface
# In the absence of any better information, we simply set it to
# be the reactant ground-state energy + the activation energy
# Note that we need Arrhenius kinetics in order to do this
for rxn in self.path_reactions:
if rxn.kinetics is None:
raise Exception('Path reaction "{0}" in PDepNetwork #{1:d} has no kinetics!'.format(rxn, self.index))
elif isinstance(rxn.kinetics, KineticsData):
if len(rxn.reactants) == 1:
kunits = 's^-1'
elif len(rxn.reactants) == 2:
kunits = 'm^3/(mol*s)'
elif len(rxn.reactants) == 3:
kunits = 'm^6/(mol^2*s)'
else:
kunits = ''
rxn.kinetics = Arrhenius().fit_to_data(Tlist=rxn.kinetics.Tdata.value_si,
klist=rxn.kinetics.kdata.value_si, kunits=kunits)
elif isinstance(rxn.kinetics, MultiArrhenius):
logging.info('Converting multiple kinetics to a single Arrhenius expression for reaction {rxn}'.format(
rxn=rxn))
rxn.kinetics = rxn.kinetics.to_arrhenius(Tmin=Tmin, Tmax=Tmax)
elif not isinstance(rxn.kinetics, Arrhenius) and rxn.network_kinetics is None:
raise Exception('Path reaction "{0}" in PDepNetwork #{1:d} has invalid kinetics '
'type "{2!s}".'.format(rxn, self.index, rxn.kinetics.__class__))
rxn.fix_barrier_height(force_positive=True)
if rxn.network_kinetics is None:
E0 = sum([spec.conformer.E0.value_si for spec in rxn.reactants]) + rxn.kinetics.Ea.value_si
else:
E0 = sum([spec.conformer.E0.value_si for spec in rxn.reactants]) + rxn.network_kinetics.Ea.value_si
rxn.transition_state = rmgpy.species.TransitionState(conformer=Conformer(E0=(E0 * 0.001, "kJ/mol")))
# Set collision model
bath_gas = [spec for spec in reaction_model.core.species if not spec.reactive]
assert len(bath_gas) > 0, 'No unreactive species to identify as bath gas'
self.bath_gas = {}
for spec in bath_gas:
# is this really the only/best way to weight them?
self.bath_gas[spec] = 1.0 / len(bath_gas)
# Save input file
if not self.label:
self.label = str(self.index)
if output_directory:
job.save_input_file(
os.path.join(output_directory, 'pdep', 'network{0:d}_{1:d}.py'.format(self.index, len(self.isomers))))
self.log_summary(level=logging.INFO)
# Calculate the rate coefficients
self.initialize(Tmin, Tmax, Pmin, Pmax, maximum_grain_size, minimum_grain_count, active_j_rotor, active_k_rotor,
rmgmode)
K = self.calculate_rate_coefficients(Tlist, Plist, method)
# Generate PDepReaction objects
configurations = []
configurations.extend([isom.species[:] for isom in self.isomers])
configurations.extend([reactant.species[:] for reactant in self.reactants])
configurations.extend([product.species[:] for product in self.products])
j = configurations.index(self.source)
for i in range(K.shape[2]):
if i != j:
# Find the path reaction
net_reaction = None
for r in self.net_reactions:
if r.has_template(configurations[j], configurations[i]):
net_reaction = r
# If net reaction does not already exist, make a new one
if net_reaction is None:
net_reaction = PDepReaction(
reactants=configurations[j],
products=configurations[i],
network=self,
kinetics=None
)
net_reaction = reaction_model.make_new_pdep_reaction(net_reaction)
self.net_reactions.append(net_reaction)
# Place the net reaction in the core or edge if necessary
# Note that leak reactions are not placed in the edge
if all([s in reaction_model.core.species for s in net_reaction.reactants]) \
and all([s in reaction_model.core.species for s in net_reaction.products]):
# Check whether netReaction already exists in the core as a LibraryReaction
for rxn in reaction_model.core.reactions:
if isinstance(rxn, LibraryReaction) \
and rxn.is_isomorphic(net_reaction, either_direction=True) \
and not rxn.allow_pdep_route and not rxn.elementary_high_p:
logging.info('Network reaction {0} matched an existing core reaction {1}'
' from the {2} library, and was not added to the model'.format(
str(net_reaction), str(rxn), rxn.library))
break
else:
reaction_model.add_reaction_to_core(net_reaction)
else:
# Check whether netReaction already exists in the edge as a LibraryReaction
for rxn in reaction_model.edge.reactions:
if isinstance(rxn, LibraryReaction) \
and rxn.is_isomorphic(net_reaction, either_direction=True) \
and not rxn.allow_pdep_route and not rxn.elementary_high_p:
logging.info('Network reaction {0} matched an existing edge reaction {1}'
' from the {2} library, and was not added to the model'.format(
str(net_reaction), str(rxn), rxn.library))
break
else:
reaction_model.add_reaction_to_edge(net_reaction)
# Set/update the net reaction kinetics using interpolation model
kdata = K[:, :, i, j].copy()
order = len(net_reaction.reactants)
kdata *= 1e6 ** (order - 1)
kunits = {1: 's^-1', 2: 'cm^3/(mol*s)', 3: 'cm^6/(mol^2*s)'}[order]
net_reaction.kinetics = job.fit_interpolation_model(Tlist, Plist, kdata, kunits)
# Check: For each net reaction that has a path reaction, make
# sure the k(T,P) values for the net reaction do not exceed
# the k(T) values of the path reaction
# Only check the k(T,P) value at the highest P and lowest T,
# as this is the one most likely to be in the high-pressure
# limit
t = 0
p = len(Plist) - 1
for pathReaction in self.path_reactions:
if pathReaction.is_isomerization():
# Don't check isomerization reactions, since their
# k(T,P) values potentially contain both direct and
# well-skipping contributions, and therefore could be
# significantly larger than the direct k(T) value
# (This can also happen for association/dissociation
# reactions, but the effect is generally not too large)
continue
if pathReaction.reactants == net_reaction.reactants and pathReaction.products == net_reaction.products:
if pathReaction.network_kinetics is not None:
kinf = pathReaction.network_kinetics.get_rate_coefficient(Tlist[t])
else:
kinf = pathReaction.kinetics.get_rate_coefficient(Tlist[t])
if K[t, p, i, j] > 2 * kinf: # To allow for a small discretization error
logging.warning('k(T,P) for net reaction {0} exceeds high-P k(T) by {1:g} at {2:g} K, '
'{3:g} bar'.format(net_reaction, K[t, p, i, j] / kinf, Tlist[t], Plist[p] / 1e5))
logging.info(' k(T,P) = {0:9.2e} k(T) = {1:9.2e}'.format(K[t, p, i, j], kinf))
break
elif pathReaction.products == net_reaction.reactants and pathReaction.reactants == net_reaction.products:
if pathReaction.network_kinetics is not None:
kinf = pathReaction.network_kinetics.get_rate_coefficient(
Tlist[t]) / pathReaction.get_equilibrium_constant(Tlist[t])
else:
kinf = pathReaction.kinetics.get_rate_coefficient(
Tlist[t]) / pathReaction.get_equilibrium_constant(Tlist[t])
if K[t, p, i, j] > 2 * kinf: # To allow for a small discretization error
logging.warning('k(T,P) for net reaction {0} exceeds high-P k(T) by {1:g} at {2:g} K, '
'{3:g} bar'.format(net_reaction, K[t, p, i, j] / kinf, Tlist[t], Plist[p] / 1e5))
logging.info(' k(T,P) = {0:9.2e} k(T) = {1:9.2e}'.format(K[t, p, i, j], kinf))
break
# Delete intermediate arrays to conserve memory
self.cleanup()
# We're done processing this network, so mark it as valid
self.valid = True
| 45.947313 | 146 | 0.54628 |
4a24ab82e1442fc1b3d89f5cbffa75419baf4ef9 | 2,187 | py | Python | ipfs_common/src/ipfs_common/ipfs_rosbag.py | Vourhey/robonomics_comm | 1b7c6dc85985909cb925d82b1081ec556423029e | ["BSD-3-Clause"] | 16 | 2017-11-15T15:20:34.000Z | 2021-08-05T03:08:13.000Z | ipfs_common/src/ipfs_common/ipfs_rosbag.py | aang1985/robonomics_comm | 4f7a339e01cbd00fc0f51405c77d89d6ae5e0d7d | ["BSD-3-Clause"] | 80 | 2018-02-08T22:44:41.000Z | 2021-07-15T10:12:09.000Z | ipfs_common/src/ipfs_common/ipfs_rosbag.py | aang1985/robonomics_comm | 4f7a339e01cbd00fc0f51405c77d89d6ae5e0d7d | ["BSD-3-Clause"] | 13 | 2018-02-08T14:22:26.000Z | 2021-11-20T00:29:14.000Z |
# -*- coding: utf-8 -*-
from ipfs_common.srv import IpfsUploadFile, IpfsDownloadFile
from ipfs_common.msg import Multihash, Filepath
from tempfile import NamedTemporaryFile
from rosbag import Bag
import rospy
import os
def ipfs_download(multihash: Multihash) -> (dict, Bag):
rospy.wait_for_service('/ipfs/get_file')
download = rospy.ServiceProxy('/ipfs/get_file', IpfsDownloadFile)
tmpfile = NamedTemporaryFile(delete=False)
res = download(multihash, Filepath(tmpfile.name))
tmpfile.close()
if not res.success:
raise Exception(res.error_msg)
messages = {}
bag = Bag(tmpfile.name, 'r')
for topic, msg, timestamp in bag.read_messages():
if topic not in messages:
messages[topic] = [msg]
else:
messages[topic].append(msg)
os.unlink(tmpfile.name)
return (messages, bag)
def ipfs_upload(messages: dict) -> Multihash:
rospy.wait_for_service('/ipfs/add_file')
upload = rospy.ServiceProxy('/ipfs/add_file', IpfsUploadFile)
with NamedTemporaryFile(delete=False) as tmpfile:
recorder = Bag(tmpfile.name, 'w')
for topic in messages:
for msg in messages[topic]:
recorder.write(topic, msg)
recorder.close()
res = upload(Filepath(tmpfile.name))
if not res.success:
raise Exception(res.error_msg)
return res.ipfs_address
class IpfsRosBag:
def __init__(self, messages=None, multihash=None):
'''
Parameters
----------
messages : dict of lists of topic messages
Serialize messages as objective BAG and upload to IPFS (default is None).
multihash: Multihash
Download and parse objective BAG from IPFS (default is None).
'''
if messages is None and multihash is None:
raise NotImplementedError('messages or multihash should be set')
if messages is None:
self.multihash = multihash
self.messages, self.bag = ipfs_download(multihash)
else:
self.messages = messages
self.multihash = ipfs_upload(messages)
self.bag = None
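# Minimal usage sketch (added for illustration; assumes a running ROS master with the
# ipfs_common add_file/get_file services available and std_msgs installed):
#
#     from std_msgs.msg import String
#     bag = IpfsRosBag(messages={'/greeting': [String(data='hello')]})
#     print(bag.multihash)                            # IPFS multihash of the serialized bag
#     restored = IpfsRosBag(multihash=bag.multihash)  # download and parse it back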
| 33.136364 | 89 | 0.638317 |
4a24ace33a24aea45567c6b2c70a41907fc4b425 | 22,787 | py | Python | tests/quart/test_graphqlview.py | colelin26/graphql-server | 1ccebee8c6102f2855bcf64024d84091d8547f08 | ["MIT"] | 60 | 2020-08-12T11:16:36.000Z | 2022-03-02T02:39:51.000Z | tests/quart/test_graphqlview.py | colelin26/graphql-server | 1ccebee8c6102f2855bcf64024d84091d8547f08 | ["MIT"] | 24 | 2017-03-23T04:19:29.000Z | 2022-02-25T09:32:34.000Z | tests/quart/test_graphqlview.py | colelin26/graphql-server | 1ccebee8c6102f2855bcf64024d84091d8547f08 | ["MIT"] | 25 | 2020-08-01T10:58:24.000Z | 2022-03-22T04:03:19.000Z |
import json
import sys
# from io import StringIO
from urllib.parse import urlencode
import pytest
from quart import Quart, Response, url_for
from quart.testing import QuartClient
from werkzeug.datastructures import Headers
from .app import create_app
@pytest.fixture
def app() -> Quart:
# import app factory pattern
app = create_app(graphiql=True)
# pushes an application context manually
# ctx = app.app_context()
# await ctx.push()
return app
@pytest.fixture
def client(app: Quart) -> QuartClient:
return app.test_client()
@pytest.mark.asyncio
async def execute_client(
app: Quart,
client: QuartClient,
method: str = "GET",
data: str = None,
headers: Headers = None,
**url_params
) -> Response:
if sys.version_info >= (3, 7):
test_request_context = app.test_request_context("/", method=method)
else:
test_request_context = app.test_request_context(method, "/")
async with test_request_context:
string = url_for("graphql")
if url_params:
string += "?" + urlencode(url_params)
if method == "POST":
return await client.post(string, data=data, headers=headers)
elif method == "PUT":
return await client.put(string, data=data, headers=headers)
else:
return await client.get(string)
def response_json(result):
return json.loads(result)
def json_dump_kwarg(**kwargs) -> str:
return json.dumps(kwargs)
def json_dump_kwarg_list(**kwargs):
return json.dumps([kwargs])
@pytest.mark.asyncio
async def test_allows_get_with_query_param(app: Quart, client: QuartClient):
response = await execute_client(app, client, query="{test}")
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {"data": {"test": "Hello World"}}
@pytest.mark.asyncio
async def test_allows_get_with_variable_values(app: Quart, client: QuartClient):
response = await execute_client(
app,
client,
query="query helloWho($who: String){ test(who: $who) }",
variables=json.dumps({"who": "Dolly"}),
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_allows_get_with_operation_name(app: Quart, client: QuartClient):
response = await execute_client(
app,
client,
query="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
operationName="helloWorld",
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {
"data": {"test": "Hello World", "shared": "Hello Everyone"}
}
@pytest.mark.asyncio
async def test_reports_validation_errors(app: Quart, client: QuartClient):
response = await execute_client(
app, client, query="{ test, unknownOne, unknownTwo }"
)
assert response.status_code == 400
result = await response.get_data(raw=False)
assert response_json(result) == {
"errors": [
{
"message": "Cannot query field 'unknownOne' on type 'QueryRoot'.",
"locations": [{"line": 1, "column": 9}],
"path": None,
},
{
"message": "Cannot query field 'unknownTwo' on type 'QueryRoot'.",
"locations": [{"line": 1, "column": 21}],
"path": None,
},
]
}
@pytest.mark.asyncio
async def test_errors_when_missing_operation_name(app: Quart, client: QuartClient):
response = await execute_client(
app,
client,
query="""
query TestQuery { test }
mutation TestMutation { writeTest { test } }
""",
)
assert response.status_code == 400
result = await response.get_data(raw=False)
assert response_json(result) == {
"errors": [
{
"message": "Must provide operation name if query contains multiple operations.", # noqa: E501
"locations": None,
"path": None,
}
]
}
@pytest.mark.asyncio
async def test_errors_when_sending_a_mutation_via_get(app: Quart, client: QuartClient):
response = await execute_client(
app,
client,
query="""
mutation TestMutation { writeTest { test } }
""",
)
assert response.status_code == 405
result = await response.get_data(raw=False)
assert response_json(result) == {
"errors": [
{
"message": "Can only perform a mutation operation from a POST request.",
"locations": None,
"path": None,
}
]
}
@pytest.mark.asyncio
async def test_errors_when_selecting_a_mutation_within_a_get(
app: Quart, client: QuartClient
):
response = await execute_client(
app,
client,
query="""
query TestQuery { test }
mutation TestMutation { writeTest { test } }
""",
operationName="TestMutation",
)
assert response.status_code == 405
result = await response.get_data(raw=False)
assert response_json(result) == {
"errors": [
{
"message": "Can only perform a mutation operation from a POST request.",
"locations": None,
"path": None,
}
]
}
@pytest.mark.asyncio
async def test_allows_mutation_to_exist_within_a_get(app: Quart, client: QuartClient):
response = await execute_client(
app,
client,
query="""
query TestQuery { test }
mutation TestMutation { writeTest { test } }
""",
operationName="TestQuery",
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {"data": {"test": "Hello World"}}
@pytest.mark.asyncio
async def test_allows_post_with_json_encoding(app: Quart, client: QuartClient):
response = await execute_client(
app,
client,
method="POST",
data=json_dump_kwarg(query="{test}"),
headers=Headers({"Content-Type": "application/json"}),
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {"data": {"test": "Hello World"}}
@pytest.mark.asyncio
async def test_allows_sending_a_mutation_via_post(app: Quart, client: QuartClient):
response = await execute_client(
app,
client,
method="POST",
data=json_dump_kwarg(query="mutation TestMutation { writeTest { test } }"),
headers=Headers({"Content-Type": "application/json"}),
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {"data": {"writeTest": {"test": "Hello World"}}}
@pytest.mark.asyncio
async def test_allows_post_with_url_encoding(app: Quart, client: QuartClient):
response = await execute_client(
app,
client,
method="POST",
data=urlencode(dict(query="{test}")),
headers=Headers({"Content-Type": "application/x-www-form-urlencoded"}),
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {"data": {"test": "Hello World"}}
@pytest.mark.asyncio
async def test_supports_post_json_query_with_string_variables(
app: Quart, client: QuartClient
):
response = await execute_client(
app,
client,
method="POST",
data=json_dump_kwarg(
query="query helloWho($who: String){ test(who: $who) }",
variables=json.dumps({"who": "Dolly"}),
),
headers=Headers({"Content-Type": "application/json"}),
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_supports_post_json_query_with_json_variables(
app: Quart, client: QuartClient
):
response = await execute_client(
app,
client,
method="POST",
data=json_dump_kwarg(
query="query helloWho($who: String){ test(who: $who) }",
variables={"who": "Dolly"},
),
headers=Headers({"Content-Type": "application/json"}),
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_supports_post_url_encoded_query_with_string_variables(
app: Quart, client: QuartClient
):
response = await execute_client(
app,
client,
method="POST",
data=urlencode(
dict(
query="query helloWho($who: String){ test(who: $who) }",
variables=json.dumps({"who": "Dolly"}),
)
),
headers=Headers({"Content-Type": "application/x-www-form-urlencoded"}),
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_supports_post_json_query_with_get_variable_values(
app: Quart, client: QuartClient
):
response = await execute_client(
app,
client,
method="POST",
data=json_dump_kwarg(query="query helloWho($who: String){ test(who: $who) }",),
headers=Headers({"Content-Type": "application/json"}),
variables=json.dumps({"who": "Dolly"}),
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_post_url_encoded_query_with_get_variable_values(
app: Quart, client: QuartClient
):
response = await execute_client(
app,
client,
method="POST",
data=urlencode(dict(query="query helloWho($who: String){ test(who: $who) }",)),
headers=Headers({"Content-Type": "application/x-www-form-urlencoded"}),
variables=json.dumps({"who": "Dolly"}),
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_supports_post_raw_text_query_with_get_variable_values(
app: Quart, client: QuartClient
):
response = await execute_client(
app,
client=client,
method="POST",
data="query helloWho($who: String){ test(who: $who) }",
headers=Headers({"Content-Type": "application/graphql"}),
variables=json.dumps({"who": "Dolly"}),
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_allows_post_with_operation_name(app: Quart, client: QuartClient):
response = await execute_client(
app,
client,
method="POST",
data=json_dump_kwarg(
query="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
operationName="helloWorld",
),
headers=Headers({"Content-Type": "application/json"}),
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {
"data": {"test": "Hello World", "shared": "Hello Everyone"}
}
@pytest.mark.asyncio
async def test_allows_post_with_get_operation_name(app: Quart, client: QuartClient):
response = await execute_client(
app,
client,
method="POST",
data="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
headers=Headers({"Content-Type": "application/graphql"}),
operationName="helloWorld",
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {
"data": {"test": "Hello World", "shared": "Hello Everyone"}
}
@pytest.mark.asyncio
@pytest.mark.parametrize("app", [create_app(pretty=True)])
async def test_supports_pretty_printing(app: Quart, client: QuartClient):
response = await execute_client(app, client, query="{test}")
result = await response.get_data(raw=False)
assert result == ("{\n" ' "data": {\n' ' "test": "Hello World"\n' " }\n" "}")
@pytest.mark.asyncio
@pytest.mark.parametrize("app", [create_app(pretty=False)])
async def test_not_pretty_by_default(app: Quart, client: QuartClient):
response = await execute_client(app, client, query="{test}")
result = await response.get_data(raw=False)
assert result == '{"data":{"test":"Hello World"}}'
@pytest.mark.asyncio
async def test_supports_pretty_printing_by_request(app: Quart, client: QuartClient):
response = await execute_client(app, client, query="{test}", pretty="1")
result = await response.get_data(raw=False)
assert result == ("{\n" ' "data": {\n' ' "test": "Hello World"\n' " }\n" "}")
@pytest.mark.asyncio
async def test_handles_field_errors_caught_by_graphql(app: Quart, client: QuartClient):
response = await execute_client(app, client, query="{thrower}")
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {
"errors": [
{
"locations": [{"column": 2, "line": 1}],
"path": ["thrower"],
"message": "Throws!",
}
],
"data": None,
}
@pytest.mark.asyncio
async def test_handles_syntax_errors_caught_by_graphql(app: Quart, client: QuartClient):
response = await execute_client(app, client, query="syntaxerror")
assert response.status_code == 400
result = await response.get_data(raw=False)
assert response_json(result) == {
"errors": [
{
"locations": [{"column": 1, "line": 1}],
"message": "Syntax Error: Unexpected Name 'syntaxerror'.",
"path": None,
}
]
}
@pytest.mark.asyncio
async def test_handles_errors_caused_by_a_lack_of_query(
app: Quart, client: QuartClient
):
response = await execute_client(app, client)
assert response.status_code == 400
result = await response.get_data(raw=False)
assert response_json(result) == {
"errors": [
{"message": "Must provide query string.", "locations": None, "path": None}
]
}
@pytest.mark.asyncio
async def test_handles_batch_correctly_if_is_disabled(app: Quart, client: QuartClient):
response = await execute_client(
app,
client,
method="POST",
data="[]",
headers=Headers({"Content-Type": "application/json"}),
)
assert response.status_code == 400
result = await response.get_data(raw=False)
assert response_json(result) == {
"errors": [
{
"message": "Batch GraphQL requests are not enabled.",
"locations": None,
"path": None,
}
]
}
@pytest.mark.asyncio
async def test_handles_incomplete_json_bodies(app: Quart, client: QuartClient):
response = await execute_client(
app,
client,
method="POST",
data='{"query":',
headers=Headers({"Content-Type": "application/json"}),
)
assert response.status_code == 400
result = await response.get_data(raw=False)
assert response_json(result) == {
"errors": [
{"message": "POST body sent invalid JSON.", "locations": None, "path": None}
]
}
@pytest.mark.asyncio
async def test_handles_plain_post_text(app: Quart, client: QuartClient):
response = await execute_client(
app,
client,
method="POST",
data="query helloWho($who: String){ test(who: $who) }",
headers=Headers({"Content-Type": "text/plain"}),
variables=json.dumps({"who": "Dolly"}),
)
assert response.status_code == 400
result = await response.get_data(raw=False)
assert response_json(result) == {
"errors": [
{"message": "Must provide query string.", "locations": None, "path": None}
]
}
@pytest.mark.asyncio
async def test_handles_poorly_formed_variables(app: Quart, client: QuartClient):
response = await execute_client(
app,
client,
query="query helloWho($who: String){ test(who: $who) }",
variables="who:You",
)
assert response.status_code == 400
result = await response.get_data(raw=False)
assert response_json(result) == {
"errors": [
{"message": "Variables are invalid JSON.", "locations": None, "path": None}
]
}
@pytest.mark.asyncio
async def test_handles_unsupported_http_methods(app: Quart, client: QuartClient):
response = await execute_client(app, client, method="PUT", query="{test}")
assert response.status_code == 405
result = await response.get_data(raw=False)
assert response.headers["Allow"] in ["GET, POST", "HEAD, GET, POST, OPTIONS"]
assert response_json(result) == {
"errors": [
{
"message": "GraphQL only supports GET and POST requests.",
"locations": None,
"path": None,
}
]
}
@pytest.mark.asyncio
async def test_passes_request_into_request_context(app: Quart, client: QuartClient):
response = await execute_client(app, client, query="{request}", q="testing")
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == {"data": {"request": "testing"}}
@pytest.mark.asyncio
@pytest.mark.parametrize("app", [create_app(context={"session": "CUSTOM CONTEXT"})])
async def test_passes_custom_context_into_context(app: Quart, client: QuartClient):
response = await execute_client(app, client, query="{context { session request }}")
assert response.status_code == 200
result = await response.get_data(raw=False)
res = response_json(result)
assert "data" in res
assert "session" in res["data"]["context"]
assert "request" in res["data"]["context"]
assert "CUSTOM CONTEXT" in res["data"]["context"]["session"]
assert "Request" in res["data"]["context"]["request"]
@pytest.mark.asyncio
@pytest.mark.parametrize("app", [create_app(context="CUSTOM CONTEXT")])
async def test_context_remapped_if_not_mapping(app: Quart, client: QuartClient):
response = await execute_client(app, client, query="{context { session request }}")
assert response.status_code == 200
result = await response.get_data(raw=False)
res = response_json(result)
assert "data" in res
assert "session" in res["data"]["context"]
assert "request" in res["data"]["context"]
assert "CUSTOM CONTEXT" not in res["data"]["context"]["request"]
assert "Request" in res["data"]["context"]["request"]
# @pytest.mark.asyncio
# async def test_post_multipart_data(app: Quart, client: QuartClient):
# query = "mutation TestMutation { writeTest { test } }"
# response = await execute_client(
# app,
# client,
# method='POST',
# data={"query": query, "file": (StringIO(), "text1.txt")},
# headers=Headers({"Content-Type": "multipart/form-data"})
# )
#
# assert response.status_code == 200
# result = await response.get_data()
# assert response_json(result) == {
# "data": {u"writeTest": {u"test": u"Hello World"}}
# }
@pytest.mark.asyncio
@pytest.mark.parametrize("app", [create_app(batch=True)])
async def test_batch_allows_post_with_json_encoding(app: Quart, client: QuartClient):
response = await execute_client(
app,
client,
method="POST",
data=json_dump_kwarg_list(query="{test}"),
headers=Headers({"Content-Type": "application/json"}),
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == [{"data": {"test": "Hello World"}}]
@pytest.mark.asyncio
@pytest.mark.parametrize("app", [create_app(batch=True)])
async def test_batch_supports_post_json_query_with_json_variables(
app: Quart, client: QuartClient
):
response = await execute_client(
app,
client,
method="POST",
data=json_dump_kwarg_list(
query="query helloWho($who: String){ test(who: $who) }",
variables={"who": "Dolly"},
),
headers=Headers({"Content-Type": "application/json"}),
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == [{"data": {"test": "Hello Dolly"}}]
@pytest.mark.asyncio
@pytest.mark.parametrize("app", [create_app(batch=True)])
async def test_batch_allows_post_with_operation_name(app: Quart, client: QuartClient):
response = await execute_client(
app,
client,
method="POST",
data=json_dump_kwarg_list(
# id=1,
query="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
operationName="helloWorld",
),
headers=Headers({"Content-Type": "application/json"}),
)
assert response.status_code == 200
result = await response.get_data(raw=False)
assert response_json(result) == [
{"data": {"test": "Hello World", "shared": "Hello Everyone"}}
]
| 31.087312 | 110 | 0.615877 |
4a24ace64d7788974b73d6362a1c8c287c02637a | 188 | py | Python | redirink/insights/tests/conftest.py | Egor4ik325/redirink | 17ef85f48145ee6112f2fcbab60dcd9d65ba78bf | [
"MIT"
] | null | null | null | redirink/insights/tests/conftest.py | Egor4ik325/redirink | 17ef85f48145ee6112f2fcbab60dcd9d65ba78bf | [
"MIT"
] | null | null | null | redirink/insights/tests/conftest.py | Egor4ik325/redirink | 17ef85f48145ee6112f2fcbab60dcd9d65ba78bf | [
"MIT"
] | 1 | 2021-12-31T00:46:31.000Z | 2021-12-31T00:46:31.000Z | import factory
from .factories import InsightFactory
# def insight_data() -> dict:
# factory_dict = factory.build(dict, FACTORY_CLASS=InsightFactory)
# delete factory_dict["id"]
| 23.5 | 70 | 0.744681 |
4a24ae111c406b784d7cc2c2e0f28bf7d85058ba | 1,287 | py | Python | ucsrb/management/commands/set_baseline_flow.py | Ecotrust/ucsrb | 29d97cf1f21537aaf24f38e7dedc7c8cfccf1f12 | [
"MIT"
] | 1 | 2018-07-31T00:58:30.000Z | 2018-07-31T00:58:30.000Z | ucsrb/management/commands/set_baseline_flow.py | Ecotrust/ucsrb | 29d97cf1f21537aaf24f38e7dedc7c8cfccf1f12 | [
"MIT"
] | 264 | 2017-10-24T23:54:52.000Z | 2021-10-16T15:40:47.000Z | ucsrb/management/commands/set_baseline_flow.py | Ecotrust/ucsrb | 29d97cf1f21537aaf24f38e7dedc7c8cfccf1f12 | [
"MIT"
] | 1 | 2019-07-16T06:37:45.000Z | 2019-07-16T06:37:45.000Z | from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.contrib.auth.models import User
from ucsrb.models import FocusArea, TreatmentScenario
from ucsrb.tasks import runBaseline
from time import sleep
class Command(BaseCommand):
help = 'Set Baseline Data. 1 argument - the name of the basin to reset: Entiat, Methow, Okanogan, or Wenatchee'
def add_arguments(self, parser):
parser.add_argument('basin', type=str)
def handle(self, *args, **options):
import sys, csv
# Check out Input
try:
basin_name = options['basin']
        except KeyError:
self.stdout.write(
'--- ERROR: You must provide the basin for which to run baseline data ---'
)
sys.exit()
if not basin_name.lower() in settings.BASIN_RESET_LOOKUP.keys():
self.stdout.write(
                '--- ERROR: Provided basin {} is not one of the valid options: Entiat, Methow, Okanogan, or Wenatchee ---'.format(basin_name)
)
sys.exit()
runBaseline.delay(basin_name, settings.NORMAL_YEAR_LABEL)
runBaseline.delay(basin_name, settings.WET_YEAR_LABEL)
runBaseline.delay(basin_name, settings.DRY_YEAR_LABEL)
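        # Example invocation (a sketch, assuming this app is installed in a
        # Django project so the command is exposed through manage.py):
        #
        #   python manage.py set_baseline_flow methow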
| 39 | 133 | 0.663559 |
4a24affac94dd960d0e1d37ec5ee6aa218262901 | 1,526 | py | Python | problems/knapsackProblemMaximizedSum.py | lnogueir/swe-interview-prep | 48ef00e94d4603b392db6ac272277f5f3d37d2f5 | [
"MIT"
] | null | null | null | problems/knapsackProblemMaximizedSum.py | lnogueir/swe-interview-prep | 48ef00e94d4603b392db6ac272277f5f3d37d2f5 | [
"MIT"
] | null | null | null | problems/knapsackProblemMaximizedSum.py | lnogueir/swe-interview-prep | 48ef00e94d4603b392db6ac272277f5f3d37d2f5 | [
"MIT"
] | null | null | null | '''
Prompt:
You're given an array of arrays where each subarray holds two integer values
and represents an item; the first integer is the item's value, and the second
integer is the item's weight. You're also given an integer representing the
maximum capacity of a knapsack that you have.
Your goal is to fit items in your knapsack without having the sum of their weights
exceed the knapsack's capacity, all that while maximizing their combined values.
Note that you only have one of each item at your disposal.
Example:
input:
{
"items": [
[1, 2],
[4, 3],
[5, 6],
[6, 7]
],
"capacity": 10
}
output:
[10, [1, 3]] since items [4, 3] and [6, 7] (indices 1 and 3) give the maximum total value of 10 without exceeding the capacity.
'''
class Solver():
def __init__(self):
self.maxValue = 0
self.indexes = []
def solve(self, items, carryCapacity, carryValue=0, i=0, idxs = []):
if carryCapacity < 0:
return
if carryValue > self.maxValue:
self.maxValue = carryValue
self.indexes = idxs
for idx in range(i, len(items)):
value, weight = items[idx]
remainder = carryCapacity - weight
self.solve(items, remainder, carryValue+value, idx+1, [*idxs, idx])
continue
def knapsackProblem(items, capacity):
solver = Solver()
solver.solve(items, capacity)
return [
solver.maxValue,
solver.indexes
]
print(knapsackProblem([
[1, 2],
[4, 3],
[5, 6],
[6, 7]
], 10))
print(knapsackProblem([
[1, 3],
[4, 5],
[5, 2],
[6, 4]
], 8))
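# Expected results of the two calls above (worked out by hand; the index lists
# assume the solver visits item indices in ascending order, as solve() does):
#   first call  -> [10, [1, 3]]  items [4, 3] + [6, 7]: value 4 + 6 = 10, weight 3 + 7 = 10
#   second call -> [11, [2, 3]]  items [5, 2] + [6, 4]: value 5 + 6 = 11, weight 2 + 4 = 6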
| 21.492958 | 83 | 0.625819 |
4a24b0e8d72529acba8583a75a0c177f70d1a6af | 2,005 | py | Python | ReadCategoriesFromExcel.py | mbuckaway/CrossMgr | 4c64e429eb3215fda1b685c5e684c56f5d0c02cf | [
"MIT"
] | 1 | 2020-02-05T11:22:03.000Z | 2020-02-05T11:22:03.000Z | ReadCategoriesFromExcel.py | mbuckaway/CrossMgr | 4c64e429eb3215fda1b685c5e684c56f5d0c02cf | [
"MIT"
] | null | null | null | ReadCategoriesFromExcel.py | mbuckaway/CrossMgr | 4c64e429eb3215fda1b685c5e684c56f5d0c02cf | [
"MIT"
] | null | null | null | import six
import Utils
import Model
sheetName = '--CrossMgr-Categories'
def ReadCategoriesFromExcel( reader ):
race = Model.race
if not race or sheetName not in reader.sheet_names():
return False
HeadersFields = (
('Category Type', 'catType'),
('Name', 'name'),
('Gender', 'gender'),
('Numbers', 'catStr'),
('Start Offset', 'startOffset'),
('Race Laps', 'numLaps'),
('Race Distance', 'distance'),
('Race Minutes', None),
('Publish', 'publishFlag'),
('Upload', 'uploadFlag'),
('Series', 'seriesFlag'),
)
HeadersToFields = dict( (k, v) for k, v in HeadersFields )
HeaderSet = set( k for k, v in HeadersFields )
# If the course is defined, default the Categories to the course length.
if race.geoTrack:
distance = race.geoTrack.lengthKm if race.distanceUnit == race.UnitKm else race.geoTrack.lengthMiles
else:
distance = None
raceMinutesMax = -1
headerMap = {}
categories = []
for r, row in enumerate(reader.iter_list(sheetName)):
# Since this is machine generated, assume the headers are in the first row.
if not headerMap:
for c, v in enumerate(row):
if v in HeaderSet:
headerMap[v] = c
continue
catRow = {}
for h, c in six.iteritems(headerMap):
catField = HeadersToFields[h]
if h == 'Race Minutes' and row[c]:
try:
raceMinutes = int(row[c])
raceMinutesMax = max( raceMinutesMax, raceMinutes )
catRow['raceMinutes'] = raceMinutes
except ValueError:
pass
elif h == 'Race Distance' and not row[c] and distance:
catRow['distance'] = distance
if catField is not None:
catRow[catField] = row[c]
categories.append( catRow )
if categories:
try:
race.setCategories( race.mergeExistingCategoryAttributes(categories) )
race.adjustAllCategoryWaveNumbers()
if raceMinutesMax > 0:
race.minutes = raceMinutesMax
return True
except Exception as e:
Utils.writeLog( 'ReadCategoriesFromExcel: error: {}'.format(e) )
return False
else:
return False
| 26.733333 | 102 | 0.673317 |
4a24b1265c6cd501daa6777d1dcde17510cbb3b4 | 638 | py | Python | test_apps/test.py | alexborsch/wms-assistant | 745593f55894466389e09d01e2a7aa140d4ce6c1 | [
"MIT"
] | null | null | null | test_apps/test.py | alexborsch/wms-assistant | 745593f55894466389e09d01e2a7aa140d4ce6c1 | [
"MIT"
] | null | null | null | test_apps/test.py | alexborsch/wms-assistant | 745593f55894466389e09d01e2a7aa140d4ce6c1 | [
"MIT"
] | null | null | null | import sys
import time
def updt(total, progress):
"""
Displays or updates a console progress bar.
Original source: /questions/27978/python-progress-bar/205178#205178
"""
barLength, status = 20, ""
progress = float(progress) / float(total)
if progress >= 1.:
progress, status = 1, "\r\n"
block = int(round(barLength * progress))
text = "\r[{}] {:.0f}% {}".format(
"#" * block + "-" * (barLength - block), round(progress * 100, 0),
status)
sys.stdout.write(text)
sys.stdout.flush()
runs = 300
for run_num in range(runs):
time.sleep(.1)
updt(runs, run_num + 1) | 24.538462 | 74 | 0.594044 |
4a24b140337b2e2f1ba9e2e28a5096fdc57168f0 | 2,757 | py | Python | exp/taskB/inference.py | temi92/epic-kitchens-55-action-models | 40e984bbdcf502539b3569774cb6b5526eb71c3c | [
"Apache-2.0"
] | null | null | null | exp/taskB/inference.py | temi92/epic-kitchens-55-action-models | 40e984bbdcf502539b3569774cb6b5526eb71c3c | [
"Apache-2.0"
] | null | null | null | exp/taskB/inference.py | temi92/epic-kitchens-55-action-models | 40e984bbdcf502539b3569774cb6b5526eb71c3c | [
"Apache-2.0"
] | null | null | null | import torch
from pathlib import Path
import sys
import cv2
sys.path.append("..")
from models.model import get_tsn_model
import numpy as np
import json
import argparse
parser = argparse.ArgumentParser(description='running inference on video')
parser.add_argument("weights", type=Path, help="weights file for model")
parser.add_argument("video_file", type=Path, help="path to video file")
parser.add_argument("json_file", type=Path, help="json file containing index to class mappings")
args = parser.parse_args()
weights = args.weights
video_file = args.video_file
json_file = args.json_file
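# Example invocation (a sketch; the file names below are placeholders, not
# artifacts shipped with the repository):
#
#   python inference.py tsn_resnet50_weights.pth kitchen_clip.mp4 class_index.json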
def pre_process_img(img):
img = cv2.resize(img,(tsn.input_size, tsn.input_size), interpolation=cv2.INTER_LINEAR)
#convert to RGB..
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def get_class_name(out):
#load json file that contains index to class name mapping ..
with open(json_file, "r") as f:
content = json.load(f)
_, pred = out.topk(1, dim=-1, largest=True, sorted =True) #returns index of largest element
pred = pred.item()
class_name = [k for k, v in content.items() if v == pred][0]
return class_name
def infer(img_stack):
img_tensor = torch.from_numpy(img_stack)
#normalize and permute
img_tensor = (img_tensor.float()/255.0 - tsn.input_mean[0])/tsn.input_std[0]
img_tensor = img_tensor.permute(2,0, 1)
#add batch dimenstion
img_tensor = img_tensor.unsqueeze(0)
with torch.no_grad():
#run inference on img
out, _ = tsn(img_tensor)
class_name = get_class_name(out)
return class_name
#load model and weights ..
tsn = get_tsn_model(base_model="resnet50", segment_count=8, tune_model=True)
tsn.eval()
w_dict = torch.load(weights)
tsn.load_state_dict(w_dict)
cap = cv2.VideoCapture(str(args.video_file))
#write video
fourcc = cv2.VideoWriter_fourcc(*'XVID')
_, frame = cap.read()
out = cv2.VideoWriter('output.avi',fourcc, 10.0, (frame.shape[1], frame.shape[0]))
img_stack = []
num_segments = 8
while (cap.isOpened()):
ret, frame = cap.read()
if frame is None:
break
img_stack.append(frame.copy())
if len(img_stack) == num_segments:
images = list(map(pre_process_img,img_stack))
images = np.stack(images, axis=2)
images = images.reshape((images.shape[0], images.shape[1], -1))
class_name = infer(images)
img_stack = []
cv2.putText(frame, class_name, org= (frame.shape[1] -250, 55),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2.5,
color=(255, 0, 0))
out.write(frame)
cv2.imshow("frame", frame)
if cv2.waitKey(100) & 0xFF == ord('q'): #output at 10FPS.
break
cap.release()
cv2.destroyAllWindows()
out.release()
| 28.42268 | 119 | 0.686253 |
4a24b18b41e683a39094ec74d00965204db0a6d5 | 2,826 | py | Python | examples/benchmark_with_filtering.py | jina-ai/pqlite | 2ce1ec2283b381f5153ea60141a6bb474bbf0f0c | [
"Apache-2.0"
] | 45 | 2021-12-10T07:39:39.000Z | 2022-02-20T22:58:28.000Z | examples/benchmark_with_filtering.py | jina-ai/pqlite | 2ce1ec2283b381f5153ea60141a6bb474bbf0f0c | [
"Apache-2.0"
] | 30 | 2021-12-10T07:46:28.000Z | 2022-02-18T09:27:48.000Z | examples/benchmark_with_filtering.py | jina-ai/annlite | e4e706e313ba5cbfb7083a5dea9e75b8d2813394 | [
"Apache-2.0"
] | null | null | null | import os
import shutil
import numpy as np
from jina import Document, DocumentArray
from jina.logging.profile import TimeContext
from annlite import AnnLite
n_index = [10_000, 100_000, 500_000, 1_000_000]
n_query = [1, 8, 64]
D = 768
R = 5
B = 5000
n_cells = 1
probs = [[0.20, 0.30, 0.50], [0.05, 0.15, 0.80]]
categories = ['comic', 'movie', 'audiobook']
def clean_workspace():
if os.path.exists('./data'):
shutil.rmtree('./data')
if os.path.exists('./workspace'):
shutil.rmtree('./workspace')
def docs_with_tags(N, D, probs, categories):
all_docs = []
for k, prob in enumerate(probs):
n_current = int(N * prob)
X = np.random.random((n_current, D)).astype(np.float32)
docs = [
Document(
embedding=X[i],
tags={
'category': categories[k],
},
)
for i in range(n_current)
]
all_docs.extend(docs)
return DocumentArray(all_docs)
results = []
for n_i in n_index:
results_ni = []
for current_probs in probs:
clean_workspace()
columns = [('category', str)]
idxer = AnnLite(
dim=D,
initial_size=n_i,
n_cells=n_cells,
metas={'workspace': './workspace'},
columns=columns,
)
da = docs_with_tags(n_i, D, current_probs, categories)
with TimeContext(f'indexing {n_i} docs') as t_i:
for i, _batch in enumerate(da.batch(batch_size=B)):
idxer.index(_batch)
for cat, prob in zip(categories, current_probs):
f = {'category': {'$eq': cat}}
query_times = []
for n_q in n_query:
qa = DocumentArray.empty(n_q)
q_embs = np.random.random([n_q, D]).astype(np.float32)
qa.embeddings = q_embs
t_qs = []
for _ in range(R):
with TimeContext(f'searching {n_q} docs') as t_q:
idxer.search(qa, filter=f)
t_qs.append(t_q.duration)
query_times.append(np.mean(t_qs[1:]))
print(f'\n\nprob={prob}, current_probs={current_probs}, n_i={n_i}\n\n')
results_ni.append([n_i, int(100 * prob), t_i.duration] + query_times)
results.append(results_ni)
title = '| Stored data | % same filter | Indexing time | Query size=1 | Query size=8 | Query size=64 |'
print(title)
print('|-----' * 6 + '|')
for block in results:
sorted_elements_in_block = np.argsort([b[1] for b in block])
for pos in sorted_elements_in_block:
res = block[pos]
print(
''.join(
[f'| {x} ' for x in res[0:2]] + [f'| {x:.3f} ' for x in res[2:]] + ['|']
)
)
| 26.660377 | 101 | 0.536093 |
4a24b20775fa4f2a9e2ee1e4770cd32651978482 | 5,218 | py | Python | src/engine/main_engine.py | Sarajvega/kaggle-birdsong-recognition | cbe1c8b59d03a1ac210439fef6045ce4e57235dd | [
"MIT"
] | 137 | 2020-09-17T16:36:28.000Z | 2022-03-23T23:54:09.000Z | src/engine/main_engine.py | Sarajvega/kaggle-birdsong-recognition | cbe1c8b59d03a1ac210439fef6045ce4e57235dd | [
"MIT"
] | 3 | 2020-09-18T07:42:37.000Z | 2021-07-19T22:37:38.000Z | src/engine/main_engine.py | Sarajvega/kaggle-birdsong-recognition | cbe1c8b59d03a1ac210439fef6045ce4e57235dd | [
"MIT"
] | 38 | 2020-09-20T07:24:07.000Z | 2022-03-14T03:06:18.000Z | from torch.utils.data import DataLoader
import torch
from tqdm.auto import tqdm
import os
import cProfile
from ignite.engine import Events, Engine
from ignite.handlers import Checkpoint
from engine.base.base_engine import BaseEngine
from ignite.utils import convert_tensor
class MainEngine(BaseEngine):
def __init__(self, local_rank, hparams):
super().__init__(local_rank, hparams)
def prepare_batch(self, batch, mode = 'valid'):
if mode == 'train':
x, y = batch["images"], batch["coded_labels"]
elif mode == 'valid':
x, y = batch["images"], batch["coded_labels"]
elif mode == 'test':
x, inputs = batch["images"], batch
return (
convert_tensor(x, device=self.device, non_blocking=True),
(inputs)
)
return (
convert_tensor(x, device=self.device, non_blocking=True),
convert_tensor(y, device=self.device, non_blocking=True)
)
def loss_fn(self, y_pred, y):
loss, dict_loss = self.ls_fn(y_pred, y)
return loss, dict_loss
def output_transform(self, x, y, y_pred, loss=None, dict_loss=None, mode = 'valid'):
if mode == 'train':
return {"loss": loss.detach(), "x": x, "y_pred": y_pred, "y":y}
elif mode == 'valid':
return {"loss": loss.detach(), "x": x, "y_pred": y_pred, "y":y}
elif mode == 'test':
return {"y_pred": y_pred, "x": x, "input":y}
def _init_optimizer(self):
if self.hparams.optimizer_name == "adamw":
self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.hparams.lr)
elif self.hparams.optimizer_name == "adam":
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.hparams.lr)
def _init_criterion_function(self):
if self.hparams.criterion_name == "bce":
from loss.bce_loss import BCELoss
self.criterion = BCELoss()
elif self.hparams.criterion_name == "smooth_bce":
from loss.smooth_bce_loss import SmoothBCELoss
self.criterion = SmoothBCELoss(smooth=self.hparams.smooth)
def _init_scheduler(self):
if self.hparams.scheduler_name == "none":
self.scheduler = None
elif self.hparams.scheduler_name == "warmup_with_cosine":
from ignite.contrib.handlers import LinearCyclicalScheduler, CosineAnnealingScheduler, ConcatScheduler
lr = self.hparams.lr
if self.hparams.run_params["epoch_length"]:
epoch_length = self.hparams.run_params["epoch_length"]
else:
epoch_length = len(self.train_loader)
num_epochs = self.hparams.run_params["max_epochs"]
scheduler_1 = LinearCyclicalScheduler(self.optimizer, "lr", start_value=lr*0.01, end_value=lr, cycle_size=epoch_length*2)
scheduler_2 = CosineAnnealingScheduler(self.optimizer, "lr", start_value=lr, end_value=lr*0.001, cycle_size=num_epochs*epoch_length)
durations = [epoch_length, ]
self.scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=durations)
def _init_logger(self):
if self.hparams.logger_name == "print":
from logger.print.print_logger import PrintLogger
self.logger = PrintLogger(**self.hparams.logger_params)
elif self.hparams.logger_name == "neptune":
from logger.neptune.neptune_logger import MyNeptuneLogger
self.logger = MyNeptuneLogger(**self.hparams.logger_params)
def _init_metrics(self):
from ignite.metrics import Loss, RunningAverage
self.train_metrics = {
'train_avg_loss': RunningAverage(output_transform=lambda x: x["loss"])
}
self.validation_metrics = {
'valid_avg_loss': RunningAverage(output_transform=lambda x: x["loss"])
}
if "f1score" in self.hparams.metrics:
from metrics.custom_f1score import CustomF1Score
self.validation_metrics["f1score"] = CustomF1Score(output_transform=lambda x: (x["y_pred"], x["y"]))
def _init_model(self):
if self.hparams.model_name == "dcase":
from models.classifier_dcase import Classifier_DCase
self.model = Classifier_DCase(self.hparams.num_classes)
def _init_augmentation(self):
if self.hparams.aug_name == "baseline":
from augmentations.base_augment import get_transforms
self.tfms = get_transforms()
def _init_train_datalader(self):
from dataloaders.audio_dataset import AudioDataset
self.train_ds = AudioDataset(**self.hparams.train_ds_params, transform=self.tfms["train"])
def _init_valid_dataloader(self):
from dataloaders.audio_dataset import AudioDataset
self.valid_ds = AudioDataset(**self.hparams.valid_ds_params, transform=self.tfms["valid"])
def _init_test_dataloader(self):
from dataloaders.audio_dataset import AudioDataset
self.test_ds = AudioDataset(**self.hparams.test_ds_params, transform=self.tfms["valid"])
| 43.848739 | 144 | 0.645458 |
4a24b2488da72c5f6be45ec9a836b8e9c0f0a3d4 | 2,938 | py | Python | configs/eftnet/R2_ttf53_beta03_3lr_log_1x.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | [
"Apache-2.0"
] | null | null | null | configs/eftnet/R2_ttf53_beta03_3lr_log_1x.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | [
"Apache-2.0"
] | null | null | null | configs/eftnet/R2_ttf53_beta03_3lr_log_1x.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | [
"Apache-2.0"
] | null | null | null | # model settings
model = dict(
type='CenterNet',
pretrained='./pretrain/darknet53.pth',
backbone=dict(
type='DarknetV3',
layers=[1, 2, 8, 8, 4],
inplanes=[3, 32, 64, 128, 256, 512],
planes=[32, 64, 128, 256, 512, 1024],
norm_cfg=dict(type='BN'),
out_indices=(1, 2, 3, 4),
frozen_stages=1,
norm_eval=False),
neck=dict(type='None'),
bbox_head=dict(
type='CXTHead',
inplanes=(128, 256, 512, 1024),
head_conv=128,
wh_conv=64,
use_deconv=False,
norm_after_upsample=False,
hm_head_conv_num=2,
wh_head_conv_num=2,
ct_head_conv_num=1,
fovea_hm=False,
num_classes=81,
use_exp_wh=False,
wh_offset_base=16,
wh_agnostic=True,
shortcut_cfg=(1, 2, 3),
shortcut_attention=(False, False, False),
norm_cfg=dict(type='BN'),
norm_wh=False,
hm_center_ratio=0.27,
center_ratio=0.3,
hm_init_value=None,
giou_weight=5.,
merge_weight=1.,
hm_weight=1.,
ct_weight=1.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.01,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=12,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[9, 11])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=9)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'ttf53_beta03_3lr_log_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 29.676768 | 86 | 0.640231 |
4a24b35fb5d7d03f8770e7abaf33216dbc8d6b45 | 13,218 | py | Python | musicbot/playlist.py | thisistoomuchwork/chris-music-bot-1.0 | e3364874fd4bca0d239c15422ed867ed9f45e229 | [
"MIT"
] | null | null | null | musicbot/playlist.py | thisistoomuchwork/chris-music-bot-1.0 | e3364874fd4bca0d239c15422ed867ed9f45e229 | [
"MIT"
] | null | null | null | musicbot/playlist.py | thisistoomuchwork/chris-music-bot-1.0 | e3364874fd4bca0d239c15422ed867ed9f45e229 | [
"MIT"
] | null | null | null | import os.path
import logging
import datetime
from random import shuffle
from itertools import islice
from collections import deque
from urllib.error import URLError
from youtube_dl.utils import ExtractorError, DownloadError, UnsupportedError
from .utils import get_header
from .constructs import Serializable
from .lib.event_emitter import EventEmitter
from .entry import URLPlaylistEntry, StreamPlaylistEntry
from .exceptions import ExtractionError, WrongEntryTypeError
log = logging.getLogger(__name__)
class Playlist(EventEmitter, Serializable):
"""
A playlist is manages the list of songs that will be played.
"""
def __init__(self, bot):
super().__init__()
self.bot = bot
self.loop = bot.loop
self.downloader = bot.downloader
self.entries = deque()
def __iter__(self):
return iter(self.entries)
def __len__(self):
return len(self.entries)
def shuffle(self):
shuffle(self.entries)
def clear(self):
self.entries.clear()
async def add_entry(self, song_url, **meta):
"""
Validates and adds a song_url to be played. This does not start the download of the song.
Returns the entry & the position it is in the queue.
:param song_url: The song url to add to the playlist.
:param meta: Any additional metadata to add to the playlist entry.
"""
try:
info = await self.downloader.extract_info(self.loop, song_url, download=False)
except Exception as e:
raise ExtractionError('Could not extract information from {}\n\n{}'.format(song_url, e))
if not info:
raise ExtractionError('Could not extract information from %s' % song_url)
# TODO: Sort out what happens next when this happens
if info.get('_type', None) == 'playlist':
raise WrongEntryTypeError("This is a playlist.", True, info.get('webpage_url', None) or info.get('url', None))
if info.get('is_live', False):
return await self.add_stream_entry(song_url, info=info, **meta)
# TODO: Extract this to its own function
if info['extractor'] in ['generic', 'Dropbox']:
try:
headers = await get_header(self.bot.aiosession, info['url'])
content_type = headers.get('CONTENT-TYPE')
log.debug("Got content type {}".format(content_type))
except Exception as e:
log.warning("Failed to get content type for url {} ({})".format(song_url, e))
content_type = None
if content_type:
if content_type.startswith(('application/', 'image/')):
if not any(x in content_type for x in ('/ogg', '/octet-stream')):
# How does a server say `application/ogg` what the actual fuck
raise ExtractionError("Invalid content type \"%s\" for url %s" % (content_type, song_url))
elif content_type.startswith('text/html'):
log.warning("Got text/html for content-type, this might be a stream. Attempting to stream.")
return await self.add_stream_entry(song_url, info=info, **meta) # TODO: Check for shoutcast/icecast
elif not content_type.startswith(('audio/', 'video/')):
log.warning("Questionable content-type \"{}\" for url {}".format(content_type, song_url))
entry = URLPlaylistEntry(
self,
song_url,
info.get('title', 'Untitled'),
info.get('duration', 0) or 0,
self.downloader.ytdl.prepare_filename(info),
**meta
)
self._add_entry(entry)
return entry, len(self.entries)
async def add_stream_entry(self, song_url, info=None, **meta):
if info is None:
info = {'title': song_url, 'extractor': None}
try:
info = await self.downloader.extract_info(self.loop, song_url, download=False)
except DownloadError as e:
if e.exc_info[0] == UnsupportedError: # ytdl doesn't like it but its probably a stream
log.debug("Assuming content is a direct stream")
elif e.exc_info[0] == URLError:
if os.path.exists(os.path.abspath(song_url)):
raise ExtractionError("This is not a stream, this is a file path.")
else: # it might be a file path that just doesn't exist
raise ExtractionError("Invalid input: {0.exc_info[0]}: {0.exc_info[1].reason}".format(e))
else:
# traceback.print_exc()
raise ExtractionError("Unknown error: {}".format(e))
except Exception as e:
log.error('Could not extract information from {} ({}), falling back to direct'.format(song_url, e), exc_info=True)
dest_url = song_url
if info.get('extractor'):
dest_url = info.get('url')
if info.get('extractor', None) == 'twitch:stream': # may need to add other twitch types
title = info.get('description')
else:
title = info.get('title', 'Untitled')
# TODO: A bit more validation, "~stream some_url" should not just say :ok_hand:
entry = StreamPlaylistEntry(
self,
song_url,
title,
destination = dest_url,
**meta
)
self._add_entry(entry)
return entry, len(self.entries)
async def import_from(self, playlist_url, **meta):
"""
Imports the songs from `playlist_url` and queues them to be played.
Returns a list of `entries` that have been enqueued.
:param playlist_url: The playlist url to be cut into individual urls and added to the playlist
:param meta: Any additional metadata to add to the playlist entry
"""
position = len(self.entries) + 1
entry_list = []
try:
info = await self.downloader.safe_extract_info(self.loop, playlist_url, download=False)
except Exception as e:
raise ExtractionError('Could not extract information from {}\n\n{}'.format(playlist_url, e))
if not info:
raise ExtractionError('Could not extract information from %s' % playlist_url)
# Once again, the generic extractor fucks things up.
if info.get('extractor', None) == 'generic':
url_field = 'url'
else:
url_field = 'webpage_url'
baditems = 0
for item in info['entries']:
if item:
try:
entry = URLPlaylistEntry(
self,
item[url_field],
item.get('title', 'Untitled'),
item.get('duration', 0) or 0,
self.downloader.ytdl.prepare_filename(item),
**meta
)
self._add_entry(entry)
entry_list.append(entry)
except Exception as e:
baditems += 1
log.warning("Could not add item", exc_info=e)
log.debug("Item: {}".format(item), exc_info=True)
else:
baditems += 1
if baditems:
log.info("Skipped {} bad entries".format(baditems))
return entry_list, position
async def async_process_youtube_playlist(self, playlist_url, **meta):
"""
Processes youtube playlists links from `playlist_url` in a questionable, async fashion.
:param playlist_url: The playlist url to be cut into individual urls and added to the playlist
:param meta: Any additional metadata to add to the playlist entry
"""
try:
info = await self.downloader.safe_extract_info(self.loop, playlist_url, download=False, process=False)
except Exception as e:
raise ExtractionError('Could not extract information from {}\n\n{}'.format(playlist_url, e))
if not info:
raise ExtractionError('Could not extract information from %s' % playlist_url)
gooditems = []
baditems = 0
for entry_data in info['entries']:
if entry_data:
baseurl = info['webpage_url'].split('playlist?list=')[0]
song_url = baseurl + 'watch?v=%s' % entry_data['id']
try:
entry, elen = await self.add_entry(song_url, **meta)
gooditems.append(entry)
except ExtractionError:
baditems += 1
except Exception as e:
baditems += 1
log.error("Error adding entry {}".format(entry_data['id']), exc_info=e)
else:
baditems += 1
if baditems:
log.info("Skipped {} bad entries".format(baditems))
return gooditems
async def async_process_sc_bc_playlist(self, playlist_url, **meta):
"""
Processes soundcloud set and bancdamp album links from `playlist_url` in a questionable, async fashion.
:param playlist_url: The playlist url to be cut into individual urls and added to the playlist
:param meta: Any additional metadata to add to the playlist entry
"""
try:
info = await self.downloader.safe_extract_info(self.loop, playlist_url, download=False, process=False)
except Exception as e:
raise ExtractionError('Could not extract information from {}\n\n{}'.format(playlist_url, e))
if not info:
raise ExtractionError('Could not extract information from %s' % playlist_url)
gooditems = []
baditems = 0
for entry_data in info['entries']:
if entry_data:
song_url = entry_data['url']
try:
entry, elen = await self.add_entry(song_url, **meta)
gooditems.append(entry)
except ExtractionError:
baditems += 1
except Exception as e:
baditems += 1
log.error("Error adding entry {}".format(entry_data['id']), exc_info=e)
else:
baditems += 1
if baditems:
log.info("Skipped {} bad entries".format(baditems))
return gooditems
def _add_entry(self, entry, *, head=False):
if head:
self.entries.appendleft(entry)
else:
self.entries.append(entry)
self.emit('entry-added', playlist=self, entry=entry)
if self.peek() is entry:
entry.get_ready_future()
async def get_next_entry(self, predownload_next=True):
"""
A coroutine which will return the next song or None if no songs left to play.
Additionally, if predownload_next is set to True, it will attempt to download the next
song to be played - so that it's ready by the time we get to it.
"""
if not self.entries:
return None
entry = self.entries.popleft()
if predownload_next:
next_entry = self.peek()
if next_entry:
next_entry.get_ready_future()
return await entry.get_ready_future()
def peek(self):
"""
Returns the next entry that should be scheduled to be played.
"""
if self.entries:
return self.entries[0]
async def estimate_time_until(self, position, player):
"""
        (very) Roughly estimates the time until the queue reaches the given 'position'
"""
estimated_time = sum(e.duration for e in islice(self.entries, position - 1))
# When the player plays a song, it eats the first playlist item, so we just have to add the time back
if not player.is_stopped and player.current_entry:
estimated_time += player.current_entry.duration - player.progress
return datetime.timedelta(seconds=estimated_time)
def count_for_user(self, user):
return sum(1 for e in self.entries if e.meta.get('author', None) == user)
def __json__(self):
return self._enclose_json({
'entries': list(self.entries)
})
@classmethod
def _deserialize(cls, raw_json, bot=None):
assert bot is not None, cls._bad('bot')
# log.debug("Deserializing playlist")
pl = cls(bot)
for entry in raw_json['entries']:
pl.entries.append(entry)
# TODO: create a function to init downloading (since we don't do it here)?
return pl
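# A rough usage sketch (an assumption, not part of the original module): the bot
# object must expose .loop, .downloader and .aiosession as used above, and the
# coroutines must be awaited from inside the bot's event loop.
#
#     playlist = Playlist(bot)
#     entry, position = await playlist.add_entry(song_url, author=author, channel=channel)
#     entry = await playlist.get_next_entry()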
| 37.02521 | 131 | 0.564306 |
4a24b4d7e3ae3acd7d049c6e1303f4516cf8105a | 2,725 | py | Python | weld/pandas_weld/tests/io/test_parsers.py | radujica/data-analysis-pipelines | 64a6e5613cb1ab2ba2eb2f763c2aa1e3bc5e0d3b | [
"MIT"
] | 5 | 2018-03-05T13:19:35.000Z | 2020-11-17T15:59:41.000Z | weld/pandas_weld/tests/io/test_parsers.py | radujica/data-analysis-pipelines | 64a6e5613cb1ab2ba2eb2f763c2aa1e3bc5e0d3b | [
"MIT"
] | 1 | 2021-06-01T22:27:44.000Z | 2021-06-01T22:27:44.000Z | weld/pandas_weld/tests/io/test_parsers.py | radujica/data-analysis-pipelines | 64a6e5613cb1ab2ba2eb2f763c2aa1e3bc5e0d3b | [
"MIT"
] | null | null | null | import unittest
from datetime import date
import numpy as np
import os
import pandas_weld as pdw
from pandas_weld.tests import test_equal_multiindex
class ParserTests(unittest.TestCase):
PATH_EXT = (os.path.dirname(__file__)) + '/sample_ext.nc'
def test_read_netcdf4(self):
data = {'tg': np.array([-99.99, 10., 10.099999, -99.99, -99.99, 10.2, -99.99, -99.99, -99.99, 10.3, 10.4, 10.5,
10.599999, 10.7, 10.8, 10.9, -99.99, -99.99, -99.99, -99.99, 11., 11., 11., 11.,
-99.99, -99.99, -99.99, -99.99, 12., 13.],
dtype=np.float32),
'tg_ext': np.array([-9999, 1000., 1010., -9999, -9999, 1020., -9999, -9999, -9999, 1030., 10401.,
10502., 10603., 10704., 10805., 10906., -9999, -9999, -9999, -9999, 11001.,
11002., 11003., 11004., -9999, -9999, -9999, -9999, 12005., 13006.],
dtype=np.float32)}
index = pdw.MultiIndex.from_product([np.array([25.5, 26.], dtype=np.float32),
np.array([10., 11., 12.], dtype=np.float32),
np.array([str(date(1950, 1, 1)), str(date(1950, 1, 2)),
str(date(1950, 1, 3)), str(date(1950, 1, 4)),
str(date(1950, 1, 5))])],
['longitude', 'latitude', 'time'])
expected_result = pdw.DataFrame(data, index)
result = pdw.read_netcdf4(ParserTests.PATH_EXT)
self.assertListEqual(expected_result.data.keys(), result.data.keys())
np.testing.assert_array_equal(expected_result.data['tg'], result.data['tg'].evaluate(verbose=False))
np.testing.assert_array_equal(expected_result.data['tg_ext'], result.data['tg_ext'].evaluate(verbose=False))
test_equal_multiindex(expected_result.index, result.index)
# TODO
def test_read_csv(self):
pass
def test_netcdf4_lazy_eager(self):
result_lazy = pdw.read_netcdf4(ParserTests.PATH_EXT)
result_eager = pdw.read_netcdf4_eager(ParserTests.PATH_EXT)
self.assertListEqual(result_lazy.data.keys(), result_eager.data.keys())
np.testing.assert_array_equal(result_lazy.data['tg'].evaluate(), result_eager.data['tg'].evaluate())
np.testing.assert_array_equal(result_lazy.data['tg_ext'].evaluate(), result_eager.data['tg_ext'].evaluate())
test_equal_multiindex(result_lazy.index, result_eager.index)
def main():
unittest.main()
if __name__ == '__main__':
main()
| 45.416667 | 119 | 0.557064 |
4a24b4f390edc1470f26537cd35a232adccc5132 | 1,437 | py | Python | frappe/contacts/doctype/address_template/test_address_template.py | erpnext-tm/frappe | 7b470f28e1cf00b0659c01e06a2d0a4693b28d98 | [
"MIT"
] | null | null | null | frappe/contacts/doctype/address_template/test_address_template.py | erpnext-tm/frappe | 7b470f28e1cf00b0659c01e06a2d0a4693b28d98 | [
"MIT"
] | null | null | null | frappe/contacts/doctype/address_template/test_address_template.py | erpnext-tm/frappe | 7b470f28e1cf00b0659c01e06a2d0a4693b28d98 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest
import frappe
class TestAddressTemplate(unittest.TestCase):
def setUp(self):
self.make_default_address_template()
def test_default_is_unset(self):
a = frappe.get_doc("Address Template", "India")
a.is_default = 1
a.save()
b = frappe.get_doc("Address Template", "Brazil")
b.is_default = 1
b.save()
self.assertEqual(frappe.db.get_value("Address Template", "India", "is_default"), 0)
def tearDown(self):
a = frappe.get_doc("Address Template", "India")
a.is_default = 1
a.save()
@classmethod
def make_default_address_template(self):
template = """{{ address_line1 }}<br>{% if address_line2 %}{{ address_line2 }}<br>{% endif -%}{{ city }}<br>{% if state %}{{ state }}<br>{% endif -%}{% if pincode %}{{ pincode }}<br>{% endif -%}{{ country }}<br>{% if phone %}Phone: {{ phone }}<br>{% endif -%}{% if fax %}Fax: {{ fax }}<br>{% endif -%}{% if email_id %}Email: {{ email_id }}<br>{% endif -%}"""
if not frappe.db.exists("Address Template", "India"):
frappe.get_doc(
{"doctype": "Address Template", "country": "India", "is_default": 1, "template": template}
).insert()
if not frappe.db.exists("Address Template", "Brazil"):
frappe.get_doc(
{"doctype": "Address Template", "country": "Brazil", "template": template}
).insert()
| 32.659091 | 360 | 0.654141 |
4a24b5928a682fe8424bd335832b341648aa97fe | 3,773 | py | Python | amath586/hw4/heat_CN_FWE.py | interesting-courses/UW_coursework | 987e336e70482622c5d03428b5532349483f87f4 | [
"MIT"
] | 2 | 2020-08-19T01:59:25.000Z | 2021-12-31T12:32:59.000Z | amath586/hw4/heat_CN_FWE.py | interesting-courses/UW_coursework | 987e336e70482622c5d03428b5532349483f87f4 | [
"MIT"
] | null | null | null | amath586/hw4/heat_CN_FWE.py | interesting-courses/UW_coursework | 987e336e70482622c5d03428b5532349483f87f4 | [
"MIT"
] | 3 | 2021-03-31T22:23:46.000Z | 2022-01-29T22:13:01.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 29 21:11:44 2018
@author: tyler
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
from scipy.sparse import linalg
#<start>
def heat_CN_FWE(m):
#
    # heat_CN_FWE.py
#
# Solve u_t = kappa * u_{xx} on [ax,bx] with Dirichlet boundary conditions,
    # using the forward Euler method with m interior points.
#
# Returns k, h, and the max-norm of the error.
# This routine can be embedded in a loop on m to test the accuracy,
# perhaps with calls to error_table and/or error_loglog.
#
# Original MATLAB code from http://www.amath.washington.edu/~rjl/fdmbook/ (2007)
# Ported to Python by Tyler Chen (2018)
plt.figure() # clear graphics
# Put all plots on the same graph (comment out if desired)
ax = 0;
bx = 1;
kappa = .02; # heat conduction coefficient:
tfinal = 1; # final time
h = (bx-ax)/(m+1); # h = delta x
x = np.linspace(ax,bx,m+2); # note x(1)=0 and x(m+2)=1
# u(1)=g0 and u(m+2)=g1 are known from BC's
k = 24*h**2; # time step
nsteps = round(tfinal / k); # number of time steps
#nplot = 1; # plot solution every nplot time steps
# (set nplot=2 to plot every 2 time steps, etc.)
nplot = nsteps; # only plot at final time
if abs(k*nsteps - tfinal) > 1e-5:
# The last step won't go exactly to tfinal.
print(' ')
print('WARNING *** k does not divide tfinal, k = %1.5f' % k)
print(' ')
# true solution for comparison:
# For Gaussian initial conditions u(x,0) = exp(-beta * (x-0.4)^2)
beta = 150;
utrue = lambda x,t: np.exp(-(x-0.4)**2 / (4*kappa*t + 1/beta)) / np.sqrt(4*beta*kappa*t+1);
# initial conditions:
u0 = utrue(x,0);
    # Each time step we advance the MOL system U' = AU + g with a forward Euler step
    # set up matrices (A1_, A2_ and A2 are set up but not used by the explicit update below):
r = kappa * k/(h**2);
e = np.ones(m);
A = sparse.spdiags([e,-2*e,e],[-1,0,1],m,m)
A1_ = sparse.eye(m) + (r / 4) * A;
A2_ = sparse.eye(m) - (r / 4) * A;
A2 = sparse.eye(m) - (r / 3) * A;
# initial data on fine grid for plotting:
xfine = np.linspace(ax,bx,1001);
ufine = utrue(xfine,0);
# initialize u and plot:
tn = 0;
u = u0;
plt.plot(x,u,'b.-', xfine,ufine,'r')
plt.legend(['computed','true'])
plt.title('Initial data at time = 0')
# main time-stepping loop:
for n in range(nsteps):
tnp = tn + k; # = t_{n+1}
# boundary values u(0,t) and u(1,t) at times tn and tnp:
g0n = u[0];
g1n = u[m+1];
g0np = utrue(ax,tnp);
g1np = utrue(bx,tnp);
# compute right hand side for intermediate linear system:
uint = u[1:-1]; # interior points (unknowns)
rhs = r*A @ uint;
# fix-up right hand side using BC's (i.e. add vector g to A2*uint)
rhs[0] += r * g0n;
rhs[m-1] += r * g1n;
uint += rhs
# augment with boundary values:
u = np.concatenate([[g0np], uint, [g1np]]);
# plot results at desired times:
if (n+1)%nplot==0 or (n+1)==nsteps:
print(n)
ufine = utrue(xfine,tnp);
plt.plot(x,u,'b.-', xfine,ufine,'r')
plt.title('t = %1.5f after %i time steps with %i grid points' % (tnp,n+1,m+2))
error = max(abs(u-utrue(x,tnp)));
print('at time t = %.5f max error = %.5f'%(tnp,error))
if (n+1)<nsteps: input('Hit <return> to continue ')
tn = tnp; # for next time step
plt.show()
return k,h,error
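# A minimal sketch of the refinement loop suggested in the header comments
# (error_table / error_loglog from the fdmbook utilities are assumed to live
# elsewhere and are not called here). Note kappa*k/h**2 = 0.02*24 = 0.48 < 0.5,
# so the explicit step above stays within the forward Euler stability limit.
if __name__ == '__main__':
    errors = []
    for m in (19, 39, 79, 159):
        k, h, err = heat_CN_FWE(m)
        errors.append((m, h, k, err))
        print('m = %4i   h = %10.3e   k = %10.3e   max error = %10.3e' % (m, h, k, err))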
#<end> | 31.441667 | 95 | 0.530612 |
4a24b69e45f2fea5c11ed05dc4271bc0a3c5e55e | 23,784 | py | Python | emat/database/sqlite/sql_queries.py | jinsanity07git/tmip-emat | ff816cf50f141825078bb276d6da46d92c5028a9 | [
"BSD-3-Clause"
] | 13 | 2019-03-26T13:27:43.000Z | 2022-02-02T18:30:36.000Z | emat/database/sqlite/sql_queries.py | jinsanity07git/tmip-emat | ff816cf50f141825078bb276d6da46d92c5028a9 | [
"BSD-3-Clause"
] | 19 | 2019-04-24T20:58:10.000Z | 2020-09-11T22:31:06.000Z | emat/database/sqlite/sql_queries.py | jinsanity07git/tmip-emat | ff816cf50f141825078bb276d6da46d92c5028a9 | [
"BSD-3-Clause"
] | 17 | 2019-02-19T16:13:52.000Z | 2022-02-14T20:50:36.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 16 09:41:27 2018
@author: mmilkovits
"""
CONDITIONAL_INSERT_XL = (
'''INSERT OR IGNORE INTO ema_parameter( name, ptype )
VALUES(?1, CASE WHEN ?2 LIKE '%uncertainty%' THEN 1 WHEN ?2 LIKE '%constant%' THEN 2 ELSE 0 END)
'''
)
CONDITIONAL_INSERT_M = (
'''INSERT OR IGNORE INTO ema_measure( name, transform )
VALUES(?,?)
'''
)
INSERT_SCOPE = (
'''INSERT INTO ema_scope( name, sheet, content )
VALUES(?1, ?2, ?3)
'''
)
UPDATE_SCOPE_CONTENT = '''
UPDATE
ema_scope
SET
content = @scope_pickle
WHERE
name = @scope_name
'''
GET_SCOPE = (
'''SELECT content FROM ema_scope WHERE name = ?'''
)
DELETE_SCOPE = (
''' DELETE from ema_scope WHERE name = ?
'''
)
INSERT_SCOPE_XL = '''
INSERT INTO
ema_scope_parameter( scope_id, parameter_id )
SELECT
ema_scope.scope_id,
ema_parameter.parameter_id
FROM
ema_scope
JOIN ema_parameter
WHERE
ema_scope.name = ?
AND ema_parameter.name = ?
'''
INSERT_SCOPE_M = (
'''INSERT INTO ema_scope_measure( scope_id, measure_id )
SELECT ema_scope.scope_id, ema_measure.measure_id FROM
ema_scope JOIN ema_measure
WHERE ema_scope.name = ? AND ema_measure.name = ?
'''
)
GET_SCOPE_XL = (
'''SELECT ema_parameter.name
FROM ema_parameter JOIN ema_scope_parameter sv ON (ema_parameter.parameter_id = sv.parameter_id)
JOIN ema_scope s ON (sv.scope_id = s.scope_id)
WHERE s.name = ?
'''
)
GET_SCOPE_X = '''
SELECT
ema_parameter.name
FROM
ema_parameter
JOIN ema_scope_parameter sv
ON (ema_parameter.parameter_id = sv.parameter_id)
JOIN ema_scope s
ON (sv.scope_id = s.scope_id)
WHERE
s.name = ?
AND ema_parameter.ptype = 1
'''
GET_SCOPE_L = (
'''SELECT ema_parameter.name
FROM ema_parameter JOIN ema_scope_parameter sv ON (ema_parameter.parameter_id = sv.parameter_id)
JOIN ema_scope s ON (sv.scope_id = s.scope_id)
WHERE s.name = ?
AND ema_parameter.ptype = 0
'''
)
GET_SCOPE_C = (
'''SELECT ema_parameter.name
FROM ema_parameter JOIN ema_scope_parameter sv ON (ema_parameter.parameter_id = sv.parameter_id)
JOIN ema_scope s ON (sv.scope_id = s.scope_id)
WHERE s.name = ?
AND ema_parameter.ptype = 2
'''
)
GET_SCOPE_M = (
'''SELECT ema_measure.name
FROM ema_measure JOIN ema_scope_measure sp ON (ema_measure.measure_id = sp.measure_id)
JOIN ema_scope s ON (sp.scope_id = s.scope_id)
WHERE s.name = ?
'''
)
INSERT_EX = (
'''INSERT INTO ema_experiment ( scope_id, design )
SELECT ema_scope.scope_id, ?
FROM ema_scope WHERE ema_scope.name = ?
'''
)
INSERT_DESIGN = '''
INSERT OR IGNORE INTO ema_design (scope_id, design)
SELECT ema_scope.scope_id, ?2
FROM ema_scope WHERE ema_scope.name = ?1
'''
INSERT_EXPERIMENT = '''
INSERT INTO ema_experiment ( scope_id )
SELECT ema_scope.scope_id
FROM ema_scope WHERE ema_scope.name = ?
'''
INSERT_EXPERIMENT_WITH_ID = '''
INSERT INTO ema_experiment ( experiment_id, scope_id )
SELECT ?2, ema_scope.scope_id
FROM ema_scope WHERE ema_scope.name = ?1
'''
INSERT_DESIGN_EXPERIMENT = '''
INSERT OR IGNORE INTO ema_design_experiment (experiment_id, design_id)
SELECT ?3, d.design_id
FROM ema_design d
JOIN ema_scope s ON (d.scope_id = s.scope_id)
WHERE d.design = ?2
AND s.name = ?1
'''
NEW_EXPERIMENT_RUN = '''
INSERT INTO
ema_experiment_run (
run_id,
experiment_id,
run_status,
run_valid,
run_location,
run_source )
VALUES (
@run_id,
@experiment_id,
'init',
1,
@run_location,
@run_source )
'''
DELETE_DESIGN_EXPERIMENTS = '''
DELETE FROM ema_design_experiment
WHERE ema_design_experiment.design_id IN (
SELECT ema_design.design_id
FROM ema_design
JOIN ema_scope s ON (ema_design.scope_id = s.scope_id)
WHERE s.name = ? AND ema_design.design = ?
)
'''
DELETE_LOOSE_EXPERIMENTS = '''
DELETE FROM ema_experiment
WHERE ema_experiment.experiment_id NOT IN (
SELECT edd.experiment_id
FROM ema_design_experiment edd
JOIN ema_design ed ON (ed.design_id = edd.design_id)
JOIN ema_scope s ON (ed.scope_id = s.scope_id)
WHERE s.name = ?
)
'''
DELETE_MEASURES_BY_EXPERIMENT_ID = '''
DELETE FROM main.ema_experiment_measure
WHERE ema_experiment_measure.experiment_id IN (?)
'''
DELETE_RUN_ID = '''
DELETE FROM ema_experiment_run
WHERE ema_experiment_run.run_id = @run_id
'''
INVALIDATE_RUN_ID = '''
UPDATE
ema_experiment_run
SET
run_valid = 0
WHERE
ema_experiment_run.run_id = @run_id
AND run_valid != 0
'''
INSERT_EX_XL = (
'''INSERT INTO ema_experiment_parameter( experiment_id, parameter_id, parameter_value )
SELECT ?, ema_parameter.parameter_id, ? FROM
ema_parameter WHERE ema_parameter.name = ?
'''
)
GET_EXPERIMENT_PARAMETERS = '''
SELECT
eep.experiment_id, ep.name, parameter_value
FROM
ema_experiment_parameter eep
JOIN ema_parameter ep
ON eep.parameter_id = ep.parameter_id -- convert parameter_id to name
JOIN ema_experiment ee
ON eep.experiment_id = ee.experiment_id -- connect to experiment table to allow filtering
JOIN ema_scope s
ON ee.scope_id = s.scope_id -- connect to scope table, filter on matching scope
JOIN ema_design_experiment ede
ON ee.experiment_id = ede.experiment_id -- make design_id available
JOIN ema_design ed
ON (s.scope_id = ed.scope_id AND ede.design_id = ed.design_id)
WHERE
s.name = @scope_name
AND ed.design = @design_name
'''
GET_EXPERIMENT_IDS_BY_VALUE = '''
SELECT
eep.experiment_id
FROM
ema_experiment_parameter eep
JOIN ema_parameter ep
ON eep.parameter_id = ep.parameter_id
JOIN ema_experiment ee
ON eep.experiment_id = ee.experiment_id
JOIN ema_scope s
ON ee.scope_id = s.scope_id
WHERE
s.name =?1
AND ep.name = ?2
AND parameter_value = ?3;
'''
GET_EXPERIMENT_IDS_BY_DESIGN_AND_VALUE = '''
SELECT
eep.experiment_id
FROM
ema_experiment_parameter eep
JOIN ema_parameter ep
ON eep.parameter_id = ep.parameter_id
JOIN ema_experiment ee
ON eep.experiment_id = ee.experiment_id
JOIN ema_scope s
ON ee.scope_id = s.scope_id
WHERE
s.name =?1
AND ee.design = ?2
AND ep.name = ?3
AND parameter_value = ?4;
'''
GET_EX_XL_ALL = '''
SELECT
eep.experiment_id,
ep.name,
parameter_value
FROM
ema_experiment_parameter eep
JOIN ema_parameter ep
ON eep.parameter_id = ep.parameter_id
JOIN ema_experiment ee
ON eep.experiment_id = ee.experiment_id
JOIN ema_scope s
ON ee.scope_id = s.scope_id
WHERE
s.name = @scope_name;
'''
GET_EX_XL_IDS_IN = '''
SELECT
eep.experiment_id,
ep.name,
parameter_value
FROM ema_experiment_parameter eep
JOIN ema_parameter ep
ON eep.parameter_id = ep.parameter_id
JOIN ema_experiment ee
ON eep.experiment_id = ee.experiment_id
JOIN ema_scope s
ON ee.scope_id = s.scope_id
WHERE
s.name =?1
AND eep.experiment_id in (???);
'''
INSERT_EX_M = '''
REPLACE INTO ema_experiment_measure (
experiment_id,
measure_id,
measure_value,
measure_run )
SELECT
@experiment_id,
ema_measure.measure_id,
@measure_value,
eer.run_rowid
FROM
ema_measure
JOIN ema_experiment_run eer
ON eer.run_id = @measure_run
WHERE ema_measure.name = @measure_name
'''
_DEBUG_INSERT_EX_M = '''
SELECT
@experiment_id,
ema_measure.measure_id,
@measure_value,
eer.run_rowid
FROM
ema_measure
LEFT JOIN ema_experiment_run eer
ON eer.run_id = @measure_run
WHERE ema_measure.name = @measure_name
'''
GET_EXPERIMENT_PARAMETERS_AND_MEASURES = '''
SELECT eep.experiment_id, ep.name, parameter_value
FROM ema_parameter ep
JOIN ema_experiment_parameter eep on eep.parameter_id = ep.parameter_id
JOIN ema_experiment ee ON eep.experiment_id = ee.experiment_id
JOIN ema_scope s on ee.scope_id = s.scope_id
JOIN ema_design_experiment ede ON ee.experiment_id = ede.experiment_id
JOIN ema_design ed ON (s.scope_id = ed.scope_id AND ed.design_id = ede.design_id)
WHERE s.name =?1 and ed.design = ?2
UNION
SELECT eem.experiment_id, ema_measure.name, measure_value
FROM ema_experiment_measure eem JOIN ema_measure on eem.measure_id = ema_measure.measure_id
JOIN ema_experiment ee ON eem.experiment_id = ee.experiment_id
JOIN ema_scope es on ee.scope_id = es.scope_id
JOIN ema_design_experiment ede ON ee.experiment_id = ede.experiment_id
JOIN ema_design ed ON (es.scope_id = ed.scope_id AND ed.design_id = ede.design_id)
WHERE es.name =?1 and ed.design = ?2
/*source*/
'''
GET_EXPERIMENT_PARAMETERS_AND_MEASURES_BYSOURCE = GET_EXPERIMENT_PARAMETERS_AND_MEASURES.replace(
'/*source*/',
' AND eem.measure_source =?3'
)
GET_EXPERIMENT_MEASURES_MASTER = '''
SELECT DISTINCT
eem.experiment_id, --index_type
runs.run_id,
ema_measure.name,
measure_value,
runs.run_source,
runs.run_rowid,
runs.experiment_id as run_ex_id
FROM
ema_experiment_measure eem
JOIN ema_measure
ON eem.measure_id = ema_measure.measure_id
JOIN ema_experiment ee
ON eem.experiment_id = ee.experiment_id
JOIN ema_scope es
ON ee.scope_id = es.scope_id
JOIN ema_design_experiment ede
ON ee.experiment_id = ede.experiment_id
JOIN ema_design ed
ON (es.scope_id = ed.scope_id AND ed.design_id = ede.design_id)
JOIN /* most recent valid run with results matching target source */ (
SELECT
*,
max(run_timestamp)
FROM
ema_experiment_run
WHERE
(
run_rowid IN (
SELECT DISTINCT measure_run
FROM ema_experiment_measure eem3
WHERE eem3.measure_value IS NOT NULL
)
)
AND run_valid = 1
AND run_source = @measure_source
GROUP BY
experiment_id, run_source
) /* end most recent */ runs
ON runs.run_rowid = eem.measure_run
WHERE
es.name = @scope_name
AND ed.design = @design_name
AND eem.experiment_id = @experiment_id
AND measure_value IS NOT NULL
AND run_source = @measure_source
AND run_valid = 1
'''
GET_EX_XLM_ALL = (
'''
SELECT
eep.experiment_id, ep.name, parameter_value
FROM
ema_parameter ep
JOIN ema_experiment_parameter eep
ON eep.parameter_id = ep.parameter_id
JOIN ema_experiment ee
ON eep.experiment_id = ee.experiment_id
JOIN ema_scope s
ON ee.scope_id = s.scope_id
WHERE
s.name =?1
UNION
SELECT
eem.experiment_id, em.name, measure_value
FROM
ema_experiment_measure eem
JOIN ema_measure em
ON eem.measure_id = em.measure_id
JOIN ema_experiment ee
ON eem.experiment_id = ee.experiment_id
JOIN ema_scope s
ON ee.scope_id = s.scope_id
WHERE
s.name =?1
'''
)
GET_EX_XLM_ALL_BYSOURCE = GET_EX_XLM_ALL + ' AND ema_experiment_measure.measure_source =?2'
GET_EXPERIMENT_MEASURE_SOURCES = '''
SELECT DISTINCT
eer.run_source
FROM
ema_experiment_measure eem
JOIN ema_measure em
ON eem.measure_id = em.measure_id
JOIN ema_experiment ee
ON eem.experiment_id = ee.experiment_id
JOIN ema_scope es
ON ee.scope_id = es.scope_id
JOIN ema_experiment_run eer
ON eem.measure_run = eer.run_rowid
/*by-design-join*/
WHERE
es.name = @scope_name
AND measure_value IS NOT NULL
/*by-design-where*/
'''
GET_EXPERIMENT_MEASURE_SOURCES_BY_DESIGN = GET_EXPERIMENT_MEASURE_SOURCES.replace("/*by-design-join*/", '''
JOIN ema_design_experiment ede
ON ee.experiment_id = ede.experiment_id
JOIN ema_design ed
ON (es.scope_id = ed.scope_id AND ed.design_id = ede.design_id)
''').replace("/*by-design-where*/", '''
AND ed.design = @design_name
''')
CREATE_META_MODEL = (
'''
INSERT INTO meta_model(scope_id, measure_id, lr_r2, gpr_cv, rmse)
SELECT s.scope_id, ema_measure.measure_id, ?, ?, ? FROM
ema_scope s JOIN ema_measure
WHERE s.name = ? AND ema_measure.name = ?
'''
)
GET_META_MODEL = (
'''
SELECT lr_r2, gpr_cv, rmse
FROM meta_model mm JOIN ema_scope s ON mm.scope_id = s.scope_id
JOIN ema_measure ON mm.measure_id = ema_measure.measure_id
WHERE s.name = ? AND ema_measure.name = ?
'''
)
UPDATE_META_MODEL = (
'''
UPDATE meta_model
SET lr_r2 = ?, gpr_cv = ?, rmse = ?
WHERE EXISTS
(SELECT * FROM meta_model mm
JOIN ema_scope s ON mm.scope_id = s.scope_id
JOIN ema_measure ON mm.measure_id = ema_measure.measure_id
WHERE s.name = ? AND ema_measure.name = ?)
'''
)
ADD_MM_COEFF = (
'''
INSERT OR REPLACE INTO meta_model_param( scope_id, measure_id, parameter_id, est, std_error, pvalue )
SELECT s.scope_id, ema_measure.measure_id, ema_parameter.parameter_id, ?, ?, ? FROM
ema_scope s JOIN ema_measure JOIN ema_parameter
WHERE s.name = ? AND ema_measure.name = ? AND ema_parameter.name = ?
'''
)
GET_MM_COEFF = (
'''SELECT ema_parameter.name, est, std_error, pvalue
FROM meta_model_param mmp JOIN meta_model mm
ON (mmp.scope_id = mm.scope_id AND mmp.measure_id = mm.measure_id)
JOIN ema_scope s ON mm.scope_id = s.scope_id
JOIN ema_measure ON mm.measure_id = ema_measure.measure_id
JOIN ema_parameter ON mmp.parameter_id = ema_parameter.parameter_id
WHERE s.name = ? AND ema_measure.name = ?
'''
)
GET_SCOPE_NAMES = (
'''SELECT name
FROM ema_scope
ORDER BY name;
'''
)
GET_SCOPES_CONTAINING_DESIGN_NAME = (
'''SELECT DISTINCT s.name
FROM ema_design
JOIN ema_scope s on ema_design.scope_id = s.scope_id
WHERE ema_design.design =?
ORDER BY s.name;
'''
)
GET_DESIGN_NAMES = '''
SELECT DISTINCT ema_design.design
FROM ema_design
JOIN ema_scope s on ema_design.scope_id = s.scope_id
WHERE s.name =?;
'''
GET_EXPERIMENT_IDS_IN_DESIGN = (
'''
SELECT ema_experiment.experiment_id
FROM ema_experiment
JOIN ema_scope s ON ema_experiment.scope_id = s.scope_id
JOIN ema_design_experiment de ON ema_experiment.experiment_id = de.experiment_id
JOIN ema_design d ON de.design_id = d.design_id
WHERE s.name =?1
AND d.design = ?2;
'''
)
GET_EXPERIMENT_IDS_ALL = (
'''
SELECT ema_experiment.experiment_id
FROM ema_experiment
JOIN ema_scope s ON ema_experiment.scope_id = s.scope_id
WHERE s.name =?1;
'''
)
INSERT_METAMODEL_PICKLE = (
'''INSERT OR REPLACE INTO meta_model_pickles ( scope_id, metamodel_id, name, pickled_mm )
SELECT ema_scope.scope_id, ?2, ?3, ?4
FROM ema_scope WHERE ema_scope.name = ?1
'''
)
GET_METAMODEL_PICKLE = (
'''
SELECT meta_model_pickles.name, meta_model_pickles.pickled_mm
FROM meta_model_pickles
JOIN ema_scope s ON meta_model_pickles.scope_id = s.scope_id
WHERE s.name =?1 AND meta_model_pickles.metamodel_id =?2;
'''
)
GET_METAMODEL_IDS = (
'''
SELECT meta_model_pickles.metamodel_id
FROM meta_model_pickles
JOIN ema_scope s ON meta_model_pickles.scope_id = s.scope_id
WHERE s.name =?1 AND meta_model_pickles.pickled_mm NOT NULL ;
'''
)
GET_NEW_METAMODEL_ID = (
# '''
# SELECT MAX(IFNULL(MAX(meta_model_pickles.metamodel_id), 0), IFNULL(MAX(meta_model_pickles.rowid), 0))+1
# FROM meta_model_pickles;
# '''
'''
SELECT IFNULL(MAX(meta_model_pickles.metamodel_id), 0)+1
FROM meta_model_pickles;
'''
)
GET_BOX_THRESHOLDS = (
'''
SELECT
ema_parameter.name,
threshold_value,
threshold_type
FROM
ema_box_parameter
JOIN ema_scope_box
ON ema_scope_box.box_id = ema_box_parameter.box_id
JOIN ema_parameter
ON ema_parameter.parameter_id = ema_box_parameter.parameter_id
JOIN ema_scope_parameter
ON ema_scope_parameter.parameter_id = ema_box_parameter.parameter_id
JOIN ema_scope
ON ema_scope.scope_id = ema_scope_parameter.scope_id
WHERE
ema_scope.name = ?1
AND
ema_scope_box.box_name = ?2
UNION ALL
SELECT
ema_measure.name,
threshold_value,
threshold_type
FROM
ema_box_measure
JOIN ema_scope_box
ON ema_scope_box.box_id = ema_box_measure.box_id
JOIN ema_measure
ON ema_measure.measure_id = ema_box_measure.measure_id
JOIN ema_scope_measure
ON ema_scope_measure.measure_id = ema_box_measure.measure_id
JOIN ema_scope
ON ema_scope.scope_id = ema_scope_measure.scope_id
WHERE
ema_scope.name = ?1
AND
ema_scope_box.box_name = ?2
'''
)
INSERT_BOX = (
"""
INSERT OR REPLACE INTO ema_scope_box (parent_box_id, scope_id, box_name)
SELECT null, ema_scope.scope_id, ?2
FROM ema_scope
WHERE ema_scope.name = ?1
"""
)
INSERT_SUBBOX = (
"""
INSERT OR REPLACE INTO ema_scope_box (parent_box_id, scope_id, box_name)
SELECT parent.box_id, ema_scope.scope_id, ?2
FROM ema_scope
JOIN ema_scope_box parent
ON parent.scope_id = ema_scope.scope_id AND parent.box_name = ?3
WHERE ema_scope.name = ?1
"""
)
GET_BOX_NAMES = (
"""
SELECT DISTINCT
ema_scope_box.box_name
FROM
ema_scope_box
JOIN ema_scope
ON ema_scope.scope_id = ema_scope_box.scope_id
WHERE
ema_scope.name = ?1
"""
)
GET_BOX_PARENT_NAMES = (
"""
SELECT
child.box_name, parent.box_name
FROM
ema_scope_box child
JOIN ema_scope
ON ema_scope.scope_id = child.scope_id
JOIN ema_scope_box parent
ON parent.box_id = child.parent_box_id
WHERE
ema_scope.name = ?1
"""
)
GET_BOX_PARENT_NAME = (
"""
SELECT
parent.box_name
FROM
ema_scope_box child
JOIN ema_scope
ON ema_scope.scope_id = child.scope_id
JOIN ema_scope_box parent
ON parent.box_id = child.parent_box_id
WHERE
ema_scope.name = ?1
AND child.box_name = ?2
"""
)
CLEAR_BOX_THRESHOLD_P = (
'''
DELETE FROM ema_box_parameter
WHERE EXISTS (
SELECT
*
FROM
ema_box_parameter
JOIN ema_scope_box
ON ema_scope_box.box_id = ema_box_parameter.box_id
JOIN ema_parameter
ON ema_parameter.parameter_id = ema_box_parameter.parameter_id
JOIN ema_scope_parameter
ON ema_scope_parameter.parameter_id = ema_box_parameter.parameter_id
JOIN ema_scope
ON ema_scope.scope_id = ema_scope_parameter.scope_id
WHERE
ema_scope.name = ?1
AND
ema_scope_box.box_name = ?2
AND
ema_parameter.name = ?3
);
'''
)
CLEAR_BOX_THRESHOLD_M = (
'''
DELETE FROM ema_box_measure
WHERE EXISTS (
SELECT
*
FROM
ema_box_measure
JOIN ema_scope_box
ON ema_scope_box.box_id = ema_box_measure.box_id
JOIN ema_measure
ON ema_measure.measure_id = ema_box_measure.measure_id
JOIN ema_scope_measure
ON ema_scope_measure.measure_id = ema_box_measure.measure_id
JOIN ema_scope
ON ema_scope.scope_id = ema_scope_measure.scope_id
WHERE
ema_scope.name = ?1
AND
ema_scope_box.box_name = ?2
AND
ema_measure.name = ?3
);
'''
)
SET_BOX_THRESHOLD_P = (
'''
INSERT OR REPLACE INTO ema_box_parameter (
box_id,
parameter_id,
threshold_value,
threshold_type
)
SELECT
ema_scope_box.box_id,
ema_parameter.parameter_id,
?4,
?5
FROM
ema_scope_box
JOIN ema_parameter
JOIN ema_scope
ON ema_scope.scope_id = ema_scope_box.scope_id
WHERE ema_scope.name = ?1
AND ema_scope_box.box_name = ?2
AND ema_parameter.name = ?3
'''
)
SET_BOX_THRESHOLD_M = (
'''
INSERT OR REPLACE INTO ema_box_measure (
box_id,
measure_id,
threshold_value,
threshold_type
)
SELECT
ema_scope_box.box_id,
ema_measure.measure_id,
?4,
?5
FROM
ema_scope_box
JOIN ema_measure
JOIN ema_scope
ON ema_scope.scope_id = ema_scope_box.scope_id
WHERE ema_scope.name = ?1
AND ema_scope_box.box_name = ?2
AND ema_measure.name = ?3
'''
)
UPDATE_DATABASE_ema_design_experiment = (
"PRAGMA foreign_keys = OFF",
'''
INSERT OR IGNORE INTO ema_design ( scope_id, design )
SELECT DISTINCT scope_id, design FROM ema_experiment;
''',
'''
INSERT OR IGNORE INTO ema_design_experiment ( experiment_id, design_id )
SELECT ema_experiment.experiment_id, ema_design.design_id
FROM ema_experiment
JOIN ema_design ON ema_design.design = ema_experiment.design;
''',
)
UPDATE_DATABASE_ema_experiment_measure_ADD_measure_run = (
'''
ALTER TABLE ema_experiment_measure
ADD COLUMN measure_run UUID;
''',
)
UPDATE_DATABASE_ema_experiment_run_ADD_run_source = (
'''
ALTER TABLE ema_experiment_run
ADD COLUMN run_source INT NOT NULL DEFAULT 0;
''',
)
from ... import __version__
import numpy as np
__version_as_int__ = np.asarray([
int(i)
for i in __version__.replace("a",'').replace("b",'').split(".")
]) @ np.asarray([1000000,1000,1])
SET_VERSION_DATABASE = f'''
INSERT OR IGNORE INTO ema_tool_info VALUES ('version', {__version_as_int__});
'''
SET_MINIMUM_VERSION_DATABASE = f'''
INSERT OR IGNORE INTO ema_tool_info VALUES ('minimum_version', 4000); -- 0.4.0
'''
GET_VERSION_DATABASE = f'''
SELECT val FROM ema_tool_info WHERE tag='version'
'''
GET_MINIMUM_VERSION_DATABASE = f'''
SELECT val FROM ema_tool_info WHERE tag='minimum_version'
'''
| 26.574302 | 119 | 0.623949 |
4a24b70bd0e8e9961b5ea64afa2ef5157ac1b89e | 3,085 | py | Python | SimpleEnc/components/Decryptor.py | momoji123/Tools | 9b1a026e3346f4c26291018587409e86973925c6 | [
"MIT"
] | null | null | null | SimpleEnc/components/Decryptor.py | momoji123/Tools | 9b1a026e3346f4c26291018587409e86973925c6 | [
"MIT"
] | null | null | null | SimpleEnc/components/Decryptor.py | momoji123/Tools | 9b1a026e3346f4c26291018587409e86973925c6 | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter import Frame, Button, Label, Entry, StringVar
from cryptography.fernet import Fernet
from components import KeyGenerator, ResultWindow
import traceback
class Decryptor:
root = None
master = None
mainContainer = None
console = None
fileManager = None
passInput = None
password = ""
result = ""
def __init__(self, root, master, fileManager, console):
self.root = root
self.fileManager = fileManager
self.master = master
self.mainContainer = Frame(self.master)
self.console = console
self.show()
self.addEmptySpace()
self.showPasswordInput()
self.addEmptySpace()
self.showStartBtn()
def show(self, mode=True):
if mode:
self.mainContainer.pack(side=tk.TOP, fill=tk.BOTH)
else:
self.mainContainer.pack_forget()
def showPasswordInput(self):
container = Frame(self.mainContainer, bg="black")
container.pack(side=tk.TOP, fill=tk.BOTH)
self.showPasswordEntry(container)
def showPasswordEntry(self, master):
subContPass = Frame(master)
subContPass.pack(side=tk.TOP, fill=tk.BOTH)
labelPass = Label(subContPass, anchor="w", text="Password: ", font="Verdana 12 bold", width=15)
labelPass.pack(side=tk.LEFT)
svPass = StringVar()
svPass.trace("w", lambda name, index, mode, sv=svPass: self.setPassword(sv.get()))
self.passInput = Entry(subContPass, show="*", width=50, textvariable=svPass)
self.passInput.pack(side=tk.LEFT)
def setPassword(self, password):
self.password = password
def showStartBtn(self):
        button = Button(self.mainContainer, text="Start Decrypt", command=self.startDecrypt, height=5, font="Verdana 18 bold")
button.pack(side=tk.BOTTOM, fill=tk.BOTH)
def startDecrypt(self):
self.console.insertProcess("Start decrypting file")
try:
reader = self.fileManager.getFileReader(mode="rb")
textBin = b""
for line in reader:
textBin += line
encodedText = textBin
encryptor = Fernet(KeyGenerator.generateKey(self.password))
encryptedText = encryptor.decrypt(encodedText)
self.result = encryptedText.decode()
self.showResult()
self.console.insertSuccess("File was successfully decrypted!")
except Exception as e:
traceback.print_exc()
if str(e) == "":
                self.console.insertFailed("Failed to decrypt file! Please make sure the opened file is an encrypted file and the password is correct")
else:
self.console.insertFailed(str(e))
finally:
self.fileManager.closeFileReader()
self.fileManager.closeFileWriter()
def showResult(self):
ResultWindow.Window(self.root, self.result, self.fileManager, self.password)
def addEmptySpace(self):
Frame(self.mainContainer, height=50).pack(side=tk.TOP, fill=tk.X)
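    # Note: the KeyGenerator module used in startDecrypt is not shown here. As an
    # illustrative assumption only, a password-based generateKey() compatible with
    # Fernet (which expects a 32-byte url-safe base64 key) could look roughly like:
    #   from cryptography.hazmat.primitives import hashes
    #   from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
    #   import base64
    #   def generateKey(password, salt=b"static-app-salt"):
    #       kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=390000)
    #       return base64.urlsafe_b64encode(kdf.derive(password.encode()))
    # The real implementation in components/KeyGenerator.py may differ.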
| 35.872093 | 137 | 0.635981 |
4a24b806fa4ff97d94b7e6972f79d113901f10d4 | 133 | py | Python | web/zenmai.config.sample.py | mmktomato/zenmai-bts | e8915aed1174f9bc62f945d7be946d00fb43d4b8 | [
"MIT"
] | null | null | null | web/zenmai.config.sample.py | mmktomato/zenmai-bts | e8915aed1174f9bc62f945d7be946d00fb43d4b8 | [
"MIT"
] | null | null | null | web/zenmai.config.sample.py | mmktomato/zenmai-bts | e8915aed1174f9bc62f945d7be946d00fb43d4b8 | [
"MIT"
] | null | null | null | SQLALCHEMY_DATABASE_URI = 'sqlite:///../develop.db'
SECRET_KEY = 'your_own_secret_key'
MAX_CONTENT_LENGTH = 16 * 1024 * 1024 # 16MB
| 33.25 | 51 | 0.744361 |
4a24b912ca773c65fb3b57da0ad34a42f3fabf84 | 8,586 | py | Python | packages/python/plotly/plotly/graph_objs/layout/annotation/hoverlabel/__init__.py | potpath/plotly.py | 46cd47f441d8bda9b14b4ba66a33f02731faf8f0 | [
"MIT"
] | 1 | 2020-04-06T20:57:36.000Z | 2020-04-06T20:57:36.000Z | packages/python/plotly/plotly/graph_objs/layout/annotation/hoverlabel/__init__.py | potpath/plotly.py | 46cd47f441d8bda9b14b4ba66a33f02731faf8f0 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/graph_objs/layout/annotation/hoverlabel/__init__.py | potpath/plotly.py | 46cd47f441d8bda9b14b4ba66a33f02731faf8f0 | [
"MIT"
] | null | null | null | from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Font(_BaseLayoutHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "layout.annotation.hoverlabel"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets the hover label text font. By default uses the global
hover font and size, with color from `hoverlabel.bordercolor`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.annotat
ion.hoverlabel.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.annotation.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.annotation.hoverlabel.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.layout.annotation.hoverlabel import font as v_font
# Initialize validators
# ---------------------
self._validators["color"] = v_font.ColorValidator()
self._validators["family"] = v_font.FamilyValidator()
self._validators["size"] = v_font.SizeValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = ["Font"]
| 37.168831 | 84 | 0.569648 |
4a24b94aca9432d37d8f15358acbaa71238d795c | 968 | py | Python | openverse_api/catalog/api/migrations/0013_contentprovider.py | ritesh-pandey/openverse-api | 7456e9ec4dd45800d5527039e466aa50991b3812 | [
"MIT"
] | 122 | 2018-09-12T13:49:37.000Z | 2021-12-05T07:04:59.000Z | cccatalog-api/cccatalog/api/migrations/0013_contentprovider.py | senyor/cccatalog-api | a18f75fccdd7345beff820dff4ee69604cd53748 | [
"MIT"
] | 500 | 2018-04-30T15:26:43.000Z | 2021-06-07T16:28:44.000Z | cccatalog-api/cccatalog/api/migrations/0013_contentprovider.py | senyor/cccatalog-api | a18f75fccdd7345beff820dff4ee69604cd53748 | [
"MIT"
] | 144 | 2018-08-11T17:11:50.000Z | 2022-01-12T20:39:09.000Z | # Generated by Django 2.0.8 on 2019-01-22 18:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0012_auto_20190102_2012'),
]
operations = [
migrations.CreateModel(
name='ContentProvider',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('provider_identifier', models.CharField(max_length=50)),
('provider_name', models.CharField(max_length=250)),
('domain_name', models.CharField(max_length=500)),
('filter_content', models.BooleanField(default=False)),
],
options={
'db_table': 'content_provider',
},
),
]
| 33.37931 | 114 | 0.576446 |
4a24b96692673c8720f4165b0bbcc0a08ad744c2 | 268 | py | Python | sdk/__init__.py | Elishanto/HarryBotter | e1977dbade44840288145f08aef60746ac66982b | [
"MIT"
] | 3 | 2016-06-12T19:37:05.000Z | 2016-06-12T20:23:33.000Z | sdk/__init__.py | Elishanto/HarryBotter | e1977dbade44840288145f08aef60746ac66982b | [
"MIT"
] | null | null | null | sdk/__init__.py | Elishanto/HarryBotter | e1977dbade44840288145f08aef60746ac66982b | [
"MIT"
] | null | null | null | from .button import ButtonPayload, ButtonElement
from .generic import GenericPayload, GenericElement
from .attachment import Attachment
from .message import Message
from .recipient import Recipient
from .image import ImagePayload
from .location import LocationPayload
| 33.5 | 51 | 0.854478 |
4a24b9da02165bb3efc3065d8ce46808f75867d0 | 1,517 | py | Python | scripts/helper/a7_parallel_evaluation.py | dslaborg/sumo | 1e9bfedaff201d4bd37b4889b6091cc4b9c8ad01 | [
"MIT"
] | 3 | 2022-02-03T22:54:14.000Z | 2022-03-31T09:59:02.000Z | scripts/helper/a7_parallel_evaluation.py | dslaborg/sumo | 1e9bfedaff201d4bd37b4889b6091cc4b9c8ad01 | [
"MIT"
] | null | null | null | scripts/helper/a7_parallel_evaluation.py | dslaborg/sumo | 1e9bfedaff201d4bd37b4889b6091cc4b9c8ad01 | [
"MIT"
] | null | null | null | """
As parallel execution with a ProcessPoolExecutor requires the target function to be picklable
(i.e. defined at module level in an importable module), the split_evaluation function is
extracted into this helper file. A usage sketch follows the function below.
"""
import pickle
import numpy as np
from scripts.a7.detect_spindles import detect_spindles
from sumo.data import spindle_vect_to_indices
from sumo.evaluation import get_true_positives, metric_scores
sampling_rate = 100
win_length_sec = 0.3
win_step_sec = 0.1
thresholds = np.array([1.25, 1.6, 1.3, 0.69])
n_overlaps = 21
overlap_thresholds = np.linspace(0, 1, n_overlaps)
def split_evaluation(input_path):
with open(input_path, 'rb') as input_file:
subjects_test = pickle.load(input_file)['test']
subjects_test = [subject for cohort in subjects_test for subject in cohort]
n_spindles, n_spindles_gs = 0, 0
n_true_positives = np.zeros_like(overlap_thresholds, dtype=int)
for subject in subjects_test:
data_blocks = subject.data
spindle_blocks = subject.spindles
for data_vect, spindle_vect in zip(data_blocks, spindle_blocks):
spindles_gs = spindle_vect_to_indices(spindle_vect)
spindles = detect_spindles(data_vect, thresholds, win_length_sec, win_step_sec, sampling_rate)[1]
n_spindles += spindles.shape[0]
n_spindles_gs += spindles_gs.shape[0]
n_true_positives += get_true_positives(spindles, spindles_gs, overlap_thresholds)
return metric_scores(n_spindles, n_spindles_gs, n_true_positives)[2].mean()
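# A minimal usage sketch (commented out; the calling script and the list of
# pickle paths are assumptions, not part of this helper):
#   from concurrent.futures import ProcessPoolExecutor
#   from scripts.helper.a7_parallel_evaluation import split_evaluation
#   input_paths = [...]  # one subjects pickle per data split
#   with ProcessPoolExecutor() as executor:
#       mean_score_per_split = list(executor.map(split_evaluation, input_paths))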
| 37 | 116 | 0.749506 |
4a24baf39bd618835c2985026e77d7575c1534cf | 4,673 | py | Python | verify/academic3d-done/superp_init.py | zhaohj2017/FAoC-tool | 9931a87a4831d45f4109af2cd1f990d4b30fc2dd | [
"BSD-3-Clause"
] | null | null | null | verify/academic3d-done/superp_init.py | zhaohj2017/FAoC-tool | 9931a87a4831d45f4109af2cd1f990d4b30fc2dd | [
"BSD-3-Clause"
] | null | null | null | verify/academic3d-done/superp_init.py | zhaohj2017/FAoC-tool | 9931a87a4831d45f4109af2cd1f990d4b30fc2dd | [
"BSD-3-Clause"
] | null | null | null | import torch
import torch.nn as nn
import acti
import numpy as np
############################################
# set default data type to double; for GPU
# training use float
############################################
torch.set_default_dtype(torch.float64)
torch.set_default_tensor_type(torch.DoubleTensor)
# torch.set_default_dtype(torch.float32)
# torch.set_default_tensor_type(torch.FloatTensor)
VERBOSE = 1 # set to 1 to display epoch and batch losses in the training process
VISUAL = 1 # plot figure or not
FINE_TUNE = 0 # set to 1 for fine-tuning a pre-trained model
FIX_CTRL = 0
FIX_BARR = 0
############################################
# set the system dimension
############################################
DIM_S = 3 # dimension of system
DIM_C = 1 # dimension of controller input
############################################
# set the network architecture
############################################
N_H_B = 1 # the number of hidden layers for the barrier
D_H_B = 10 # the number of neurons of each hidden layer for the barrier
N_H_C = 1 # the number of hidden layers for the controller
D_H_C = 5 # the number of neurons of each hidden layer for the controller
############################################
# for activation function definition
############################################
BENT_DEG = 0.0001
BARR_ACT = acti.my_act(BENT_DEG)
CTRL_ACT = nn.ReLU()
BARR_OUT_BOUND = 1e16 # set the output bound of the barrier NN
CTRL_OUT_BOUND = 1e16 # set the output bound of the controller NN: for bounded controller
############################################
# set loss function definition
############################################
TOL_INIT = 0.0
TOL_SAFE = 0.0
TOL_LIE = 0.0
TOL_NORM_LIE = 0.0
TOL_BOUNDARY = 0.02 # initial boundary 0.01
WEIGHT_LIE = 1.0
WEIGHT_NORM_LIE = 1.0
HEIGHT_ASYMP = 0.1 # set the norm lower bound outside a neighborhood of the asymptotic stability point with radius
RADIUS_ASYMP = 0.1 # set the radius of the neighborhood around the asymptotic stability point
ZERO_ASYMP = 0.01 # set the norm upper bound at the asymptotic stability point
WEIGHT_ASYMP_DOMAIN = 1
WEIGHT_ASYMP_POINT = 1
DECAY_LIE = 0.1 # decay of lie weight 0.1 works, 1 does not work
DECAY_INIT = 1
DECAY_UNSAFE = 1
DECAY_ASYMP = 0 # set the weight of the asymptotic stability loss
############################################
# number of training epochs
############################################
EPOCHS = 200
############################################
# my own scheduling policy:
# rate = alpha / (1 + beta * epoch^gamma)
############################################
ALPHA = 0.01
BETA = 0.2
GAMMA = 5
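# A minimal illustration of the decay rule above (this helper is not referenced
# elsewhere in this file and is only meant to make the formula concrete):
def scheduled_rate(epoch):
    # rate = alpha / (1 + beta * epoch^gamma); epoch 0 gives ALPHA = 0.01
    return ALPHA / (1.0 + BETA * epoch ** GAMMA)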
############################################
# training termination flags
############################################
LOSS_OPT_FLAG = 1e-16
TOL_MAX_GRAD = 5
GRAD_CTRL_FACTOR = 1.4
############################################
# for training set generation
############################################
TOL_DATA_GEN = 1e-16
DATA_EXP_I = np.array([5, 5, 5]) # for sampling from initial; length = prob.DIM
DATA_LEN_I = np.power(2, DATA_EXP_I) # the number of samples for each dimension of domain
BLOCK_EXP_I = np.array([2, 2, 2]) # 0 <= BATCH_EXP <= DATA_EXP
BLOCK_LEN_I = np.power(2, BLOCK_EXP_I) # number of batches for each dimension
DATA_EXP_U = np.array([7, 7, 7]) # for sampling from initial; length = prob.DIM
DATA_LEN_U = np.power(2, DATA_EXP_U) # the number of samples for each dimension of domain
BLOCK_EXP_U = np.array([4, 4, 4]) # 0 <= BATCH_EXP <= DATA_EXP
BLOCK_LEN_U = np.power(2, BLOCK_EXP_U) # number of batches for each dimension
DATA_EXP_D = np.array([7, 7, 7]) # for sampling from initial; length = prob.DIM
DATA_LEN_D = np.power(2, DATA_EXP_D) # the number of samples for each dimension of domain
BLOCK_EXP_D = np.array([4, 4, 4]) # 0 <= BATCH_EXP <= DATA_EXP
BLOCK_LEN_D = np.power(2, BLOCK_EXP_D) # number of batches for each dimension
############################################
# for plotting
############################################
PLOT_EXP_B = np.array([6, 6, 6]) # sampling from domain for plotting the boundary of barrier using contour plot
PLOT_LEN_B = np.power(2, PLOT_EXP_B) # the number of samples for each dimension of domain, usually larger than superp.DATA_LEN_D
PLOT_EXP_V = np.array([3, 3, 3]) # sampling from domain for plotting the vector field
PLOT_LEN_V = np.power(2, PLOT_EXP_V) # the number of samples for each dimension of domain, usually equal to PLOT_LEN_P
PLOT_EXP_P = np.array([3, 3, 3]) # sampling from domain for plotting the scattering sampling points, could be equal to PLOT_EXP_V
PLOT_LEN_P = np.power(2, PLOT_EXP_P) # the number of samples for each dimension of domain
| 36.507813 | 129 | 0.607961 |
4a24bb89a57d1b0e126a3e9a23379314f28952c8 | 16,545 | py | Python | cogs/fun.py | hyarsan/mewtwo-bot | 26bd66d524c004e26e228e013e51092e1c0b10d3 | [
"MIT"
] | null | null | null | cogs/fun.py | hyarsan/mewtwo-bot | 26bd66d524c004e26e228e013e51092e1c0b10d3 | [
"MIT"
] | null | null | null | cogs/fun.py | hyarsan/mewtwo-bot | 26bd66d524c004e26e228e013e51092e1c0b10d3 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import asyncio
import random
import aiohttp
import dateutil.parser
import dataset
import json
from urllib.parse import quote_plus
botver = "Mewtwo v2.0" #--Bot's version, obviously--#
melo =[ #--List of images for meloetta command--#
'https://sks316.s-ul.eu/gKaVnpMW',
'https://sks316.s-ul.eu/XrcEzi0D',
'https://sks316.s-ul.eu/QhOlFzPo',
'https://sks316.s-ul.eu/dTZahOws',
'https://sks316.s-ul.eu/gxOaIYS4',
'https://sks316.s-ul.eu/Nie0Y5r5',
'https://sks316.s-ul.eu/axWfYbxq',
'https://sks316.s-ul.eu/uQBUeHgU',
'https://sks316.s-ul.eu/kxy3fBZR',
'https://sks316.s-ul.eu/rR0KGQA6',
'https://sks316.s-ul.eu/axV5qlIv',
'https://sks316.s-ul.eu/RHcrxLUq',
'https://sks316.s-ul.eu/OPJxBIbi',
'https://sks316.s-ul.eu/BXfeZjyA',
'https://sks316.s-ul.eu/lvOv7s3l',
'https://sks316.s-ul.eu/4gHuLUIt',
'https://sks316.s-ul.eu/gXimbOvb',
'https://sks316.s-ul.eu/DXemfAIc',
'https://sks316.s-ul.eu/wRa5aW45',
'https://sks316.s-ul.eu/vFeRbpN0',
'https://sks316.s-ul.eu/kUj7aMYn',
'https://sks316.s-ul.eu/mhSt7XIt',
'https://sks316.s-ul.eu/oG1C1Fdj',
'https://sks316.s-ul.eu/l3rSSHA3',
'https://sks316.s-ul.eu/GR0djZpM',
'https://sks316.s-ul.eu/d3DsRTkt',
'https://sks316.s-ul.eu/aFAdkPwl',
'https://sks316.s-ul.eu/2Lfgxr8u',
'https://sks316.s-ul.eu/menN6SzZ',
]
sylv =[ #--List of images for sylveon command--#
'https://sks316.s-ul.eu/lI9yl512',
'https://sks316.s-ul.eu/Cd3WEZbC',
'https://sks316.s-ul.eu/3ad6iGd7',
'https://sks316.s-ul.eu/gfAJkE9h',
'https://sks316.s-ul.eu/koqtiQkG',
'https://sks316.s-ul.eu/IEvNaJKG',
'https://sks316.s-ul.eu/aCRWOb6o',
'https://sks316.s-ul.eu/HA5kRZ82',
'https://sks316.s-ul.eu/TtDIYyj3',
'https://sks316.s-ul.eu/cI5m3G3d',
'https://sks316.s-ul.eu/QXNRl1Tc',
'https://sks316.s-ul.eu/RqyWtcwB',
'https://sks316.s-ul.eu/thxdo9LZ',
'https://sks316.s-ul.eu/qtE5EnkO',
'https://sks316.s-ul.eu/chQPM1Up',
'https://sks316.s-ul.eu/Rfv8y8Mk',
'https://sks316.s-ul.eu/y0cDN1Ke',
'https://sks316.s-ul.eu/unwK2yuH',
'https://sks316.s-ul.eu/s944FXa5',
'https://sks316.s-ul.eu/P2HPReUq',
'https://sks316.s-ul.eu/MdflREtZ',
'https://sks316.s-ul.eu/VAxU1Ec1',
'https://sks316.s-ul.eu/ZBiFfWKI',
'https://sks316.s-ul.eu/d6znfTqy',
'https://sks316.s-ul.eu/VfyASOnw',
'https://sks316.s-ul.eu/gwITmAHt',
'https://sks316.s-ul.eu/mYo1KKW3',
'https://sks316.s-ul.eu/MPbW5CLJ',
]
f_meme =[ #--List of images for F command--#
'https://sks316.s-ul.eu/4UcpmYzH',
'https://sks316.s-ul.eu/sRhWN9Jh',
'https://sks316.s-ul.eu/jIz8Jr9f',
'https://sks316.s-ul.eu/TP1QOm9m',
'https://sks316.s-ul.eu/oX6ZAfTP',
'https://sks316.s-ul.eu/2ALb0Hdr',
'https://sks316.s-ul.eu/0zUx9W6J',
'https://sks316.s-ul.eu/zv1apj1v.gif',
'https://sks316.s-ul.eu/w92uwhgy',
'https://sks316.s-ul.eu/uegYRi8z',
'https://sks316.s-ul.eu/eLnNc2yC',
'https://sks316.s-ul.eu/FhtyhBGl',
'https://sks316.s-ul.eu/BVDgB6Yh',
'https://sks316.s-ul.eu/DEQOBojh',
'https://sks316.s-ul.eu/Hgl2703u.gif',
'https://sks316.s-ul.eu/6icO2tDG',
'https://sks316.s-ul.eu/hlno0gnA',
'https://sks316.s-ul.eu/umOkjS6D',
]
class Fun(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def greet(self, ctx):
await ctx.send(":smiley: :wave: Hey there!")
@commands.command(aliases=["respects"])
@commands.cooldown(3, 5, commands.BucketType.user)
async def f(self, ctx):
embed = discord.Embed(title='😔 Today, we pay our respects to those that have left us.', color=0x8253c3)
embed.set_image(url=random.choice(f_meme))
embed.set_footer(text=botver + " by sks316#2523", icon_url='https://sks316.s-ul.eu/bsHvTCLJ')
await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(3, 5, commands.BucketType.user)
async def meloetta(self, ctx):
embed = discord.Embed(title="<:meloetta_aria:598168128345604127> Here you go, a cute Meloetta! :smile:",color=0x9fdf42)
embed.add_field(name='List of image sources:', value="https://pastebin.com/cRd5vguH")
embed.set_image(url=random.choice(melo))
embed.set_footer(text=botver + " by sks316#2523", icon_url='https://sks316.s-ul.eu/bsHvTCLJ')
await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(3, 5, commands.BucketType.user)
async def sylveon(self, ctx):
embed = discord.Embed(title="<:sylveon:597725070764277786> Here, have some cute Sylveon art :3",color=0xffccfe)
embed.add_field(name='List of image sources:', value="https://pastebin.com/RwGHXDmS")
embed.set_image(url=random.choice(sylv))
embed.set_footer(text=botver + " by sks316#2523", icon_url='https://sks316.s-ul.eu/bsHvTCLJ')
await ctx.send(embed=embed)
@commands.command(aliases=["pokemon", "pkmn"])
@commands.cooldown(1, 5, commands.BucketType.user)
async def pokedex(self, ctx, *, arg):
#--Some Pokemon with several forms are named differently on the API, so if one of those Pokemon are specified, we replace the query with the correct name--#
pkmn = {
'meloetta': 'Meloetta - Aria Forme',
'keldeo': 'Keldeo - Ordinary Form',
'burmy': 'Burmy - Plant Cloak',
'wormadam': 'Wormadam - Plant Cloak',
'cherrim': 'Cherrim - Overcast Form',
'giratina': 'Giratina - Altered Forme',
'shaymin': 'Shaymin - Land Forme',
'basculin': 'Basculin - Red-Striped Form',
'deerling': 'Deerling - Spring Form',
'tornadus': 'Tornadus - Incarnate Forme',
'thundurus': 'Thundurus - Incarnate Forme',
'landorus': 'Landorus - Incarnate Forme',
'flabebe': 'Flabébé',
'zygarde': 'Zygarde - Complete Forme',
'hoopa': 'Hoopa Confined',
'oricorio': 'Oricorio - Baile Style',
'lycanroc': 'Lycanroc - Midday Form',
'wishiwashi': 'Wishiwashi - Solo Form',
'minior': 'Minior - Meteor Form',
'mimikyu': 'Mimikyu - Disguised Form',
}.get(arg.lower(), arg)
#--First we connect to the Pokedex API and download the Pokedex entry--#
async with aiohttp.ClientSession() as session:
async with session.get('https://pokeapi.glitch.me/v1/pokemon/' + pkmn) as dex_entry:
data = await dex_entry.json()
#--Now we attempt to extract information--#
try:
pkmn_name = data[0]['name']
pkmn_no = data[0]['number']
pkmn_desc = data[0]['description']
pkmn_img = data[0]['sprite']
pkmn_height = data[0]['height']
pkmn_weight = data[0]['weight']
pkmn_species = data[0]['species']
pkmn_type1 = data[0]['types'][0]
pkmn_gen = str(data[0]['gen'])
pkmn_ability1 = data[0]['abilities']['normal'][0]
#--Detect if Pokemon has a second ability--#
try:
pkmn_ability2 = data[0]['abilities']['normal'][1]
except IndexError:
pkmn_ability2 = None
#--Detect if Pokemon has a hidden ability--#
try:
pkmn_hiddenability = data[0]['abilities']['hidden'][0]
except IndexError:
pkmn_hiddenability = None
#--Detect if Pokemon has a second type--#
try:
pkmn_type2 = data[0]['types'][1]
except IndexError:
pkmn_type2 = None
#--Finally, we format it into a nice little embed--#
embed = discord.Embed(title="<:pokeball:609749611321753669> Pokédex information for " + pkmn_name + " (#" + pkmn_no + ")", description=pkmn_desc, color=0xd82626)
embed.add_field(name='Height', value=pkmn_height)
embed.add_field(name='Weight', value=pkmn_weight)
embed.add_field(name='Species', value=pkmn_species)
#--Detect if type2 is defined--#
if pkmn_type2 == None:
embed.add_field(name='Type', value=pkmn_type1)
else:
embed.add_field(name='Types', value=pkmn_type1 + ", " + pkmn_type2)
#--Detect if ability2 and hiddenability defined--#
if pkmn_ability2 == None:
if pkmn_hiddenability == None:
embed.add_field(name='Ability', value=pkmn_ability1)
else:
embed.add_field(name='Abilities', value=pkmn_ability1 + ";\n **Hidden:** " + pkmn_hiddenability)
else:
if pkmn_hiddenability == None:
embed.add_field(name='Abilities', value=pkmn_ability1 + ", " + pkmn_ability2)
else:
embed.add_field(name='Abilities', value=pkmn_ability1 + ", " + pkmn_ability2 + ";\n **Hidden:** " + pkmn_hiddenability)
embed.add_field(name='Generation Introduced', value="Gen " + pkmn_gen)
embed.set_thumbnail(url=pkmn_img)
embed.set_footer(text=botver + " by sks316#2523", icon_url='https://sks316.s-ul.eu/bsHvTCLJ')
await ctx.send(embed=embed)
except KeyError:
return await ctx.send(":x: I couldn't find any Pokémon with that name. Double-check your spelling and try again. \nIf you're certain that this Pokémon exists, file a bug report with **>bug**.")
@commands.command()
@commands.cooldown(1, 5, commands.BucketType.user)
async def hug(self, ctx, *, user: discord.Member = None):
if user == None:
return await ctx.send(":x: You need someone to hug! You can hug me if you want...")
if user == ctx.author:
return await ctx.send(":x: You can't hug yourself! You can hug me if you want...")
#--Get image from NekosLife API--#
async with aiohttp.ClientSession() as session:
async with session.get('https://nekos.life/api/v2/img/hug') as hug:
data = await hug.json()
result = data.get('url')
embed = discord.Embed(title="🤗 " + ctx.author.name + " hugs " + user.name + "!", color=0x8253c3)
embed.set_image(url=result)
embed.set_footer(text=botver + " by sks316#2523", icon_url='https://sks316.s-ul.eu/bsHvTCLJ')
await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(1, 5, commands.BucketType.user)
async def cuddle(self, ctx, *, user: discord.Member = None):
if user == None:
return await ctx.send(":x: You need someone to cuddle! You can cuddle me if you want...")
if user == ctx.author:
return await ctx.send(":x: You can't cuddle yourself! You can cuddle me if you want...")
#--Get image from NekosLife API--#
async with aiohttp.ClientSession() as session:
async with session.get('https://nekos.life/api/v2/img/cuddle') as cuddle:
data = await cuddle.json()
result = data.get('url')
embed = discord.Embed(title="🤗 " + ctx.author.name + " cuddles " + user.name + "!", color=0x8253c3)
embed.set_image(url=result)
embed.set_footer(text=botver + " by sks316#2523", icon_url='https://sks316.s-ul.eu/bsHvTCLJ')
await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(1, 5, commands.BucketType.user)
async def kiss(self, ctx, *, user: discord.Member = None):
if user == None:
return await ctx.send(":x: You need someone to kiss! You can kiss me if you want...")
if user == ctx.author:
return await ctx.send(":x: You can't kiss yourself! You can kiss me if you want...")
#--Get image from NekosLife API--#
async with aiohttp.ClientSession() as session:
async with session.get('https://nekos.life/api/v2/img/kiss') as kiss:
data = await kiss.json()
result = data.get('url')
embed = discord.Embed(title="❤ " + ctx.author.name + " kisses " + user.name + "!", color=0x8253c3)
embed.set_image(url=result)
embed.set_footer(text=botver + " by sks316#2523", icon_url='https://sks316.s-ul.eu/bsHvTCLJ')
await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(1, 5, commands.BucketType.user)
async def snuggle(self, ctx, *, user: discord.Member = None):
if user == None:
return await ctx.send(":x: You need someone to cuddle! You can cuddle me if you want...")
if user == ctx.author:
return await ctx.send(":x: You can't cuddle yourself! You can cuddle me if you want...")
#--Get image from NekosLife API--#
async with aiohttp.ClientSession() as session:
async with session.get('https://nekos.life/api/v2/img/cuddle') as snuggle:
data = await snuggle.json()
result = data.get('url')
embed = discord.Embed(title="🤗 " + ctx.author.name + " snuggles " + user.name + "!", color=0x8253c3)
embed.set_image(url=result)
embed.set_footer(text=botver + " by sks316#2523", icon_url='https://sks316.s-ul.eu/bsHvTCLJ')
await ctx.send(embed=embed)
@commands.command(aliases=["nsl", "ns", "switch"])
@commands.cooldown(1, 5, commands.BucketType.user)
async def nslookup(self, ctx, *, game):
#return await ctx.send(":x: Sorry, nslookup is not functioning right now. The esho.pw API, which is what I use for getting information on Nintendo Switch games, is down for (presumably) upgrades and maintenance. This is not something I can fix, and I have no idea when it'll be back. Please have patience! Thank you!")
loading = await ctx.send('<a:loading:598027019447435285> Looking for a game on the eShop...')
#--First we connect to the eSho.pw API--#
async with aiohttp.ClientSession() as cs:
async with cs.get("https://api.esho.pw/games") as r:
data = await r.json(content_type="text/plain")
#--Now we find information for the game and attempt to extract it--#
for g in data:
if g["title_lower"] == game.lower():
gm = g
break
else:
gm = None
if gm is None:
await loading.edit(content=":x: I couldn't find that game. Double-check your spelling and try again.")
return
#--Now we format this into a nice embed to send back to Discord--#
embed = discord.Embed(title="ℹ Nintendo Switch game information", color=0xff0000)
embed.add_field(name="Title", value=gm["Title"], inline=True)
#embed.add_field(name="Price", value="${}.{}".format(str(gm["Prices"]["US"])[0:2], str(gm["Prices"]["US"])[-2:]), inline=True)
dt = dateutil.parser.parse(gm["Published"])
embed.add_field(name="Released", value="{}/{}/{}".format(dt.month, dt.day, dt.year), inline=True)
embed.add_field(name="Description", value=gm["Excerpt"], inline=True)
embed.add_field(name="Categories", value=", ".join(gm["Categories"]).title(), inline=True)
if "metascore" in gm["Metacritic"]:
embed.add_field(name="Metacritic Score", value=gm["Metacritic"]["metascore"], inline=True)
else:
embed.add_field(name="Metacritic Score", value="None found!", inline=True)
embed.set_image(url="https://" + gm["Image"][2:])
embed.set_footer(text=botver + " by sks316#2523", icon_url='https://sks316.s-ul.eu/bsHvTCLJ')
await loading.edit(content='', embed=embed)
def setup(bot):
bot.add_cog(Fun(bot)) | 51.542056 | 326 | 0.57806 |
4a24bc548153f77dfaa8e558053e8fb9a9fbf1cc | 1,186 | py | Python | yui/utils/__init__.py | item4/yui | 8628d0d54b94ada3cbe7d1b0f624063258bad10a | [
"MIT"
] | 36 | 2017-06-12T01:09:46.000Z | 2021-01-31T17:57:41.000Z | yui/utils/__init__.py | item4/yui | 8628d0d54b94ada3cbe7d1b0f624063258bad10a | [
"MIT"
] | 145 | 2017-06-21T13:31:29.000Z | 2021-06-20T01:01:30.000Z | yui/utils/__init__.py | item4/yui | 8628d0d54b94ada3cbe7d1b0f624063258bad10a | [
"MIT"
] | 21 | 2017-07-24T15:53:19.000Z | 2021-12-23T04:18:31.000Z | from .cast import AnyCaster
from .cast import BaseCaster
from .cast import BoolCaster
from .cast import CastError
from .cast import CasterBox
from .cast import DictCaster
from .cast import KNOWN_TYPES
from .cast import KnownTypesCaster
from .cast import ListCaster
from .cast import NewTypeCaster
from .cast import NoHandleCaster
from .cast import NoneType
from .cast import NoneTypeCaster
from .cast import SetCaster
from .cast import TupleCaster
from .cast import TypeVarCaster
from .cast import UnionCaster
from .cast import UnionType
from .cast import cast
from .datetime import datetime
from .datetime import now
from .format import bold
from .format import code
from .format import italics
from .format import preformatted
from .format import quote
from .format import strike
from .fuzz import KOREAN_ALPHABETS_FIRST_MAP
from .fuzz import KOREAN_ALPHABETS_MIDDLE_MAP
from .fuzz import KOREAN_END
from .fuzz import KOREAN_START
from .fuzz import match
from .fuzz import normalize_korean_nfc_to_nfd
from .fuzz import partial_ratio
from .fuzz import ratio
from .fuzz import token_sort_ratio
from .handler import get_handler
from .html import strip_tags
from .url import b64_redirect
| 29.65 | 45 | 0.835582 |
4a24bc7f9137a5247678617f2d58cdabe18309ea | 3,739 | py | Python | src/nodeconductor_gitlab/tests/test_resources.py | livenson/nodeconductor-gitlab | f668a1c1738c9ec23bbf8e6b6b27f7bdb491b873 | [
"MIT"
] | null | null | null | src/nodeconductor_gitlab/tests/test_resources.py | livenson/nodeconductor-gitlab | f668a1c1738c9ec23bbf8e6b6b27f7bdb491b873 | [
"MIT"
] | null | null | null | src/nodeconductor_gitlab/tests/test_resources.py | livenson/nodeconductor-gitlab | f668a1c1738c9ec23bbf8e6b6b27f7bdb491b873 | [
"MIT"
] | null | null | null | import mock
from rest_framework import status, test
from nodeconductor.structure.tests import factories as structure_factories
from . import factories
@mock.patch('nodeconductor.structure.models.ServiceProjectLink.get_backend')
class ProjectDeletionTest(test.APITransactionTestCase):
def setUp(self):
self.admin = structure_factories.UserFactory(is_staff=True)
def test_when_synced_project_deleted_view_calls_backend(self, mock_backend):
project = factories.GitLabProjectFactory(backend_id='valid_backend_id')
self.client.force_authenticate(user=self.admin)
url = factories.GitLabProjectFactory.get_url(project)
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
mock_backend().destroy.assert_called_with(project, force=False)
def test_when_project_is_not_synced_backend_is_not_called(self, mock_backend):
project = factories.GitLabProjectFactory(backend_id='')
self.client.force_authenticate(user=self.admin)
url = factories.GitLabProjectFactory.get_url(project)
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(mock_backend().destroy.called)
@mock.patch('nodeconductor.structure.models.ServiceProjectLink.get_backend')
class GroupCreationTest(test.APITransactionTestCase):
def setUp(self):
self.staff = structure_factories.UserFactory(is_staff=True)
self.spl = factories.GitLabServiceProjectLinkFactory()
self.valid_data = {
'path': 'test-group',
'name': 'Test Group',
'service_project_link': factories.GitLabServiceProjectLinkFactory.get_url(self.spl),
}
def test_group_cannot_be_created_if_group_with_such_path_already_exist(self, mock_backend):
self.client.force_authenticate(user=self.staff)
url = factories.GitLabGroupFactory.get_list_url()
factories.GitLabGroupFactory(path=self.valid_data['path'], service_project_link=self.spl)
response = self.client.post(url, self.valid_data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@mock.patch('nodeconductor.structure.models.ServiceProjectLink.get_backend')
class GroupDeletionTest(test.APITransactionTestCase):
def setUp(self):
self.admin = structure_factories.UserFactory(is_staff=True)
def test_when_group_deleted_view_calls_backend(self, mock_backend):
group = factories.GitLabGroupFactory(backend_id='valid_backend_id')
self.client.force_authenticate(user=self.admin)
url = factories.GitLabGroupFactory.get_url(group)
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
mock_backend().destroy.assert_called_with(group, force=False)
def test_when_group_is_not_synced_backend_is_not_called(self, mock_backend):
group = factories.GitLabGroupFactory(backend_id='')
self.client.force_authenticate(user=self.admin)
url = factories.GitLabGroupFactory.get_url(group)
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(mock_backend().destroy.called)
def test_if_group_has_project_deletion_is_not_allowed(self, mock_backend):
group = factories.GitLabGroupFactory()
factories.GitLabProjectFactory(group=group)
self.client.force_authenticate(user=self.admin)
url = factories.GitLabGroupFactory.get_url(group)
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
| 41.087912 | 97 | 0.754212 |
4a24bdbfb47559077775c123622e23b5a7838f5b | 6,274 | py | Python | BrainAnnex/pages/BA_pages_request_handler.py | BrainAnnex/brain-annex | 07701ba0309c448e9030a19a10dca4d73c155afe | [
"MIT"
] | null | null | null | BrainAnnex/pages/BA_pages_request_handler.py | BrainAnnex/brain-annex | 07701ba0309c448e9030a19a10dca4d73c155afe | [
"MIT"
] | 3 | 2021-12-19T03:58:42.000Z | 2022-02-11T07:40:46.000Z | BrainAnnex/pages/BA_pages_request_handler.py | BrainAnnex/brain-annex | 07701ba0309c448e9030a19a10dca4d73c155afe | [
"MIT"
] | null | null | null | from BrainAnnex.modules.neo_schema.neo_schema import NeoSchema
"""
MIT License. Copyright (c) 2021-2022 Julian A. West
"""
class PagesRequestHandler:
"""
Used by the UI for Page Generation.
This class does NOT get instantiated.
"""
db = None # "NeoAccess" object. MUST be set before using this class!
@classmethod
def get_content_items_by_category(cls, category_id = 1) -> [{}]:
"""
Return the records for all nodes linked to the Category node identified by its item_id value
:param category_id:
:return: A list of dictionaries
EXAMPLE:
[{'schema_code': 'i', 'item_id': 1,'width': 450, 'basename': 'my_pic', 'suffix': 'PNG', pos: 0, 'class_name': 'Images'},
{'schema_code': 'h', 'item_id': 1, 'text': 'Overview', pos: 10, 'class_name': 'Headers'},
             {'schema_code': 'n', 'item_id': 1, 'basename': 'overview', 'suffix': 'htm', pos: 20, 'class_name': 'Notes'}
]
"""
# Locate all the Content Items linked to the given Category, and also extract the name of the schema Class they belong to
cypher = """
MATCH (cl :CLASS)<-[:SCHEMA]-(n :BA)-[r :BA_in_category]->(category :BA {schema_code:"cat", item_id:$category_id})
RETURN n, r.pos AS pos, cl.name AS class_name
ORDER BY r.pos
"""
result = cls.db.query(cypher, {"category_id": category_id})
#print(result)
#content_item_list = [elem["n"] for elem in result]
content_item_list = []
for elem in result:
item_record = elem["n"] # A dictionary with the various fields
# TODO: eliminate possible conflict if the node happens to have
# attributes named "pos" or "class_name"!
item_record["pos"] = elem["pos"] # Inject into the record a positional value
item_record["class_name"] = elem["class_name"] # Inject into the record the name of its Class
content_item_list.append(item_record)
#print(content_item_list)
return content_item_list
@classmethod
def get_node_labels(cls) -> [str]:
"""
Look up and return a list of all the node labels in the database.
EXAMPLE: ["my_label_1", "my_label_2"]
:return: A list of strings
"""
label_list = cls.db.get_labels() # Fetch all the node labels in the database
return label_list
@classmethod
def all_schema_classes(cls) -> [str]:
"""
Return a list of all the existing Schema classes
:return:
"""
return NeoSchema.get_all_classes()
############################# CATEGORY-RELATED (TODO: being moved to categories.py) #############################
@classmethod
def get_subcategories(cls, category_id) -> [dict]:
"""
Return all the (immediate) subcategories of the given category,
as a list of dictionaries with keys 'id' and 'name' TODO: fix
EXAMPLE:
OLD -> [{'id': 2, 'name': 'Work'}, {'id': 3, 'name': 'Hobbies'}]
[{'item_id': 2, 'name': 'Work', remarks: 'outside employment'}, {'item_id': 3, 'name': 'Hobbies'}]
:param category_id:
:return: A list of dictionaries
"""
q = '''
MATCH (sub:BA {schema_code:"cat"})-[BA_subcategory_of]->(c:BA {schema_code:"cat", item_id:$category_id})
RETURN sub.item_id AS id, sub.name AS name
'''
result = cls.db.query(q, {"category_id": category_id})
'''
new = cls.db.follow_links(labels="BA", key_name="item_id", key_value=category_id,
rel_name="BA_subcategory_of", rel_dir="IN",
neighbor_labels="BA")
# OR: properties_condition = {"item_id": category_id, "schema_code": "cat"}
'''
return result
@classmethod
def get_parent_categories(cls, category_id) -> [dict]:
"""
Return all the (immediate) parent categories of the given category,
as a list of dictionaries with all the keys of the Category Class
TODO: fix inconsistency. This function uses item_id ; others use just id
EXAMPLE:
[{'item_id': 2, 'name': 'Work', remarks: 'outside employment'}, {'item_id': 3, 'name': 'Hobbies'}]
:param category_id:
:return: A list of dictionaries
"""
match = cls.db.find(labels="BA",
properties={"item_id": category_id, "schema_code": "cat"})
result = cls.db.follow_links(match, rel_name="BA_subcategory_of", rel_dir="OUT",
neighbor_labels="BA")
return result
@classmethod
def get_all_categories(cls, exclude_root=True) -> [dict]:
"""
TODO: phase out, in favor of Categories.get_all_categories (which uses 'item_id' instead of 'id')
Return all the existing Categories - possibly except the root -
as a list of dictionaries with keys 'id', 'name', 'remarks'
sorted by name
EXAMPLE:
[{'id': 3, 'name': 'Hobbies'}, {'id': 2, 'name': 'Work', 'remarks': 'paid jobs'}]
Note that missing "remarks" values are not in the dictionaries
:param exclude_root:
:return: A list of dictionaries
"""
clause = ""
if exclude_root:
clause = "WHERE cat.item_id <> 1"
q = f'''
MATCH (cat:BA {{schema_code:"cat"}})
{clause}
RETURN cat.item_id AS id, cat.name AS name, cat.remarks AS remarks
ORDER BY toLower(cat.name)
'''
# Notes: 1 is the ROOT
# Sorting must be done across consistent capitalization, or "GSK" will appear before "German"!
result = cls.db.query(q)
# Ditch all the missing "remarks" values
for cat in result:
if cat["remarks"] is None:
del cat["remarks"]
return result
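# Minimal usage sketch (illustrative only -- it assumes a NeoAccess-style driver object;
# the import path and connection parameters below are hypothetical and must match however
# the hosting application actually initializes its database handle):
#
#   from BrainAnnex.modules.neo_access.neo_access import NeoAccess   # hypothetical path
#
#   PagesRequestHandler.db = NeoAccess("bolt://localhost:7687", ("neo4j", "password"))
#   items = PagesRequestHandler.get_content_items_by_category(category_id=1)
#   subcategories = PagesRequestHandler.get_subcategories(category_id=1)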
| 35.050279 | 140 | 0.551323 |
4a24bfba346fd488a39ee091046075a7e8e343bc | 6,284 | py | Python | river/stats/quantile.py | f3d3r1c00/river | bbf8af07ee75c30f416d5d4dc7ce4c61efc70fab | [
"BSD-3-Clause"
] | 2 | 2021-04-13T09:19:42.000Z | 2021-12-22T13:43:15.000Z | river/stats/quantile.py | f3d3r1c00/river | bbf8af07ee75c30f416d5d4dc7ce4c61efc70fab | [
"BSD-3-Clause"
] | null | null | null | river/stats/quantile.py | f3d3r1c00/river | bbf8af07ee75c30f416d5d4dc7ce4c61efc70fab | [
"BSD-3-Clause"
] | null | null | null | import math
from river import utils
from . import base
class Quantile(base.Univariate):
"""Running quantile.
Uses the P² algorithm, which is also known as the "Piecewise-Parabolic quantile estimator".
The code is inspired by LiveStat's implementation [^2].
Parameters
----------
q
Determines which quantile to compute, must be comprised between 0 and 1.
Examples
--------
>>> from river import stats
>>> import numpy as np
>>> np.random.seed(42 * 1337)
>>> mu, sigma = 0, 1
>>> s = np.random.normal(mu, sigma, 500)
>>> median = stats.Quantile(0.5)
>>> for x in s:
... _ = median.update(x)
>>> print(f'The estimated value of the 50th (median) quantile is {median.get():.4f}')
The estimated value of the 50th (median) quantile is -0.0275
>>> print(f'The real value of the 50th (median) quantile is {np.median(s):.4f}')
The real value of the 50th (median) quantile is -0.0135
>>> percentile_17 = stats.Quantile(0.17)
>>> for x in s:
... _ = percentile_17.update(x)
>>> print(f'The estimated value of the 17th quantile is {percentile_17.get():.4f}')
The estimated value of the 17th quantile is -0.8652
>>> print(f'The real value of the 17th quantile is {np.percentile(s,17):.4f}')
The real value of the 17th quantile is -0.9072
References
----------
    [^1]: [The P² Algorithm for Dynamic Calculation of Quantiles and Histograms Without Storing Observations](https://www.cse.wustl.edu/~jain/papers/ftp/psqr.pdf)
[^2]: [LiveStats](https://github.com/cxxr/LiveStats)
[^3]: [P² quantile estimator: estimating the median without storing values](https://aakinshin.net/posts/p2-quantile-estimator/)
"""
def __init__(self, q=0.5):
if not 0 < q < 1:
raise ValueError("q is not comprised between 0 and 1")
self.q = q
self.desired_marker_position = [0, self.q / 2, self.q, (1 + self.q) / 2, 1]
self.marker_position = [1, 1 + 2 * self.q, 1 + 4 * self.q, 3 + 2 * self.q, 5]
self.position = list(range(1, 6))
self.heights = []
self.heights_sorted = False
def _find_k(self, x):
if x < self.heights[0]:
self.heights[0] = x
k = 1
else:
for i in range(1, 5):
if self.heights[i - 1] <= x and x < self.heights[i]:
k = i
break
else:
k = 4
if self.heights[-1] < x:
self.heights[-1] = x
return k
@classmethod
def _compute_P2(cls, qp1, q, qm1, d, np1, n, nm1):
d = float(d)
n = float(n)
np1 = float(np1)
nm1 = float(nm1)
outer = d / (np1 - nm1)
inner_left = (n - nm1 + d) * (qp1 - q) / (np1 - n)
inner_right = (np1 - n - d) * (q - qm1) / (n - nm1)
return q + outer * (inner_left + inner_right)
def _adjust(self):
for i in range(1, 4):
n = self.position[i]
q = self.heights[i]
d = self.marker_position[i] - n
if (d >= 1 and self.position[i + 1] - n > 1) or (
d <= -1 and self.position[i - 1] - n < -1
):
d = int(math.copysign(1, d))
qp1 = self.heights[i + 1]
qm1 = self.heights[i - 1]
np1 = self.position[i + 1]
nm1 = self.position[i - 1]
qn = self._compute_P2(qp1, q, qm1, d, np1, n, nm1)
if qm1 < qn and qn < qp1:
self.heights[i] = qn
else:
self.heights[i] = q + d * (self.heights[i + d] - q) / (
self.position[i + d] - n
)
self.position[i] = n + d
return self
def update(self, x):
# Initialisation
if len(self.heights) != 5:
self.heights.append(x)
else:
if not self.heights_sorted:
self.heights.sort()
self.heights_sorted = True
            # Find cell k such that qk < Xj <= qk+1 and adjust extreme values (q1 and q5) if necessary
k = self._find_k(x)
            # Increment all positions greater than k
self.position = [j if i < k else j + 1 for i, j in enumerate(self.position)]
self.marker_position = [
x + y
for x, y in zip(self.marker_position, self.desired_marker_position)
]
# Adjust heights of markers 2-4 if necessary
self._adjust()
return self
def get(self):
if self.heights_sorted:
return self.heights[2]
if self.heights:
self.heights.sort()
length = len(self.heights)
return self.heights[int(min(max(length - 1, 0), length * self.q))]
return 0
class RollingQuantile(base.RollingUnivariate, utils.SortedWindow):
"""Running quantile over a window.
Parameters
----------
q
Determines which quantile to compute, must be comprised between 0 and 1.
window_size
Size of the window.
Examples
--------
>>> from river import stats
>>> rolling_quantile = stats.RollingQuantile(
... q=.5,
... window_size=100,
... )
>>> for i in range(0, 1001):
... rolling_quantile = rolling_quantile.update(i)
... if i % 100 == 0:
... print(rolling_quantile.get())
0
50
150
250
350
450
550
650
750
850
950
References
----------
[^1]: [Left sorted](https://stackoverflow.com/questions/8024571/insert-an-item-into-sorted-list-in-python)
"""
def __init__(self, q, window_size):
super().__init__(size=window_size)
self.q = q
self.idx = int(round(self.q * self.size + 0.5)) - 1
@property
def window_size(self):
return self.size
def update(self, x):
self.append(x)
return self
def get(self):
if len(self) < self.size:
idx = int(round(self.q * len(self) + 0.5)) - 1
return self[idx]
return self[self.idx]
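# For reference, the marker-height update implemented in Quantile._compute_P2 is the
# piecewise-parabolic (P²) prediction of Jain & Chlamtac; in the notation of the code
# (q = marker heights, n = marker positions, d = +/-1) it amounts to:
#
#   q_i' = q_i + d / (n_{i+1} - n_{i-1}) * (
#              (n_i - n_{i-1} + d) * (q_{i+1} - q_i) / (n_{i+1} - n_i)
#            + (n_{i+1} - n_i - d) * (q_i - q_{i-1}) / (n_i - n_{i-1}))
#
# which is exactly the `q + outer * (inner_left + inner_right)` expression above.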
| 27.682819 | 192 | 0.527849 |
4a24c01bee8122cb9e7b9195080eef0936909581 | 1,318 | py | Python | CursoEmVideo/curso em video/ex95.py | elisio-ricardo/ExerciciosPythonCursoEmVideo | 47a10b2118a76f4f95a762876ef9ab90e92f4fd3 | [
"MIT"
] | null | null | null | CursoEmVideo/curso em video/ex95.py | elisio-ricardo/ExerciciosPythonCursoEmVideo | 47a10b2118a76f4f95a762876ef9ab90e92f4fd3 | [
"MIT"
] | null | null | null | CursoEmVideo/curso em video/ex95.py | elisio-ricardo/ExerciciosPythonCursoEmVideo | 47a10b2118a76f4f95a762876ef9ab90e92f4fd3 | [
"MIT"
] | null | null | null | time = list()
jogador = dict()
partidas = list()
while True:
jogador.clear()
    jogador['nome'] = str(input('Player name: '))
    tot = int(input(f'How many matches did {jogador["nome"]} play? '))
partidas.clear()
for c in range(0, tot):
        partidas.append(int(input(f' How many goals in match {c+1}? ')))
jogador['gols'] = partidas[:]
jogador['total'] = sum(partidas)
time.append(jogador.copy())
while True:
        resp = str(input('Do you want to continue? [Y/N] ')).upper()[0]
        if resp in 'YN':
            break
        print('ERROR! Please answer only Y or N. ')
if resp == 'N':
break
print('-=' * 30)
print('cod', end=' ')
for i in jogador.keys():
print(f'{i:<15}', end=' ')
print()
print('-=' * 40)
for k, v in enumerate(time):
print(f'{k:>3}', end=' ')
for d in v.values():
print(f'{str(d):<15}', end=' ')
print()
print('-' * 40)
while True:
    busca = int(input('Show data for which player? (999 to stop) '))
if busca == 999:
break
    if busca >= len(time):
        print(f'ERROR! There is no player with code {busca}!')
else:
        print(f'Summary for player {time[busca]["nome"]}:')
for i, g in enumerate(time[busca]['gols']):
            print(f' Scored {g} goals in match {i+1}.')
print('-' * 40) | 30.651163 | 76 | 0.547041 |
4a24c126e7c4046280b77c5df9b05b481415ca98 | 1,133 | py | Python | python/src/nnabla/backward_function/patch_correlation.py | daniel-falk/nnabla | 3fe132ea52dc10521cc029a5d6ba8f565cf65ccf | [
"Apache-2.0"
] | 2,792 | 2017-06-26T13:05:44.000Z | 2022-03-28T07:55:26.000Z | python/src/nnabla/backward_function/patch_correlation.py | daniel-falk/nnabla | 3fe132ea52dc10521cc029a5d6ba8f565cf65ccf | [
"Apache-2.0"
] | 138 | 2017-06-27T07:04:44.000Z | 2022-02-28T01:37:15.000Z | python/src/nnabla/backward_function/patch_correlation.py | daniel-falk/nnabla | 3fe132ea52dc10521cc029a5d6ba8f565cf65ccf | [
"Apache-2.0"
] | 380 | 2017-06-26T13:23:52.000Z | 2022-03-25T16:51:30.000Z | # Copyright 2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def patch_correlation_backward(inputs, patch=(1, 1), shift=(0, 0), patch_step=(1, 1), shift_step=(1, 1), padding=(0, 0, 0, 0)):
"""
Args:
      inputs (list of nn.Variable): Incoming grads/inputs to/of the forward function.
kwargs (dict of arguments): Dictionary of the corresponding function arguments.
Return:
list of Variable: Return the gradients wrt inputs of the corresponding function.
"""
dy = inputs[0]
x0 = inputs[1]
raise NotImplementedError("patch_correlation_backward is not implemented.")
| 40.464286 | 127 | 0.72639 |
4a24c1adceec55613f4620b0dba8fac9ddf47ff1 | 26,310 | py | Python | airflow/dag_processing/processor.py | rliuamzn/airflow | 177dfbd12a42a5c229640c6c830f43f280ea5caa | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-07-30T16:54:20.000Z | 2021-08-03T13:51:59.000Z | airflow/dag_processing/processor.py | rliuamzn/airflow | 177dfbd12a42a5c229640c6c830f43f280ea5caa | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 6 | 2020-12-22T17:43:49.000Z | 2021-04-27T13:41:10.000Z | airflow/dag_processing/processor.py | rliuamzn/airflow | 177dfbd12a42a5c229640c6c830f43f280ea5caa | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-11-01T16:22:58.000Z | 2020-11-01T16:22:58.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import logging
import multiprocessing
import os
import signal
import threading
from contextlib import redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import List, Optional, Set, Tuple
from setproctitle import setproctitle
from sqlalchemy import func, or_
from sqlalchemy.orm.session import Session
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.models import DAG, DagModel, SlaMiss, errors
from airflow.models.dagbag import DagBag
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.callback_requests import (
CallbackRequest,
DagCallbackRequest,
SlaCallbackRequest,
TaskCallbackRequest,
)
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import provide_session
from airflow.utils.state import State
TI = models.TaskInstance
class DagFileProcessorProcess(LoggingMixin, MultiprocessingStartMethodMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: str
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(
self,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
callback_requests: List[CallbackRequest],
):
super().__init__()
self._file_path = file_path
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._callback_requests = callback_requests
# The process that was launched to process the given .
self._process: Optional[multiprocessing.process.BaseProcess] = None
# The result of DagFileProcessor.process_file(file_path).
self._result: Optional[Tuple[int, int]] = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time: Optional[datetime.datetime] = None
# This ID is use to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessorProcess.class_creation_counter
self._parent_channel: Optional[MultiprocessingConnection] = None
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self) -> str:
return self._file_path
@staticmethod
def _run_file_processor(
result_channel: MultiprocessingConnection,
parent_channel: MultiprocessingConnection,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
thread_name: str,
callback_requests: List[CallbackRequest],
) -> None:
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param parent_channel: the parent end of the channel to close in the child
:type parent_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: str
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_ids: if specified, only examine DAG ID's that are
in this list
:type dag_ids: list[str]
:param thread_name: the name to use for the process that is launched
:type thread_name: str
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:return: the process that was launched
:rtype: multiprocessing.Process
"""
# This helper runs in the newly created process
log: logging.Logger = logging.getLogger("airflow.processor")
# Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
# the child, else it won't get closed properly until we exit.
log.info("Closing parent pipe")
parent_channel.close()
del parent_channel
set_context(log, file_path)
setproctitle(f"airflow scheduler - DagFileProcessor {file_path}")
try:
# redirect stdout/stderr to log
with redirect_stdout(StreamLogWriter(log, logging.INFO)), redirect_stderr(
StreamLogWriter(log, logging.WARN)
), Stats.timer() as timer:
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
result: Tuple[int, int] = dag_file_processor.process_file(
file_path=file_path,
pickle_dags=pickle_dags,
callback_requests=callback_requests,
)
result_channel.send(result)
log.info("Processing %s took %.3f seconds", file_path, timer.duration)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
result_channel.close()
def start(self) -> None:
"""Launch the process and start processing the DAG."""
start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(start_method)
_parent_channel, _child_channel = context.Pipe(duplex=False)
process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
_parent_channel,
self.file_path,
self._pickle_dags,
self._dag_ids,
f"DagFileProcessor{self._instance_id}",
self._callback_requests,
),
name=f"DagFileProcessor{self._instance_id}-Process",
)
self._process = process
self._start_time = timezone.utcnow()
process.start()
# Close the child side of the pipe now the subprocess has started -- otherwise this would prevent it
# from closing in some cases
_child_channel.close()
del _child_channel
# Don't store it on self until after we've started the child process - we don't want to keep it from
# getting GCd/closed
self._parent_channel = _parent_channel
def kill(self) -> None:
"""Kill the process launched to process the file, and ensure consistent state."""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
self._kill_process()
def terminate(self, sigkill: bool = False) -> None:
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
with suppress(TimeoutError):
self._process._popen.wait(5) # type: ignore
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self) -> None:
if self._process is None:
raise AirflowException("Tried to kill process before starting!")
if self._process.is_alive() and self._process.pid:
self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
if self._parent_channel:
self._parent_channel.close()
@property
def pid(self) -> int:
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None or self._process.pid is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self) -> Optional[int]:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get exit code before starting!")
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
# If we get an EOFError, it means the child end of the pipe has been closed. This only happens
# in the finally block. But due to a possible race condition, the process may have not yet
# terminated (it could be doing cleanup/python shutdown still). So we kill it here after a
# "suitable" timeout.
self._done = True
# Arbitrary timeout -- error/race condition only, so this doesn't need to be tunable.
self._process.join(timeout=5)
if self._process.is_alive():
# Didn't shut down cleanly - kill it
self._kill_process()
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self) -> Optional[Tuple[int, int]]:
"""
:return: result of running DagFileProcessor.process_file()
:rtype: tuple[int, int] or None
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self) -> datetime.datetime:
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
@property
def waitable_handle(self):
return self._process.sentinel
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to DagFileProcessor.process_file
3. Serialize the DAGs and save it to DB (or update existing record in the DB).
4. Pickle the DAG and save it to the DB (if necessary).
5. Record any errors importing the file into ORM
Returns a tuple of 'number of dags found' and 'the count of import errors'
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param log: Logger to save the processing process
:type log: logging.Logger
"""
UNIT_TEST_MODE: bool = conf.getboolean('core', 'UNIT_TEST_MODE')
def __init__(self, dag_ids: Optional[List[str]], log: logging.Logger):
super().__init__()
self.dag_ids = dag_ids
self._log = log
@provide_session
def manage_slas(self, dag: DAG, session: Session = None) -> None:
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
self.log.info("Running SLA Checks for %s", dag.dag_id)
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
qry = (
session.query(TI.task_id, func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(TI.state == State.SUCCESS, TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id)
.subquery('sq')
)
max_tis: List[TI] = (
session.query(TI)
.filter(
TI.dag_id == dag.dag_id,
TI.task_id == qry.c.task_id,
TI.execution_date == qry.c.max_ti,
)
.all()
)
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
if not task.sla:
continue
if not isinstance(task.sla, timedelta):
raise TypeError(
f"SLA is expected to be timedelta object, got "
f"{type(task.sla)} in {task.dag_id}:{task.task_id}"
)
dttm = dag.following_schedule(ti.execution_date)
while dttm < ts:
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < ts:
session.merge(
SlaMiss(task_id=ti.task_id, dag_id=ti.dag_id, execution_date=dttm, timestamp=ts)
)
dttm = dag.following_schedule(dttm)
session.commit()
slas: List[SlaMiss] = (
session.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa
.all()
)
if slas:
sla_dates: List[datetime.datetime] = [sla.execution_date for sla in slas]
fetched_tis: List[TI] = (
session.query(TI)
.filter(TI.state != State.SUCCESS, TI.execution_date.in_(sla_dates), TI.dag_id == dag.dag_id)
.all()
)
blocking_tis: List[TI] = []
for ti in fetched_tis:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join(sla.task_id + ' on ' + sla.execution_date.isoformat() for sla in slas)
blocking_task_list = "\n".join(
ti.task_id + ' on ' + ti.execution_date.isoformat() for ti in blocking_tis
)
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info('Calling SLA miss callback')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
notification_sent = True
except Exception:
self.log.exception("Could not call sla_miss_callback for DAG %s", dag.dag_id)
email_content = f"""\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n<code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}<code></pre>
Airflow Webserver URL: {conf.get(section='webserver', key='base_url')}
"""
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.", sla.task_id
)
continue
tasks_missed_sla.append(task)
emails: Set[str] = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(emails, f"[airflow] SLA miss on DAG={dag.dag_id}", email_content)
email_sent = True
notification_sent = True
except Exception:
Stats.incr('sla_email_notification_failure')
self.log.exception("Could not send SLA Miss email notification for DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
sla.email_sent = email_sent
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session: Session, dagbag: DagBag) -> None:
"""
For the DAGs in the given DagBag, record any associated import errors and clears
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(errors.ImportError.filename == dagbag_file).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(
errors.ImportError(filename=filename, timestamp=timezone.utcnow(), stacktrace=stacktrace)
)
session.commit()
@provide_session
def execute_callbacks(
self, dagbag: DagBag, callback_requests: List[CallbackRequest], session: Session = None
) -> None:
"""
Execute on failure callbacks. These objects can come from SchedulerJob or from
DagFileProcessorManager.
:param dagbag: Dag Bag of dags
:param callback_requests: failure callbacks to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:param session: DB session.
"""
for request in callback_requests:
self.log.debug("Processing Callback Request: %s", request)
try:
if isinstance(request, TaskCallbackRequest):
self._execute_task_callbacks(dagbag, request)
elif isinstance(request, SlaCallbackRequest):
self.manage_slas(dagbag.dags.get(request.dag_id))
elif isinstance(request, DagCallbackRequest):
self._execute_dag_callbacks(dagbag, request, session)
except Exception:
self.log.exception(
"Error executing %s callback for file: %s",
request.__class__.__name__,
request.full_filepath,
)
session.commit()
@provide_session
def _execute_dag_callbacks(self, dagbag: DagBag, request: DagCallbackRequest, session: Session):
dag = dagbag.dags[request.dag_id]
dag_run = dag.get_dagrun(execution_date=request.execution_date, session=session)
dag.handle_callback(
dagrun=dag_run, success=not request.is_failure_callback, reason=request.msg, session=session
)
def _execute_task_callbacks(self, dagbag: DagBag, request: TaskCallbackRequest):
simple_ti = request.simple_task_instance
if simple_ti.dag_id in dagbag.dags:
dag = dagbag.dags[simple_ti.dag_id]
if simple_ti.task_id in dag.task_ids:
task = dag.get_task(simple_ti.task_id)
ti = TI(task, simple_ti.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = simple_ti.start_date
ti.end_date = simple_ti.end_date
ti.try_number = simple_ti.try_number
ti.state = simple_ti.state
ti.test_mode = self.UNIT_TEST_MODE
if request.is_failure_callback:
ti.handle_failure_with_callback(error=request.msg, test_mode=ti.test_mode)
self.log.info('Executed failure callback for %s in state %s', ti, ti.state)
@provide_session
def process_file(
self,
file_path: str,
callback_requests: List[CallbackRequest],
pickle_dags: bool = False,
session: Session = None,
) -> Tuple[int, int]:
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to this method.
3. Serialize the DAGs and save it to DB (or update existing record in the DB).
4. Pickle the DAG and save it to the DB (if necessary).
5. Record any errors importing the file into ORM
:param file_path: the path to the Python file that should be executed
:type file_path: str
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.dag_processing.CallbackRequest]
:param pickle_dags: whether serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:param session: Sqlalchemy ORM Session
:type session: Session
:return: number of dags found, count of import errors
:rtype: Tuple[int, int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
try:
dagbag = DagBag(file_path, include_examples=False, include_smart_sensor=False)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return 0, 0
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return 0, len(dagbag.import_errors)
self.execute_callbacks(dagbag, callback_requests)
# Save individual DAGs in the ORM
dagbag.sync_to_db()
if pickle_dags:
paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
unpaused_dags: List[DAG] = [
dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids
]
for dag in unpaused_dags:
dag.pickle(session)
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.log.exception("Error logging import errors!")
return len(dagbag.dags), len(dagbag.import_errors)
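# Minimal usage sketch (illustrative only; it assumes a configured Airflow environment
# and a parseable DAG file -- the path below is hypothetical):
#
#   processor = DagFileProcessorProcess(
#       file_path="/files/dags/example_dag.py",   # hypothetical path
#       pickle_dags=False,
#       dag_ids=None,
#       callback_requests=[],
#   )
#   processor.start()
#   while not processor.done:
#       ...                                       # the manager normally polls many processors
#   dag_count, import_error_count = processor.result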
| 40.476923 | 110 | 0.621475 |
4a24c2269b3c662ab174fda9c44b4df129da92f8 | 3,811 | py | Python | libraries/RealtimeUserSimulator.py | abhi-gm/Multi-Armed-Bandits-for-Recommendations-and-A-B-testing | 1626cc152e978a8cad223bce49b97fe5b5e1506b | [
"MIT"
] | null | null | null | libraries/RealtimeUserSimulator.py | abhi-gm/Multi-Armed-Bandits-for-Recommendations-and-A-B-testing | 1626cc152e978a8cad223bce49b97fe5b5e1506b | [
"MIT"
] | null | null | null | libraries/RealtimeUserSimulator.py | abhi-gm/Multi-Armed-Bandits-for-Recommendations-and-A-B-testing | 1626cc152e978a8cad223bce49b97fe5b5e1506b | [
"MIT"
] | 1 | 2021-10-20T22:27:58.000Z | 2021-10-20T22:27:58.000Z | '''
Author - Abhishek Maheshwarappa and Jiaxin Tong
'''
import numpy as np
from tqdm import tqdm
class ReplaySimulator(object):
'''
A class to provide base functionality for simulating the replayer method for online algorithms.
'''
def __init__(self, n_visits, reward_history, item_col_name, visitor_col_name, reward_col_name, n_iterations=1, random_seed=1):
np.random.seed(random_seed)
self.reward_history = reward_history
self.item_col_name = item_col_name
self.visitor_col_name = visitor_col_name
self.reward_col_name = reward_col_name
# number of visits to replay/simulate
self.n_visits = n_visits
# number of runs to average over
self.n_iterations = n_iterations
# items under test
self.items = self.reward_history[self.item_col_name].unique()
self.n_items = len(self.items)
# visitors in the historical reward_history (e.g., from ratings df)
self.visitors = self.reward_history[self.visitor_col_name].unique()
self.n_visitors = len(self.visitors)
def reset(self):
# number of times each item has been sampled (previously n_sampled)
self.n_item_samples = np.zeros(self.n_items)
# fraction of time each item has resulted in a reward (previously movie_clicks)
self.n_item_rewards = np.zeros(self.n_items)
def replay(self):
results = []
for iteration in tqdm(range(0, self.n_iterations)):
self.reset()
total_rewards = 0
fraction_relevant = np.zeros(self.n_visits)
for visit in range(0, self.n_visits):
found_match = False
while not found_match:
# choose a random visitor
visitor_idx = np.random.randint(self.n_visitors)
visitor_id = self.visitors[visitor_idx]
# select an item to offer the visitor
item_idx = self.select_item()
item_id = self.items[item_idx]
# if this interaction exists in the history, count it
reward = self.reward_history.query(
'{} == @item_id and {} == @visitor_id'.format(self.item_col_name, self.visitor_col_name))[self.reward_col_name]
found_match = reward.shape[0] > 0
reward_value = reward.iloc[0]
self.record_result(visit, item_idx, reward_value)
## record metrics
total_rewards += reward_value
fraction_relevant[visit] = total_rewards * 1. / (visit + 1)
result = {}
result['iteration'] = iteration
result['visit'] = visit
result['item_id'] = item_id
result['visitor_id'] = visitor_id
result['reward'] = reward_value
result['total_reward'] = total_rewards
result['fraction_relevant'] = total_rewards * 1. / (visit + 1)
results.append(result)
return results
def select_item(self):
return np.random.randint(self.n_items)
def record_result(self, visit, item_idx, reward):
self.n_item_samples[item_idx] += 1
alpha = 1./self.n_item_samples[item_idx]
self.n_item_rewards[item_idx] += alpha * (reward - self.n_item_rewards[item_idx])
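# Minimal usage sketch (illustrative only; it assumes a pandas DataFrame of historical
# interactions -- the DataFrame and column names below are hypothetical):
#
#   sim = ReplaySimulator(n_visits=1000,
#                         reward_history=ratings_df,        # hypothetical DataFrame
#                         item_col_name='movieId',
#                         visitor_col_name='userId',
#                         reward_col_name='liked',
#                         n_iterations=5)
#   results = sim.replay()   # list of dicts with per-visit running reward metrics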
| 35.616822 | 136 | 0.544739 |
4a24c36ce57e948ae8f1054073a649fcb99c21fb | 1,006 | py | Python | watchman/integration/test_clock.py | istiak101/watchman | 8bede2333411b4cafc43c08ed21866dc100f3bd2 | [
"MIT"
] | 1 | 2022-03-04T14:09:05.000Z | 2022-03-04T14:09:05.000Z | watchman/integration/test_clock.py | Siyabonga-Gregory/watchman | 4c2a9ee8bc01f16be5be81c6734c0a00f8548770 | [
"MIT"
] | null | null | null | watchman/integration/test_clock.py | Siyabonga-Gregory/watchman | 4c2a9ee8bc01f16be5be81c6734c0a00f8548770 | [
"MIT"
] | null | null | null | # vim:ts=4:sw=4:et:
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from watchman.integration.lib import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestClock(WatchmanTestCase.WatchmanTestCase):
def test_clock(self):
root = self.mkdtemp()
self.watchmanCommand("watch", root)
clock = self.watchmanCommand("clock", root)
self.assertRegex(clock["clock"], "^c:\\d+:\\d+:\\d+:\\d+$")
def test_clock_sync(self):
root = self.mkdtemp()
self.watchmanCommand("watch", root)
clock1 = self.watchmanCommand("clock", root, {"sync_timeout": 5000})
self.assertRegex(clock1["clock"], "^c:\\d+:\\d+:\\d+:\\d+$")
clock2 = self.watchmanCommand("clock", root, {"sync_timeout": 5000})
self.assertRegex(clock2["clock"], "^c:\\d+:\\d+:\\d+:\\d+$")
self.assertNotEqual(clock1, clock2)
| 33.533333 | 76 | 0.646123 |
4a24c3f3c813bc28ed1b6d89317402a43da8f160 | 3,705 | py | Python | tests/integration/cli/conftest.py | unparalleled-js/ape | b5443197ebd73186bbf8e716fa7bba3260f3dc8a | [
"Apache-2.0"
] | 210 | 2021-04-29T05:42:42.000Z | 2022-03-31T15:50:17.000Z | tests/integration/cli/conftest.py | unparalleled-js/ape | b5443197ebd73186bbf8e716fa7bba3260f3dc8a | [
"Apache-2.0"
] | 370 | 2021-04-29T01:54:32.000Z | 2022-03-31T19:19:29.000Z | tests/integration/cli/conftest.py | unparalleled-js/ape | b5443197ebd73186bbf8e716fa7bba3260f3dc8a | [
"Apache-2.0"
] | 25 | 2021-04-29T05:08:50.000Z | 2022-03-11T20:43:56.000Z | import os
from distutils.dir_util import copy_tree
from importlib import import_module
from pathlib import Path
import pytest
from click.testing import CliRunner
import ape
from ape import Project
from .utils import NodeId, project_names, project_skipper, projects_directory
class IntegrationTestModule:
"""
A test module in 'tests.integration.cli'.
"""
def __init__(self, path: Path):
self._path = path
module = import_module(f"tests.integration.cli.{path.stem}")
test_methods = [getattr(module, t) for t in dir(module) if t.startswith("test_")]
self.tests = [NodeId(t) for t in test_methods]
def __iter__(self):
return iter(self.tests)
@property
def name(self) -> str:
"""
The name of the module.
"""
return self._path.stem
# Loads the actual test modules / methods
integration_tests = [
IntegrationTestModule(p)
for p in Path(__file__).parent.iterdir()
if p.suffix == ".py" and p.name.startswith("test_")
]
@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(session, config, items):
"""
Filter out tests marked to be skipped using ``skip_projects``
and the ``skip_projects_except`` decorators.
"""
modified_items = []
for item in items:
item_name_parts = item.name.split("[")
item_name_parts = [p.strip("[]") for p in item_name_parts]
module_full_name = item.module.__name__
module_name = module_full_name.split(".")[-1]
test_name = item_name_parts[0]
# Handle if a parametrized test is on-top
# of the project's parametrization.
project_name = item_name_parts[-1]
for proj_name in project_skipper:
# Example: 'test_foo[project-name-fuzz-0]' matches 'project-name'
if project_name.startswith(proj_name):
project_name = proj_name
break
is_cli_integration_test = (
len(item_name_parts) == 2 and "integration.cli" in module_full_name
)
if not is_cli_integration_test or not project_skipper.do_skip(
project_name, module_name, test_name
):
modified_items.append(item)
items[:] = modified_items
@pytest.fixture(params=project_names)
def project_folder(request, config):
project_source_dir = projects_directory / request.param
project_dest_dir = config.PROJECT_FOLDER / project_source_dir.name
copy_tree(project_source_dir.as_posix(), project_dest_dir.as_posix())
previous_project_folder = config.PROJECT_FOLDER
config.PROJECT_FOLDER = project_dest_dir
yield project_dest_dir
config.PROJECT_FOLDER = previous_project_folder
@pytest.fixture
def project(project_folder):
previous_project = ape.project
project = Project(project_folder)
ape.project = project
yield project
ape.project = previous_project
@pytest.fixture
def runner(project_folder):
previous_cwd = str(Path.cwd())
os.chdir(str(project_folder))
runner = CliRunner()
yield runner
os.chdir(previous_cwd)
@pytest.fixture(scope="session")
def ape_cli():
from ape._cli import cli
yield cli
def assert_failure(result, expected_output):
assert result.exit_code == 1
assert result.exception is not None
assert "ERROR" in result.output
assert expected_output in result.output
@pytest.fixture
def clean_cache(project):
"""
Use this fixture to ensure a project
does not have a cached compilation.
"""
cache_file = project.manifest_cachefile
if cache_file.exists():
cache_file.unlink()
yield
if cache_file.exists():
cache_file.unlink()
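# Sketch of how a test module in this package is expected to use these fixtures
# (illustrative only; the skip decorators are assumed to live in tests.integration.cli.utils):
#
#   from .utils import skip_projects_except          # assumed location
#
#   @skip_projects_except("with-contracts")           # hypothetical project name
#   def test_compile(ape_cli, runner, project):
#       result = runner.invoke(ape_cli, ["compile"])
#       assert result.exit_code == 0, result.output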
| 26.654676 | 89 | 0.68502 |
4a24c41493cd50e5ac11abe6524a00e5777b21a2 | 132 | py | Python | pyrhyme_demo.py | GSejas/pyrhyme | b9aab24b88130c9b1e48b5015098408bd21faa71 | [
"MIT"
] | 1 | 2020-05-05T11:54:10.000Z | 2020-05-05T11:54:10.000Z | pyrhyme_demo.py | GSejas/pyrhyme | b9aab24b88130c9b1e48b5015098408bd21faa71 | [
"MIT"
] | null | null | null | pyrhyme_demo.py | GSejas/pyrhyme | b9aab24b88130c9b1e48b5015098408bd21faa71 | [
"MIT"
] | null | null | null | import pyrhyme
rb = pyrhyme.RhymeBrain()
for obt in rb.rhyming_list(word="Dorf"):
print(obt["word"])
print(obt.freq)
| 18.857143 | 41 | 0.651515 |
4a24c4f25bd8f6bfda21592ff98ecc7aed866373 | 6,794 | py | Python | nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/ssl/sslcertkey_service_binding.py | culbertm/NSttyPython | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | [
"Apache-2.0"
] | null | null | null | nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/ssl/sslcertkey_service_binding.py | culbertm/NSttyPython | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | [
"Apache-2.0"
] | null | null | null | nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/ssl/sslcertkey_service_binding.py | culbertm/NSttyPython | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class sslcertkey_service_binding(base_resource) :
""" Binding class showing the service that can be bound to sslcertkey.
"""
def __init__(self) :
self._servicename = None
self._data = None
self._version = None
self._certkey = None
self._service = None
self._servicegroupname = None
self._ca = None
self.___count = None
@property
def servicegroupname(self) :
try :
return self._servicegroupname
except Exception as e:
raise e
@servicegroupname.setter
def servicegroupname(self, servicegroupname) :
try :
self._servicegroupname = servicegroupname
except Exception as e:
raise e
@property
def ca(self) :
r"""The certificate-key pair being unbound is a Certificate Authority (CA) certificate. If you choose this option, the certificate-key pair is unbound from the list of CA certificates that were bound to the specified SSL virtual server or SSL service.
"""
try :
return self._ca
except Exception as e:
raise e
@ca.setter
def ca(self, ca) :
r"""The certificate-key pair being unbound is a Certificate Authority (CA) certificate. If you choose this option, the certificate-key pair is unbound from the list of CA certificates that were bound to the specified SSL virtual server or SSL service.
"""
try :
self._ca = ca
except Exception as e:
raise e
@property
def service(self) :
r"""Bind the certificate to the named SSL service or service group.
"""
try :
return self._service
except Exception as e:
raise e
@service.setter
def service(self, service) :
r"""Bind the certificate to the named SSL service or service group.
"""
try :
self._service = service
except Exception as e:
raise e
@property
def servicename(self) :
r"""Service name to which the certificate key pair is bound.
"""
try :
return self._servicename
except Exception as e:
raise e
@servicename.setter
def servicename(self, servicename) :
r"""Service name to which the certificate key pair is bound.
"""
try :
self._servicename = servicename
except Exception as e:
raise e
@property
def certkey(self) :
r"""Name of the certificate-key pair.<br/>Minimum length = 1.
"""
try :
return self._certkey
except Exception as e:
raise e
@certkey.setter
def certkey(self, certkey) :
r"""Name of the certificate-key pair.<br/>Minimum length = 1
"""
try :
self._certkey = certkey
except Exception as e:
raise e
@property
def version(self) :
r"""Version.
"""
try :
return self._version
except Exception as e:
raise e
@property
def data(self) :
r"""Vserver Id.
"""
try :
return self._data
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(sslcertkey_service_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.sslcertkey_service_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.certkey is not None :
return str(self.certkey)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, certkey="", option_="") :
r""" Use this API to fetch sslcertkey_service_binding resources.
"""
try :
if not certkey :
obj = sslcertkey_service_binding()
response = obj.get_resources(service, option_)
else :
obj = sslcertkey_service_binding()
obj.certkey = certkey
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, certkey, filter_) :
r""" Use this API to fetch filtered set of sslcertkey_service_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = sslcertkey_service_binding()
obj.certkey = certkey
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, certkey) :
r""" Use this API to count sslcertkey_service_binding resources configued on NetScaler.
"""
try :
obj = sslcertkey_service_binding()
obj.certkey = certkey
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, certkey, filter_) :
r""" Use this API to count the filtered set of sslcertkey_service_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = sslcertkey_service_binding()
obj.certkey = certkey
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class sslcertkey_service_binding_response(base_response) :
def __init__(self, length=1) :
self.sslcertkey_service_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.sslcertkey_service_binding = [sslcertkey_service_binding() for _ in range(length)]
| 27.844262 | 253 | 0.719017 |
4a24c5f4720b7cefe9f68f42d7d1e269374df833 | 267 | py | Python | typeidea/typeidea/settings/develop.py | BaichengLu/MyBlog | ab55dd7c98468ba68f3074541163764748fc4972 | [
"MIT"
] | null | null | null | typeidea/typeidea/settings/develop.py | BaichengLu/MyBlog | ab55dd7c98468ba68f3074541163764748fc4972 | [
"MIT"
] | null | null | null | typeidea/typeidea/settings/develop.py | BaichengLu/MyBlog | ab55dd7c98468ba68f3074541163764748fc4972 | [
"MIT"
] | null | null | null | from .base import * # NOQA
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'blog_db',
'USER': 'blog',
'PASSWORD': 'blog123.',
'HOST': '192.168.174.130',
'PORT': 3306
}
}
| 17.8 | 45 | 0.479401 |
4a24c671d667ee4a5eabbc76c3a7b65424dc41ff | 4,928 | py | Python | pypureclient/flashblade/FB_2_3/models/hardware_connector.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flashblade/FB_2_3/models/hardware_connector.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flashblade/FB_2_3/models/hardware_connector.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.3, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_3 import models
class HardwareConnector(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'id': 'str',
'connector_type': 'str',
'lane_speed': 'int',
'port_count': 'int',
'transceiver_type': 'str'
}
attribute_map = {
'name': 'name',
'id': 'id',
'connector_type': 'connector_type',
'lane_speed': 'lane_speed',
'port_count': 'port_count',
'transceiver_type': 'transceiver_type'
}
required_args = {
}
def __init__(
self,
name=None, # type: str
id=None, # type: str
connector_type=None, # type: str
lane_speed=None, # type: int
port_count=None, # type: int
transceiver_type=None, # type: str
):
"""
Keyword args:
name (str): Name of the object (e.g., a file system or snapshot).
id (str): A non-modifiable, globally unique ID chosen by the system.
connector_type (str): Form-factor of the interface. Valid values include `QSFP` and `RJ-45`.
lane_speed (int): Configured speed of each lane in the connector in bits-per-second.
port_count (int): Configured number of ports in the connector (1/4 for QSFP).
transceiver_type (str): Details about the transceiver which is plugged into the connector port. Transceiver type will be read-only for pureuser. If nothing is plugged into QSFP port, value will be `Unused` and type cannot be auto-detected, and internal user has not specified a type - value will be `Unknown`. If transceiver is plugged in, and type is auto-detected, and/or type has been explicitly set by internal user - that value will be shown. Transceiver type is not applicable for RJ-45 connectors.
"""
if name is not None:
self.name = name
if id is not None:
self.id = id
if connector_type is not None:
self.connector_type = connector_type
if lane_speed is not None:
self.lane_speed = lane_speed
if port_count is not None:
self.port_count = port_count
if transceiver_type is not None:
self.transceiver_type = transceiver_type
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `HardwareConnector`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(HardwareConnector, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HardwareConnector):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
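# Example (sketch; the attribute values below are illustrative only):
#   connector = HardwareConnector(name='CH1.FM1.ETH1', connector_type='QSFP',
#                                 lane_speed=10000000000, port_count=4)
#   print(connector.to_dict())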
| 34.704225 | 516 | 0.576907 |
4a24c70f4a64b73ceafd2936bb31c7b90442b9a2 | 398 | py | Python | dnfas/settings/development.py | altest-com/dnfas-api | 56b4dfbef33fd9ad6e6504d1cb88105069b57d70 | [
"MIT"
] | null | null | null | dnfas/settings/development.py | altest-com/dnfas-api | 56b4dfbef33fd9ad6e6504d1cb88105069b57d70 | [
"MIT"
] | 1 | 2020-03-31T17:20:57.000Z | 2020-04-01T17:40:31.000Z | dnfas/settings/development.py | altest-com/dnfas-api | 56b4dfbef33fd9ad6e6504d1cb88105069b57d70 | [
"MIT"
] | null | null | null | from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Django rest framework
REST_FRAMEWORK.update({
'DEFAULT_AUTHENTICATION_CLASSES': (
'users.backends.JWTAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
]
})
# Enable CORS for all domains
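# (development convenience only; restrict the allowed origins before deploying)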
CORS_ORIGIN_ALLOW_ALL = True | 23.411765 | 65 | 0.721106 |
4a24c766acfe8638ef22e9b9436afb060ce1e6a2 | 514 | py | Python | feng-ml-python/src/GradienAlgorithm.py | JiangFeng07/feng-python-apply | 1dec2d518ea257467c9b253981cfc281d7ac108a | [
"MIT"
] | 12 | 2017-08-05T16:46:25.000Z | 2019-04-18T08:32:16.000Z | feng-ml-python/src/GradienAlgorithm.py | JiangFeng07/feng-python-apply | 1dec2d518ea257467c9b253981cfc281d7ac108a | [
"MIT"
] | null | null | null | feng-ml-python/src/GradienAlgorithm.py | JiangFeng07/feng-python-apply | 1dec2d518ea257467c9b253981cfc281d7ac108a | [
"MIT"
] | 18 | 2017-08-30T10:58:02.000Z | 2019-12-09T13:27:34.000Z | # encoding=utf-8
x_old = 0
x_new = 6
gamma = 0.01
precision = 0.00000001
# x = Symbol("x")
# f = (x ** 4) - (3 * (x ** 3)) + 2
# Gradient descent algorithm (minimizes f(x) = x**4 - 3*x**3 + 2)
def df(x):
y = 4 * x ** 3 - 9 * x ** 2
return y
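# Gradient descent update: x_{n+1} = x_n - gamma * f'(x_n), repeated until
# successive iterates differ by less than `precision`.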
while abs(x_new - x_old) > precision:
x_old = x_new
x_new += -gamma * df(x_old)
print("The local minimum occurs at", x_new)
#梯度上升算法
def df(x):
y = -2 * x
return y
while abs(x_new - x_old) > precision:
x_old = x_new
x_new += gamma * df(x_old)
print("The local maximum occurs at", x_new)
| 14.277778 | 43 | 0.558366 |
4a24c82bd2c6e9d5139a1f3f0a57e1f980f71428 | 2,731 | py | Python | mycli/key_bindings.py | lyrl/mycli | d62eefdc819a11ecdb97d93dd7ad1922d28a3795 | [
"BSD-3-Clause"
] | 10,997 | 2015-07-27T06:59:04.000Z | 2022-03-31T07:49:26.000Z | mycli/key_bindings.py | lyrl/mycli | d62eefdc819a11ecdb97d93dd7ad1922d28a3795 | [
"BSD-3-Clause"
] | 937 | 2015-07-29T09:25:30.000Z | 2022-03-30T23:54:03.000Z | mycli/key_bindings.py | lyrl/mycli | d62eefdc819a11ecdb97d93dd7ad1922d28a3795 | [
"BSD-3-Clause"
] | 799 | 2015-07-27T13:13:49.000Z | 2022-03-29T21:24:39.000Z | import logging
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.filters import completion_is_selected
from prompt_toolkit.key_binding import KeyBindings
_logger = logging.getLogger(__name__)
def mycli_bindings(mycli):
"""Custom key bindings for mycli."""
kb = KeyBindings()
@kb.add('f2')
def _(event):
"""Enable/Disable SmartCompletion Mode."""
_logger.debug('Detected F2 key.')
mycli.completer.smart_completion = not mycli.completer.smart_completion
@kb.add('f3')
def _(event):
"""Enable/Disable Multiline Mode."""
_logger.debug('Detected F3 key.')
mycli.multi_line = not mycli.multi_line
@kb.add('f4')
def _(event):
"""Toggle between Vi and Emacs mode."""
_logger.debug('Detected F4 key.')
if mycli.key_bindings == "vi":
event.app.editing_mode = EditingMode.EMACS
mycli.key_bindings = "emacs"
else:
event.app.editing_mode = EditingMode.VI
mycli.key_bindings = "vi"
@kb.add('tab')
def _(event):
"""Force autocompletion at cursor."""
_logger.debug('Detected <Tab> key.')
b = event.app.current_buffer
if b.complete_state:
b.complete_next()
else:
b.start_completion(select_first=True)
@kb.add('c-space')
def _(event):
"""
Initialize autocompletion at cursor.
If the autocompletion menu is not showing, display it with the
appropriate completions for the context.
If the menu is showing, select the next completion.
"""
_logger.debug('Detected <C-Space> key.')
b = event.app.current_buffer
if b.complete_state:
b.complete_next()
else:
b.start_completion(select_first=False)
@kb.add('enter', filter=completion_is_selected)
def _(event):
"""Makes the enter key work as the tab key only when showing the menu.
In other words, don't execute query when enter is pressed in
the completion dropdown menu, instead close the dropdown menu
(accept current selection).
"""
_logger.debug('Detected enter key.')
event.current_buffer.complete_state = None
b = event.app.current_buffer
b.complete_state = None
@kb.add('escape', 'enter')
def _(event):
"""Introduces a line break in multi-line mode, or dispatches the
command in single-line mode."""
_logger.debug('Detected alt-enter key.')
if mycli.multi_line:
event.app.current_buffer.validate_and_handle()
else:
event.app.current_buffer.insert_text('\n')
return kb
| 30.344444 | 79 | 0.624313 |
4a24c82f43ddecd8279b927fc09b57c4b8d9a723 | 1,328 | py | Python | Python/maximum-distance-in-arrays.py | jolie1191/LeetCode | c081a67d3802b8ccf71b80cf0ec18346a46c1f82 | [
"MIT"
] | 5 | 2017-11-14T09:32:33.000Z | 2020-05-11T05:15:41.000Z | Python/maximum-distance-in-arrays.py | chairco/LeetCode | c35e1e04119de315560ec663fe5f56d918f0ed50 | [
"MIT"
] | null | null | null | Python/maximum-distance-in-arrays.py | chairco/LeetCode | c35e1e04119de315560ec663fe5f56d918f0ed50 | [
"MIT"
] | 3 | 2019-05-14T02:49:34.000Z | 2020-05-19T08:45:39.000Z | # Time: O(n)
# Space: O(1)
# Given m arrays, and each array is sorted in ascending order.
# Now you can pick up two integers from two different arrays (each array picks one)
# and calculate the distance.
# We define the distance between two integers a and b to be their absolute difference |a-b|.
# Your task is to find the maximum distance.
#
# Example 1:
# Input:
# [[1,2,3],
# [4,5],
# [1,2,3]]
# Output: 4
# Explanation:
# One way to reach the maximum distance 4 is to pick 1 in the first or third array
# and pick 5 in the second array.
# Note:
# Each given array will have at least 1 number. There will be at least two non-empty arrays.
# The total number of the integers in all the m arrays will be in the range of [2, 10000].
# The integers in the m arrays will be in the range of [-10000, 10000].
class Solution(object):
def maxDistance(self, arrays):
"""
:type arrays: List[List[int]]
:rtype: int
"""
result, min_val, max_val = 0, arrays[0][0], arrays[0][-1]
for i in xrange(1, len(arrays)):
result = max(result, \
max(max_val - arrays[i][0], \
arrays[i][-1] - min_val))
min_val = min(min_val, arrays[i][0])
max_val = max(max_val, arrays[i][-1])
return result | 35.891892 | 92 | 0.612199 |
4a24c87178da1020bd24319df55db6af5c2b8855 | 396 | py | Python | befh/__init__.py | joshua-jd-lee/BitcoinExchangeFH | d8661daf6882db30e9ac720c20c20737af9b118b | [
"Apache-2.0"
] | 310 | 2018-10-13T13:52:33.000Z | 2022-03-20T17:54:36.000Z | befh/__init__.py | joshua-jd-lee/BitcoinExchangeFH | d8661daf6882db30e9ac720c20c20737af9b118b | [
"Apache-2.0"
] | 45 | 2018-11-09T11:11:01.000Z | 2021-11-10T00:39:17.000Z | befh/__init__.py | joshua-jd-lee/BitcoinExchangeFH | d8661daf6882db30e9ac720c20c20737af9b118b | [
"Apache-2.0"
] | 121 | 2018-10-24T20:37:46.000Z | 2022-03-28T04:38:55.000Z | # -*- coding: utf-8 -*-
"""Top-level package for Bitcoin exchange feedhandler."""
__author__ = """Gavin Chan"""
__email__ = '[email protected]'
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
pass
# flake8: noqa
from .core import Configuration, Runner
| 23.294118 | 64 | 0.747475 |
4a24c95afba4f81a08cc61191fef1b99b9ee7b4c | 553 | py | Python | pypy/module/_collections/interp_defaultdict.py | microvm/pypy-mu | 6b03fbe93052d0eb3a4c67152c987c16837b3484 | [
"Apache-2.0",
"OpenSSL"
] | 34 | 2015-07-09T04:53:27.000Z | 2021-07-19T05:22:27.000Z | pypy/module/_collections/interp_defaultdict.py | microvm/pypy-mu | 6b03fbe93052d0eb3a4c67152c987c16837b3484 | [
"Apache-2.0",
"OpenSSL"
] | 6 | 2015-05-30T17:20:45.000Z | 2017-06-12T14:29:23.000Z | pypy/module/_collections/interp_defaultdict.py | microvm/pypy-mu | 6b03fbe93052d0eb3a4c67152c987c16837b3484 | [
"Apache-2.0",
"OpenSSL"
] | 11 | 2015-09-07T14:26:08.000Z | 2020-04-10T07:20:41.000Z | from pypy.interpreter.error import OperationError
def missing(space, w_self, w_key):
    # An interp-level version of defaultdict.__missing__(). This is mostly only
# useful because it can be executed atomically in the presence of
# threads.
w_default_factory = space.getattr(w_self, space.wrap('default_factory'))
if space.is_w(w_default_factory, space.w_None):
raise OperationError(space.w_KeyError, space.newtuple([w_key]))
w_value = space.call_function(w_default_factory)
space.setitem(w_self, w_key, w_value)
return w_value
| 42.538462 | 76 | 0.746835 |
4a24c9d7049a640434974c8d11fdb28b42742911 | 9,717 | py | Python | main.py | stridera/Rainbow | 69b1b30c3a8a5b13af88e87a9103d42cc70e505f | [
"MIT"
] | 1 | 2020-03-15T09:32:36.000Z | 2020-03-15T09:32:36.000Z | main.py | stridera/Rainbow | 69b1b30c3a8a5b13af88e87a9103d42cc70e505f | [
"MIT"
] | null | null | null | main.py | stridera/Rainbow | 69b1b30c3a8a5b13af88e87a9103d42cc70e505f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import division
import argparse
import bz2
from datetime import datetime
import os
import pickle
import numpy as np
import torch
from tqdm import trange
from agent import Agent
from robotronenv import Env
# from env import Env
from memory import ReplayMemory
from test import test
# Note that hyperparameters may originally be reported in ATARI game frames instead of agent steps
parser = argparse.ArgumentParser(description='Rainbow')
parser.add_argument('--id', type=str, default='default', help='Experiment ID')
parser.add_argument('--seed', type=int, default=123, help='Random seed')
parser.add_argument('--disable-cuda', action='store_true', help='Disable CUDA')
# parser.add_argument('--game', type=str, default='space_invaders', choices=atari_py.list_games(), help='ATARI game')
parser.add_argument('--T-max', type=int, default=int(50e6), metavar='STEPS',
help='Number of training steps (4x number of frames)')
parser.add_argument('--max-episode-length', type=int, default=int(108e3), metavar='LENGTH',
help='Max episode length in game frames (0 to disable)')
parser.add_argument('--history-length', type=int, default=4, metavar='T', help='Number of consecutive states processed')
parser.add_argument('--architecture', type=str, default='canonical',
choices=['canonical', 'data-efficient'], metavar='ARCH', help='Network architecture')
parser.add_argument('--hidden-size', type=int, default=512, metavar='SIZE', help='Network hidden size')
parser.add_argument('--noisy-std', type=float, default=0.1, metavar='σ',
help='Initial standard deviation of noisy linear layers')
parser.add_argument('--atoms', type=int, default=51, metavar='C', help='Discretised size of value distribution')
parser.add_argument('--V-min', type=float, default=-10, metavar='V', help='Minimum of value distribution support')
parser.add_argument('--V-max', type=float, default=10, metavar='V', help='Maximum of value distribution support')
parser.add_argument('--model', type=str, metavar='PARAMS', help='Pretrained model (state dict)')
parser.add_argument('--memory-capacity', type=int, default=int(1e6),
metavar='CAPACITY', help='Experience replay memory capacity')
parser.add_argument('--replay-frequency', type=int, default=4, metavar='k', help='Frequency of sampling from memory')
parser.add_argument('--priority-exponent', type=float, default=0.5, metavar='ω',
help='Prioritised experience replay exponent (originally denoted α)')
parser.add_argument('--priority-weight', type=float, default=0.4, metavar='β',
help='Initial prioritised experience replay importance sampling weight')
parser.add_argument('--multi-step', type=int, default=3, metavar='n', help='Number of steps for multi-step return')
parser.add_argument('--discount', type=float, default=0.99, metavar='γ', help='Discount factor')
parser.add_argument('--target-update', type=int, default=int(8e3), metavar='τ',
help='Number of steps after which to update target network')
parser.add_argument('--reward-clip', type=int, default=1, metavar='VALUE', help='Reward clipping (0 to disable)')
parser.add_argument('--learning-rate', type=float, default=0.0000625, metavar='η', help='Learning rate')
parser.add_argument('--adam-eps', type=float, default=1.5e-4, metavar='ε', help='Adam epsilon')
parser.add_argument('--batch-size', type=int, default=32, metavar='SIZE', help='Batch size')
parser.add_argument('--norm-clip', type=float, default=10, metavar='NORM', help='Max L2 norm for gradient clipping')
parser.add_argument('--learn-start', type=int, default=int(20e3), metavar='STEPS',
help='Number of steps before starting training')
parser.add_argument('--evaluate', action='store_true', help='Evaluate only')
parser.add_argument('--evaluation-interval', type=int, default=100000, metavar='STEPS',
help='Number of training steps between evaluations')
parser.add_argument('--evaluation-episodes', type=int, default=10, metavar='N',
help='Number of evaluation episodes to average over')
# TODO: Note that DeepMind's evaluation method is running the latest agent for 500K frames every 1M steps
parser.add_argument('--evaluation-size', type=int, default=500, metavar='N',
help='Number of transitions to use for validating Q')
parser.add_argument('--render', action='store_true', help='Display screen (testing only)')
parser.add_argument('--enable-cudnn', action='store_true', help='Enable cuDNN (faster but nondeterministic)')
parser.add_argument('--checkpoint-interval', default=0, type=int,
help='How often to checkpoint the model, defaults to 0 (never checkpoint)')
parser.add_argument('--memory', help='Path to save/load the memory from')
parser.add_argument('--disable-bzip-memory', action='store_true',
help='Don\'t zip the memory file. Not recommended (zipping is a bit slower and much, much smaller)')
# Setup
args = parser.parse_args()
print(' ' * 26 + 'Options')
for k, v in vars(args).items():
print(' ' * 26 + k + ': ' + str(v))
results_dir = os.path.join('results', args.id)
if not os.path.exists(results_dir):
os.makedirs(results_dir)
metrics = {'steps': [], 'rewards': [], 'Qs': [], 'best_avg_reward': -float('inf')}
np.random.seed(args.seed)
torch.manual_seed(np.random.randint(1, 10000))
if torch.cuda.is_available() and not args.disable_cuda:
args.device = torch.device('cuda')
torch.cuda.manual_seed(np.random.randint(1, 10000))
torch.backends.cudnn.enabled = args.enable_cudnn
else:
args.device = torch.device('cpu')
# Simple ISO 8601 timestamped logger
def log(s):
print('[' + str(datetime.now().strftime('%Y-%m-%dT%H:%M:%S')) + '] ' + s)
def load_memory(memory_path, disable_bzip):
if disable_bzip:
with open(memory_path, 'rb') as pickle_file:
return pickle.load(pickle_file)
else:
with bz2.open(memory_path, 'rb') as zipped_pickle_file:
return pickle.load(zipped_pickle_file)
def save_memory(memory, memory_path, disable_bzip):
if disable_bzip:
with open(memory_path, 'wb') as pickle_file:
pickle.dump(memory, pickle_file)
else:
with bz2.open(memory_path, 'wb') as zipped_pickle_file:
pickle.dump(memory, zipped_pickle_file)
# Environment
env = Env(args)
env.train()
action_space = env.action_space()
# Agent
dqn = Agent(args, env)
# If a model is provided, and evaluate is fale, presumably we want to resume, so try to load memory
if args.model is not None and not args.evaluate:
if not args.memory:
raise ValueError('Cannot resume training without memory save path. Aborting...')
elif not os.path.exists(args.memory):
raise ValueError('Could not find memory file at {path}. Aborting...'.format(path=args.memory))
mem = load_memory(args.memory, args.disable_bzip_memory)
else:
mem = ReplayMemory(args, args.memory_capacity)
priority_weight_increase = (1 - args.priority_weight) / (args.T_max - args.learn_start)
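# Per-step increment used to linearly anneal the importance-sampling weight (beta)
# from its initial value to 1 over the course of training.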
# Construct validation memory
val_mem = ReplayMemory(args, args.evaluation_size)
T, done = 0, True
while T < args.evaluation_size:
if done:
state, done = env.reset(), False
next_state, _, done = env.step(np.random.randint(0, action_space))
val_mem.append(state, None, None, done)
state = next_state
T += 1
if args.evaluate:
dqn.eval() # Set DQN (online network) to evaluation mode
avg_reward, avg_Q = test(args, 0, dqn, val_mem, metrics, results_dir, evaluate=True) # Test
print('Avg. reward: ' + str(avg_reward) + ' | Avg. Q: ' + str(avg_Q))
else:
# Training loop
dqn.train()
T, done = 0, True
for T in trange(1, args.T_max + 1):
if done:
state, done = env.reset(), False
if T % args.replay_frequency == 0:
dqn.reset_noise() # Draw a new set of noisy weights
action = dqn.act(state) # Choose an action greedily (with noisy weights)
next_state, reward, done = env.step(action) # Step
if args.reward_clip > 0:
reward = max(min(reward, args.reward_clip), -args.reward_clip) # Clip rewards
mem.append(state, action, reward, done) # Append transition to memory
# Train and test
if T >= args.learn_start:
mem.priority_weight = min(mem.priority_weight + priority_weight_increase,
1) # Anneal importance sampling weight β to 1
if T % args.replay_frequency == 0:
dqn.learn(mem) # Train with n-step distributional double-Q learning
if T % args.evaluation_interval == 0:
dqn.eval() # Set DQN (online network) to evaluation mode
avg_reward, avg_Q = test(args, T, dqn, val_mem, metrics, results_dir) # Test
log('T = ' + str(T) + ' / ' + str(args.T_max) + ' | Avg. reward: ' +
str(avg_reward) + ' | Avg. Q: ' + str(avg_Q))
dqn.train() # Set DQN (online network) back to training mode
# If memory path provided, save it
if args.memory is not None:
save_memory(mem, args.memory, args.disable_bzip_memory)
# Update target network
if T % args.target_update == 0:
dqn.update_target_net()
# Checkpoint the network
if (args.checkpoint_interval != 0) and (T % args.checkpoint_interval == 0):
dqn.save(results_dir, 'checkpoint.pth')
state = next_state
env.close()
| 47.866995 | 120 | 0.672738 |
4a24ca10ebf588178aaa452577468c8da8bca9b9 | 301 | py | Python | api/__init__.py | Toskgreg/GoldenLions | 6616e7f531dc607cf7ddb75bfa341b5040c739a5 | [
"Apache-2.0"
] | 1 | 2019-01-30T17:41:53.000Z | 2019-01-30T17:41:53.000Z | api/__init__.py | bisonlou/challenge-III | 25b5fa7dcaf28606434175b240585a6e403ead09 | [
"Apache-2.0"
] | 3 | 2019-01-22T07:54:31.000Z | 2019-02-11T09:56:41.000Z | api/__init__.py | Toskgreg/GoldenLions | 6616e7f531dc607cf7ddb75bfa341b5040c739a5 | [
"Apache-2.0"
] | 1 | 2019-02-11T19:10:37.000Z | 2019-02-11T19:10:37.000Z | from flask import Flask
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
test_client = app.test_client()
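# Modules are imported after the app object exists so they can reference it
# without causing circular imports.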
import api.database.engine
import api.views.user_view
import api.views.red_flag_view
import api.views.common_routes
import api.views.intervention_view
import api.models.user_model
| 17.705882 | 34 | 0.82392 |
4a24cbf89e5b2a354e1d762c7bbf095fda4407dd | 1,335 | py | Python | days/day_3/day_3_part_1.py | sharkbound/adventofcode2021 | b4f4721ffad91e4df73a831d5322d17ede06f9b3 | [
"MIT"
] | null | null | null | days/day_3/day_3_part_1.py | sharkbound/adventofcode2021 | b4f4721ffad91e4df73a831d5322d17ede06f9b3 | [
"MIT"
] | null | null | null | days/day_3/day_3_part_1.py | sharkbound/adventofcode2021 | b4f4721ffad91e4df73a831d5322d17ede06f9b3 | [
"MIT"
] | null | null | null | from collections import Counter
from icecream import ic
from day import Day
import re
import numpy as np
import utils
"""
You need to use the binary numbers in the
diagnostic report to generate two new binary numbers (called the gamma rate and the epsilon rate).
The power consumption can then be found by multiplying the gamma rate by the epsilon rate.
"""
class Day3Part1(Day):
day = 3
part = 1
def get_sample_input(self):
return ('00100\n'
'11110\n'
'10110\n'
'10111\n'
'10101\n'
'01111\n'
'00111\n'
'11100\n'
'10000\n'
'11001\n'
'00010\n'
'01010')
def parse_input(self):
return self.input_text_lines
def most_and_least_common_at(self, index, data):
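        # Bit values at column 'index', ordered from most to least common, e.g. ['1', '0'].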
counts = Counter((binary[index] for binary in data))
return [pair[0] for pair in counts.most_common()]
def solve(self):
data = self.parse_input()
bits = [self.most_and_least_common_at(i, data) for i in range(len(data[0]))]
        gamma_rate = int(''.join(pair[0] for pair in bits), 2)
        epsilon_rate = int(''.join(pair[1] for pair in bits), 2)
        print(f'day 3 part 1 answer: {epsilon_rate * gamma_rate}')
| 25.188679 | 99 | 0.580524 |
4a24cca4f04ca351eedbde310c6f2f356a6b3fd9 | 7,327 | py | Python | lib/surface/logging/sinks/update.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | null | null | null | lib/surface/logging/sinks/update.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | null | null | null | lib/surface/logging/sinks/update.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | 1 | 2020-07-25T12:23:41.000Z | 2020-07-25T12:23:41.000Z | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'logging sinks update' command."""
from googlecloudsdk.api_lib.logging import util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
class Update(base.Command):
"""Updates a sink."""
@staticmethod
def Args(parser):
"""Register flags for this command."""
parser.add_argument(
'sink_name', help='The name of the sink to update.')
parser.add_argument(
'destination', nargs='?',
help=('A new destination for the sink. '
'If omitted, the sink\'s existing destination is unchanged.'))
parser.add_argument(
'--log-filter', required=False,
help=('A new filter expression for the sink. '
'If omitted, the sink\'s existing filter (if any) is unchanged.'))
parser.add_argument(
'--output-version-format', required=False,
help=('Format of the log entries being exported. Detailed information: '
'https://cloud.google.com/logging/docs/api/introduction_v2'),
choices=('V1', 'V2'))
def Collection(self):
return 'logging.sinks'
def GetLogSink(self):
"""Returns a log sink specified by the arguments."""
client = self.context['logging_client_v1beta3']
return client.projects_logs_sinks.Get(
self.context['sink_reference'].Request())
def GetLogServiceSink(self):
"""Returns a log service sink specified by the arguments."""
client = self.context['logging_client_v1beta3']
return client.projects_logServices_sinks.Get(
self.context['sink_reference'].Request())
def GetProjectSink(self):
"""Returns a project sink specified by the arguments."""
# Use V2 logging API for project sinks.
client = self.context['logging_client_v2beta1']
messages = self.context['logging_messages_v2beta1']
sink_ref = self.context['sink_reference']
return client.projects_sinks.Get(
messages.LoggingProjectsSinksGetRequest(
projectsId=sink_ref.projectsId, sinksId=sink_ref.sinksId))
def UpdateLogSink(self, sink_data):
"""Updates a log sink specified by the arguments."""
client = self.context['logging_client_v1beta3']
messages = self.context['logging_messages_v1beta3']
sink_ref = self.context['sink_reference']
return client.projects_logs_sinks.Update(
messages.LoggingProjectsLogsSinksUpdateRequest(
projectsId=sink_ref.projectsId, logsId=sink_ref.logsId,
sinksId=sink_data['name'], logSink=messages.LogSink(**sink_data)))
def UpdateLogServiceSink(self, sink_data):
"""Updates a log service sink specified by the arguments."""
client = self.context['logging_client_v1beta3']
messages = self.context['logging_messages_v1beta3']
sink_ref = self.context['sink_reference']
return client.projects_logServices_sinks.Update(
messages.LoggingProjectsLogServicesSinksUpdateRequest(
projectsId=sink_ref.projectsId,
logServicesId=sink_ref.logServicesId, sinksId=sink_data['name'],
logSink=messages.LogSink(**sink_data)))
def UpdateProjectSink(self, sink_data):
"""Updates a project sink specified by the arguments."""
# Use V2 logging API for project sinks.
client = self.context['logging_client_v2beta1']
messages = self.context['logging_messages_v2beta1']
sink_ref = self.context['sink_reference']
# Change string value to enum.
sink_data['outputVersionFormat'] = getattr(
messages.LogSink.OutputVersionFormatValueValuesEnum,
sink_data['outputVersionFormat'])
return client.projects_sinks.Update(
messages.LoggingProjectsSinksUpdateRequest(
projectsId=sink_ref.projectsId, sinksId=sink_data['name'],
logSink=messages.LogSink(**sink_data)))
@util.HandleHttpError
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
The updated sink with its new destination.
"""
util.CheckSinksCommandArguments(args)
# One of the flags is required to update the sink.
# log_filter can be an empty string, so check explicitly for None.
if not (args.destination or args.log_filter is not None or
args.output_version_format):
raise exceptions.ToolException(
'[destination], --log-filter or --output-version-format is required')
# Calling Update on a non-existing sink creates it.
# We need to make sure it exists, otherwise we would create it.
if args.log:
sink = self.GetLogSink()
elif args.service:
sink = self.GetLogServiceSink()
else:
sink = self.GetProjectSink()
# Only update fields that were passed to the command.
if args.destination:
destination = args.destination
else:
destination = sink.destination
if args.log_filter is not None:
log_filter = args.log_filter
else:
log_filter = sink.filter
sink_ref = self.context['sink_reference']
sink_data = {'name': sink_ref.sinksId, 'destination': destination,
'filter': log_filter}
if args.log:
result = util.TypedLogSink(self.UpdateLogSink(sink_data),
log_name=args.log)
elif args.service:
result = util.TypedLogSink(self.UpdateLogServiceSink(sink_data),
service_name=args.service)
else:
if args.output_version_format:
sink_data['outputVersionFormat'] = args.output_version_format
else:
sink_data['outputVersionFormat'] = sink.outputVersionFormat.name
result = util.TypedLogSink(self.UpdateProjectSink(sink_data))
log.UpdatedResource(sink_ref)
self._epilog_result_destination = result.destination
return result
def Epilog(self, unused_resources_were_displayed):
util.PrintPermissionInstructions(self._epilog_result_destination)
Update.detailed_help = {
'DESCRIPTION': """\
Changes the *[destination]* or *--log-filter* associated with a sink.
If you don't include one of the *--log* or *--log-service* flags,
this command updates a project sink.
The new destination must already exist and Cloud Logging must have
permission to write to it.
Log entries are exported to the new destination immediately.
""",
'EXAMPLES': """\
To only update a project sink filter, run:
$ {command} my-sink --log-filter='metadata.severity>=ERROR'
Detailed information about filters can be found at:
https://cloud.google.com/logging/docs/view/advanced_filters
""",
}
| 38.973404 | 80 | 0.696056 |
4a24cca7dc3f7b5da5d1ab460e9dfd83ff805ad9 | 4,816 | py | Python | dashboard/dashboard/pinpoint/handlers/isolate.py | Martijnve23/catapult | 5c63b19d221af6a12889e8727acc85d93892cab7 | [
"BSD-3-Clause"
] | 1 | 2021-07-04T03:26:43.000Z | 2021-07-04T03:26:43.000Z | dashboard/dashboard/pinpoint/handlers/isolate.py | Martijnve23/catapult | 5c63b19d221af6a12889e8727acc85d93892cab7 | [
"BSD-3-Clause"
] | null | null | null | dashboard/dashboard/pinpoint/handlers/isolate.py | Martijnve23/catapult | 5c63b19d221af6a12889e8727acc85d93892cab7 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Service for tracking isolates and looking them up by builder and commit.
An isolate is a way to describe the dependencies of a specific build.
More about isolates:
https://github.com/luci/luci-py/blob/master/appengine/isolate/doc/client/Design.md
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
import webapp2
from dashboard.api import api_request_handler
from dashboard.common import utils
from dashboard.pinpoint.models import change as change_module
from dashboard.pinpoint.models import isolate
class Isolate(api_request_handler.ApiRequestHandler):
"""Handler for managing isolates.
A post request adds new isolate information.
A get request looks up an isolate hash from the builder, commit, and target.
"""
def get(self):
"""Look up an isolate hash.
Args:
builder_name: The name of the builder that produced the isolate.
change: The Change the isolate is for, as a JSON string.
target: The isolate target.
"""
# Get parameters.
parameters = (
('builder_name', str),
('change', lambda x: change_module.Change.FromDict(json.loads(x))),
('target', str),
)
try:
# pylint: disable=unbalanced-tuple-unpacking
builder_name, change, target = self._ValidateParameters(parameters)
except (KeyError, TypeError, ValueError) as e:
self.response.set_status(400)
self.response.write(e)
return
# Get.
try:
isolate_server, isolate_hash = isolate.Get(builder_name, change, target)
except KeyError as e:
self.response.set_status(404)
self.response.write(e)
return
self.response.write(
json.dumps({
'isolate_server': isolate_server,
'isolate_hash': isolate_hash,
}))
def _CheckUser(self):
# TODO: Remove when all Pinpoint builders are migrated to LUCI.
if self.request.remote_addr in utils.GetIpAllowlist():
return
self._CheckIsInternalUser()
def Post(self):
"""Add new isolate information.
Args:
builder_name: The name of the builder that produced the isolate.
change: The Change the isolate is for, as a JSON string.
isolate_server: The hostname of the server where the isolates are stored.
isolate_map: A JSON dict mapping the target names to the isolate hashes.
"""
# Get parameters.
parameters = (
('builder_name', str),
('change', lambda x: change_module.Change.FromDict(json.loads(x))),
('isolate_server', str),
('isolate_map', json.loads),
)
try:
# pylint: disable=unbalanced-tuple-unpacking
builder_name, change, isolate_server, isolate_map = (
self._ValidateParameters(parameters))
except (KeyError, TypeError, ValueError) as e:
self.response.set_status(400)
self.response.write(json.dumps({'error': e.message}))
return
# Put information into the datastore.
isolate_infos = [(builder_name, change, target, isolate_server,
isolate_hash)
for target, isolate_hash in isolate_map.items()]
isolate.Put(isolate_infos)
# Respond to the API user.
self.response.write(json.dumps(isolate_infos))
def _ValidateParameters(self, parameters):
"""Ensure the right parameters are present and valid.
Args:
parameters: Iterable of (name, converter) tuples where name is the
parameter name and converter is a function used to validate
and convert that parameter into its internal representation.
Returns:
A list of parsed parameter values.
Raises:
TypeError: The wrong parameters are present.
ValueError: The parameters have invalid values.
"""
parameter_names = tuple(parameter_name for parameter_name, _ in parameters)
for given_parameter in self.request.params:
if given_parameter not in parameter_names:
raise TypeError('Unknown parameter: %s' % given_parameter)
parameter_values = []
for parameter_name, parameter_converter in parameters:
if parameter_name not in self.request.params:
raise TypeError('Missing parameter: %s' % parameter_name)
parameter_value = self.request.get(parameter_name)
if not parameter_value:
raise ValueError('Empty parameter: %s' % parameter_name)
parameter_value = parameter_converter(parameter_value)
parameter_values.append(parameter_value)
return parameter_values
class IsolateCleanup(webapp2.RequestHandler):
def get(self):
isolate.DeleteExpiredIsolates()
| 32.540541 | 82 | 0.697051 |
4a24cd9b459a2d881b8e0c5cb7d026d49ff0f275 | 13,306 | py | Python | mydatamyconsent/model/error_type.py | My-Data-My-Consent/python-sdk | 414640bcda6350e6f5e74e42442737eb8d5b7447 | [
"Apache-2.0"
] | null | null | null | mydatamyconsent/model/error_type.py | My-Data-My-Consent/python-sdk | 414640bcda6350e6f5e74e42442737eb8d5b7447 | [
"Apache-2.0"
] | 5 | 2021-12-19T10:29:43.000Z | 2022-03-31T22:15:37.000Z | mydatamyconsent/model/error_type.py | mydatamyconsent/python-sdk | 414640bcda6350e6f5e74e42442737eb8d5b7447 | [
"Apache-2.0"
] | null | null | null | """
My Data My Consent - Developer API
Unleashing the power of data consent by establishing trust. The Platform Core Developer API defines a set of capabilities that can be used to request, issue, manage and update data, documents and credentials by organizations. The API can be used to request, manage and update Decentralised Identifiers, Financial Data, Health Data issue Documents, Credentials directly or using OpenID Connect flows, and verify Messages signed with DIDs and much more. # noqa: E501
The version of the OpenAPI document: v1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from mydatamyconsent.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from mydatamyconsent.exceptions import ApiAttributeError
class ErrorType(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('value',): {
'INVALIDACCESSTOKEN': "InvalidAccessToken",
'INVALIDREFRESHTOKEN': "InvalidRefreshToken",
'INSUFFICIENTPERMISSION': "InsufficientPermission",
'INTERNALSERVERERROR': "InternalServerError",
'BADREQUEST': "BadRequest",
'NOTFOUND': "NotFound",
'INVALIDORGANIZATION': "InvalidOrganization",
'INVALIDFILEUPLOADTYPE': "InvalidFileUploadType",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': (str,),
}
@cached_property
def discriminator():
return None
attribute_map = {}
read_only_vars = set()
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""ErrorType - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str):, must be one of ["InvalidAccessToken", "InvalidRefreshToken", "InsufficientPermission", "InternalServerError", "BadRequest", "NotFound", "InvalidOrganization", "InvalidFileUploadType", ] # noqa: E501
Keyword Args:
value (str):, must be one of ["InvalidAccessToken", "InvalidRefreshToken", "InsufficientPermission", "InternalServerError", "BadRequest", "NotFound", "InvalidOrganization", "InvalidFileUploadType", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
"""ErrorType - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str):, must be one of ["InvalidAccessToken", "InvalidRefreshToken", "InsufficientPermission", "InternalServerError", "BadRequest", "NotFound", "InvalidOrganization", "InvalidFileUploadType", ] # noqa: E501
Keyword Args:
value (str):, must be one of ["InvalidAccessToken", "InvalidRefreshToken", "InsufficientPermission", "InternalServerError", "BadRequest", "NotFound", "InvalidOrganization", "InvalidFileUploadType", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
self = super(OpenApiModel, cls).__new__(cls)
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
return self
| 44.651007 | 469 | 0.570119 |
4a24cde078bc8e66d4ed61b6c14ae1f36440ec0a | 734 | py | Python | tfx/version.py | Saiprasad16/tfx | c1e0704b2a83232469f55598efcdb7808b6c909f | [
"Apache-2.0"
] | 1 | 2021-05-10T10:41:06.000Z | 2021-05-10T10:41:06.000Z | tfx/version.py | Saiprasad16/tfx | c1e0704b2a83232469f55598efcdb7808b6c909f | [
"Apache-2.0"
] | null | null | null | tfx/version.py | Saiprasad16/tfx | c1e0704b2a83232469f55598efcdb7808b6c909f | [
"Apache-2.0"
] | null | null | null | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the version string of TFX."""
# Note that setup.py uses this version.
__version__ = '0.31.0.dev'
| 38.631579 | 74 | 0.752044 |
4a24ce234ccc4632854811aed67b16a67fd93175 | 615 | py | Python | blender/arm/logicnode/array_remove.py | astronalta/armory-3d | 15fa9fe50587e9e054cc5176f9a7de334cce5113 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/array_remove.py | astronalta/armory-3d | 15fa9fe50587e9e054cc5176f9a7de334cce5113 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/array_remove.py | astronalta/armory-3d | 15fa9fe50587e9e054cc5176f9a7de334cce5113 | [
"Zlib"
] | null | null | null | import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class ArrayRemoveNode(Node, ArmLogicTreeNode):
    '''Removes the element at the given index from an array and outputs the removed value'''
bl_idname = 'LNArrayRemoveNode'
bl_label = 'Array Remove'
bl_icon = 'GAME'
def init(self, context):
self.inputs.new('ArmNodeSocketAction', 'In')
self.inputs.new('NodeSocketShader', 'Array')
self.inputs.new('NodeSocketInt', 'Index')
self.outputs.new('ArmNodeSocketAction', 'Out')
self.outputs.new('NodeSocketShader', 'Value')
add_node(ArrayRemoveNode, category='Array')
| 30.75 | 54 | 0.691057 |
4a24ce592bf9a1ad1bfa8597f12822f88adc0326 | 635 | py | Python | pyridge/preprocess/log.py | cperales/PyRidge | b0029fae9e24a4e5c364bbd8fc3791eab15baa75 | [
"MIT"
] | 8 | 2019-03-09T13:47:23.000Z | 2022-01-29T03:51:00.000Z | pyridge/preprocess/log.py | cperales/pyridge | 74a9aa83c1687e5362b0fd02f526281ad6837b75 | [
"MIT"
] | 1 | 2018-10-19T18:46:53.000Z | 2018-10-19T18:46:53.000Z | pyridge/preprocess/log.py | cperales/PyRidge | b0029fae9e24a4e5c364bbd8fc3791eab15baa75 | [
"MIT"
] | 3 | 2020-08-26T10:08:20.000Z | 2021-11-13T11:42:23.000Z | from pyridge.generic.scaler import Scaler
import numpy as np
class LogScaler(Scaler):
"""
    Scaler that transforms values onto a logarithmic
    scale.
"""
def __init__(self):
self.min_: np.float
def get_params(self):
return {'min_': self.min_}
def fit(self, values):
self.min_ = np.min(values, axis=0)
def transform(self, values):
return np.log(values + (1.0 - self.min_))
def fit_transform(self, values):
self.fit(values)
return self.transform(values)
def inverse_transform(self, values):
return np.exp(values) - (1.0 - self.min_)
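# Example usage (sketch; X is any 2-D feature array):
#   scaler = LogScaler()
#   logged = scaler.fit_transform(X)
#   restored = scaler.inverse_transform(logged)  # recovers X up to floating-point error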
| 22.678571 | 56 | 0.623622 |
4a24ceb5e53ef73eeae8f50209b18d931e372b4f | 7,842 | py | Python | ceilometer/tests/unit/hardware/pollsters/test_generic.py | stackhpc/ceilometer | f19037c1b616f2ecb8fd4a1d446687538327e687 | [
"Apache-2.0"
] | null | null | null | ceilometer/tests/unit/hardware/pollsters/test_generic.py | stackhpc/ceilometer | f19037c1b616f2ecb8fd4a1d446687538327e687 | [
"Apache-2.0"
] | null | null | null | ceilometer/tests/unit/hardware/pollsters/test_generic.py | stackhpc/ceilometer | f19037c1b616f2ecb8fd4a1d446687538327e687 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2015 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
import yaml
import fixtures
from oslo_utils import fileutils
from ceilometer import declarative
from ceilometer.hardware.inspector import base as inspector_base
from ceilometer.hardware.pollsters import generic
from ceilometer import sample
from ceilometer import service
from ceilometer.tests import base as test_base
class TestMeterDefinition(test_base.BaseTestCase):
def test_config_definition(self):
cfg = dict(name='test',
type='gauge',
unit='B',
snmp_inspector={})
definition = generic.MeterDefinition(cfg)
self.assertEqual('test', definition.name)
self.assertEqual('gauge', definition.type)
self.assertEqual('B', definition.unit)
self.assertEqual({}, definition.snmp_inspector)
def test_config_missing_field(self):
cfg = dict(name='test', type='gauge')
try:
generic.MeterDefinition(cfg)
except declarative.MeterDefinitionException as e:
self.assertEqual("Missing field unit", e.brief_message)
def test_config_invalid_field(self):
cfg = dict(name='test',
type='gauge',
unit='B',
invalid={})
definition = generic.MeterDefinition(cfg)
self.assertEqual("foobar", getattr(definition, 'invalid', 'foobar'))
def test_config_invalid_type_field(self):
cfg = dict(name='test',
type='invalid',
unit='B',
snmp_inspector={})
try:
generic.MeterDefinition(cfg)
except declarative.MeterDefinitionException as e:
self.assertEqual("Unrecognized type value invalid",
e.brief_message)
def test_config_missing_unit_field(self):
cfg = dict(name='hardware.cpu.user',
snmp_inspector={"matching_type": "type_exact",
"oid": "1.3.6.1.4.1.2021.11.50.0",
"type": "int"})
try:
generic.MeterDefinition(cfg)
except declarative.MeterDefinitionException as e:
self.assertEqual("Missing field unit",
e.brief_message)
@mock.patch('ceilometer.hardware.pollsters.generic.LOG')
def test_bad_metric_skip(self, LOG):
cfg = {'metric': [dict(name='test1',
type='gauge',
unit='B',
snmp_inspector={}),
dict(name='test_bad',
type='invalid',
unit='B',
snmp_inspector={}),
dict(name='test2',
type='gauge',
unit='B',
snmp_inspector={})]}
data = generic.load_definition(cfg)
self.assertEqual(2, len(data))
LOG.error.assert_called_with(
"Error loading meter definition: %s",
"Unrecognized type value invalid")
class FakeInspector(inspector_base.Inspector):
net_metadata = dict(name='test.teest',
mac='001122334455',
ip='10.0.0.2',
speed=1000)
DATA = {
'test': (0.99, {}, {}),
'test2': (90, net_metadata, {}),
}
def inspect_generic(self, host, cache,
extra_metadata=None, param=None):
yield self.DATA[host.hostname]
class TestGenericPollsters(test_base.BaseTestCase):
@staticmethod
def faux_get_inspector(url, namespace=None):
return FakeInspector()
def setUp(self):
super(TestGenericPollsters, self).setUp()
self.conf = service.prepare_service([], [])
self.resources = ["snmp://test", "snmp://test2"]
self.useFixture(fixtures.MockPatch(
'ceilometer.hardware.inspector.get_inspector',
self.faux_get_inspector))
self.pollster = generic.GenericHardwareDeclarativePollster(self.conf)
def _setup_meter_def_file(self, cfg):
if six.PY3:
cfg = cfg.encode('utf-8')
meter_cfg_file = fileutils.write_to_tempfile(content=cfg,
prefix="snmp",
suffix="yaml")
self.conf.set_override(
'meter_definitions_file',
meter_cfg_file, group='hardware')
cfg = declarative.load_definitions(
self.conf, {}, self.conf.hardware.meter_definitions_file)
return cfg
def _check_get_samples(self, name, definition,
expected_value, expected_type, expected_unit=None):
self.pollster._update_meter_definition(definition)
cache = {}
samples = list(self.pollster.get_samples(None, cache,
self.resources))
self.assertTrue(samples)
self.assertIn(self.pollster.CACHE_KEY, cache)
for resource in self.resources:
self.assertIn(resource, cache[self.pollster.CACHE_KEY])
self.assertEqual(set([name]),
set([s.name for s in samples]))
match = [s for s in samples if s.name == name]
self.assertEqual(expected_value, match[0].volume)
self.assertEqual(expected_type, match[0].type)
if expected_unit:
self.assertEqual(expected_unit, match[0].unit)
def test_get_samples(self):
param = dict(matching_type='type_exact',
oid='1.3.6.1.4.1.2021.10.1.3.1',
type='lambda x: float(str(x))')
meter_def = generic.MeterDefinition(dict(type='gauge',
name='hardware.test1',
unit='process',
snmp_inspector=param))
self._check_get_samples('hardware.test1',
meter_def,
0.99, sample.TYPE_GAUGE,
expected_unit='process')
def test_get_pollsters_extensions(self):
param = dict(matching_type='type_exact',
oid='1.3.6.1.4.1.2021.10.1.3.1',
type='lambda x: float(str(x))')
meter_cfg = yaml.dump(
{'metric': [dict(type='gauge',
name='hardware.test1',
unit='process',
snmp_inspector=param),
dict(type='gauge',
name='hardware.test2.abc',
unit='process',
snmp_inspector=param)]})
self._setup_meter_def_file(meter_cfg)
pollster = generic.GenericHardwareDeclarativePollster
# Clear cached mapping
pollster.mapping = None
exts = pollster.get_pollsters_extensions(self.conf)
self.assertEqual(2, len(exts))
self.assertIn(exts[0].name, ['hardware.test1', 'hardware.test2.abc'])
self.assertIn(exts[1].name, ['hardware.test1', 'hardware.test2.abc'])
| 40.010204 | 78 | 0.554578 |
4a24d197dd2428eae1b7e3f94b91343bf5130617 | 9,366 | py | Python | photospline/resources/scripts/glam-photonics.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | 1 | 2020-12-24T22:00:01.000Z | 2020-12-24T22:00:01.000Z | photospline/resources/scripts/glam-photonics.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | null | null | null | photospline/resources/scripts/glam-photonics.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | 3 | 2020-07-17T09:20:29.000Z | 2021-03-30T16:44:18.000Z | from icecube.photospline import splinefitstable
from optparse import OptionParser
from icecube.photospline.photonics import *
try:
input = raw_input
except NameError:
pass
import sys
import os
import numpy
# Hard-coded params
#nknots =[17, 6, 12, 25] # [r, phi, z, t] For Nathan/Jakob's binning
# Parse arguments
usage = "usage: %prog [options] table.pt [output.fits]"
optparser = OptionParser(usage=usage)
optparser.add_option("-r", "--rknots", dest="rknots", type="int",
help="number of knots in radial dimension")
optparser.add_option("-f", "--fknots", dest="fknots", type="int",
help="number of knots in angular dimension")
optparser.add_option("-z", "--zknots", dest="zknots", type="int",
help="number of knots in longitudinal dimension")
optparser.add_option("-t", "--tknots", dest="tknots", type="int",
help="number of knots in time dimension")
optparser.add_option("-s", "--smooth", dest="smooth", type="float",
help="smoothness coefficient", default=1e-6)
optparser.add_option("--prob", dest="prob", action="store_true",
help="Fit only the normalized CDFs", default=False)
optparser.add_option("--abs", dest="abs", action="store_true",
help="Fit only the total amplitude in each cell", default=False)
optparser.add_option("--ice-bottom", dest="ice_bottom", type="float",
help="Lower boundary of ice properties. Any table cells below this\
depth will be weighted with zero, as they contain no data.", default=-820)
optparser.add_option("--ice-top", dest="ice_top", type="float",
help="Upper boundary of ice properties. Any table cells above this\
depth will be weighted with zero, as they contain no data.", default=820)
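# Allow overwriting existing output files without an interactive prompt (see check_exists below).
optparser.add_option("--force", dest="force", action="store_true", default=False,
    help="overwrite existing output files without prompting")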
(opts, args) = optparser.parse_args()
if len(args) < 1:
print(usage)
sys.exit(1)
# by default, do both fits
if not opts.prob and not opts.abs:
opts.prob = opts.abs = True
def check_exists(outputfile):
if os.path.exists(outputfile):
if opts.force or input("File %s exists. Overwrite? (y/n)" % outputfile) == 'y':
os.unlink(outputfile)
else:
sys.exit()
def default_path(input):
pth = os.path.basename(input)
return pth+'.abs.pspl.fits',pth+'.prob.pspl.fits'
if len(args) < 2:
abs_outputfile, prob_outputfile = default_path(args[0])
else:
abs_outputfile, prob_outputfile = default_path(args[1])
if opts.prob: check_exists(prob_outputfile)
if opts.abs: check_exists(abs_outputfile)
smooth = opts.smooth
# Real code
from icecube.photospline import spglam as glam
table = Table(args[0])
table.convert_to_level1()
# Photonics stores a bitmask that gives the kinds of normalizations
# that have been applied to the table cells in the 'efficiency' field.
# NB: We want dP, not dP/dt
if (Efficiency.DIFFERENTIAL & table.header['efficiency']):
raise ValueError("This appears to be a dP/dt table. Don't do that, okay?")
if (not Efficiency.N_PHOTON & table.header['efficiency']):
raise ValueError("This table does not appear to be normalized.")
nknots = [15, 6, 25] # rho, phi, z
if table.ndim > 3:
nknots.append(20) # [t]
if opts.rknots:
nknots[0] = opts.rknots
if opts.fknots:
nknots[1] = opts.fknots
if opts.zknots:
nknots[2] = opts.zknots
if opts.tknots and table.ndim > 3:
nknots[3] = opts.tknots
print("Core knots:", nknots)
radial_extent = 600
length_extent = 500
coreknots = [None]*4
# It's tempting to use some version of the bin centers as knot positions,
# but this should be avoided. Data points exactly at the knot locations are
# not fully supported, leading to genuine weirdness in the fit.
coreknots[0] = numpy.linspace(0, numpy.sqrt(radial_extent), nknots[0])**2
coreknots[0] = numpy.concatenate(([0], numpy.logspace(-1,
numpy.log10(radial_extent), nknots[0]-1)))
coreknots[1] = numpy.linspace(0, 180, nknots[1])
# space 1/3 of the knots quadratically behind the source,
# where everything is diffuse, and the remainder in front
# with logarithmic spacing
backerds = int(nknots[2]/3.0)
coreknots[2] = numpy.concatenate((
-(numpy.linspace(1, numpy.sqrt(length_extent), backerds)**2)[::-1],
numpy.logspace(0, numpy.log10(length_extent), nknots[2]-backerds)
))
# We're fitting the CDF in time, so we need tightly-spaced knots at
# early times to be able to represent the potentially steep slope.
# XXX: we assume t_max == 7000 ns
coreknots[3] = numpy.logspace(-1, numpy.log10(7000), nknots[3])
coreknots[3] = numpy.concatenate(([0], coreknots[3]))
# Now append the extra knots off both ends of the axis in order to provide
# full support at the boundaries
rknots = numpy.append(numpy.append([-1, -0.5, -0.1], coreknots[0]),
100*numpy.arange(1,3) + radial_extent)
endgap = [coreknots[1][1]-coreknots[1][0], coreknots[1][-1]-coreknots[1][-2]]
thetaknots = numpy.concatenate((coreknots[1][0] - endgap[0]*numpy.arange(2,0,-1),
coreknots[1], coreknots[1][-1] + endgap[1]*numpy.arange(1,3)))
# NB: we want -1 and 1 to be fully supported.
endgap = [coreknots[2][1]-coreknots[2][0], coreknots[2][-1]-coreknots[2][-2]]
zknots = numpy.concatenate((coreknots[2][0] - endgap[0]*numpy.arange(2,0,-1),
coreknots[2], coreknots[2][-1] + endgap[1]*numpy.arange(1,3)))
# NB: we can get away with partial support in time, since we know that
# F(0) is identically zero.
tknots = numpy.concatenate((coreknots[3], 7000 + 100*numpy.arange(1,4)))
print('knots:')
print(rknots)
print(thetaknots)
print(zknots)
print(tknots)
def spline_spec(ndim):
if ndim > 3:
order = [2,2,2,3] # quadratic in rho, phi, z; cubic in t to get smooth derivatives
penalties = {2:[smooth]*3 + [0], # penalize curvature in rho,z,phi
3:[0]*3 + [smooth]} # order 3 in time CDF => order 2 in time PDF
knots = [rknots, thetaknots, zknots, tknots]
else:
order = [2,2,2] # quadratic splines to get smooth derivatives
penalties = {2:[smooth]*3} # Penalize curvature
knots = [rknots, thetaknots, zknots]
return order, penalties, knots
# Take cumulative sum to get the CDF, and adjust fit points to be
# the right edges of the time bins, where the CDF is measured.
table.values = numpy.cumsum(table.values, axis=3)
table.bin_centers[3] += table.bin_widths[3]/2.
print("Loaded histogram with dimensions ", table.shape)
norm = table.values[:,:,:,-1]
# Rescale all axes to have a maximum value of ~ 10
axis_scale = []
knots = [rknots, thetaknots, zknots, tknots]
for i in range(0,len(table.bin_centers)):
scale = 2**numpy.floor(numpy.log(numpy.max(table.bin_centers[i])/10.) /
numpy.log(2))
axis_scale.append(scale)
table.bin_centers[i] /= scale
knots[i] /= scale
table.bin_widths[i] /= scale
if opts.abs:
z = numpy.log(norm)
# add some numerical stability sauce
w = 1000*numpy.ones(norm.shape)
# Zero (and remove from fit) table cells with non-finite values
# (e.g. photon count was zero, and we have log(0) at this point)
w[numpy.logical_not(numpy.isfinite(z))] = 0
z[numpy.logical_not(numpy.isfinite(z))] = 0
# XXX HACK: don't believe anything that happens outside the
# tracking volume of the table
#scalp(table, w, low=opts.ice_bottom, high=opts.ice_top)
# XXX HACK: don't believe anything in the first 3 radial bins
#w[:3,:,:] = 0
order, penalties, knots = spline_spec(3)
print('Number of knots used: ',[len(a) for a in knots])
print("Beginning spline fit for abs table...")
spline = glam.fit(z,w,table.bin_centers[:3],knots,order,smooth,penalties=penalties)
print("Saving table to %s..." % abs_outputfile)
spline.knots = [spline.knots[i] * axis_scale[i] for i
in range(0, len(spline.knots))]
splinefitstable.write(spline, abs_outputfile)
# clean up
del(w,z,order,penalties,knots,spline)
if opts.prob:
z = table.values / norm.reshape(norm.shape + (1,))
# Same sauce as above.
w = 1000*numpy.ones(table.weights.shape)
w[numpy.logical_not(numpy.isfinite(z))] = 0
z[numpy.logical_not(numpy.isfinite(z))] = 0
order, penalties, knots = spline_spec(4)
centers = table.bin_centers
# XXX HACK: don't believe anything that happens outside the
# tracking volume of the table
#scalp(table, w, low=opts.ice_bottom, high=opts.ice_top)
# XXX HACK: also, don't believe anything in the first 3 radial bins
#w[:3,:,:,:] = 0
# go ahead and remove the table from memory
del(table, norm)
print('Number of knots used: ',[len(a) for a in knots])
print("Beginning spline fit for timing table...")
spline = glam.fit(z,w,centers,knots,order,smooth,penalties=penalties,monodim=3)
print("Saving table to %s..." % prob_outputfile)
spline.knots = [spline.knots[i] * axis_scale[i] for i
in range(0, len(spline.knots))]
splinefitstable.write(spline, prob_outputfile)
# clean up
del(w,z,order,penalties,knots,spline)
# smoothed = glam.grideval(spline, table.bin_centers)
# resid = (smoothed - table.values)[table.weights != 0]
# fracresid = ((smoothed - table.values)/table.values)[table.weights != 0]
#
#
# print "Fit Statistics:"
# print "\tMaximum Deviation from Data:",numpy.max(numpy.abs(resid))
# print "\tRMS Deviation from Data:",numpy.sqrt(numpy.mean(resid**2))
# print "\tMax Fractional Deviation from Data:",numpy.max(numpy.abs(fracresid))
# print "\tMean Fractional Deviation from Data:",numpy.mean(numpy.abs(fracresid))
| 36.162162 | 87 | 0.690049 |
4a24d284d629284f315011b7f501fc41c9a4a9d9 | 3,268 | py | Python | applications/tensorflow/cnns/training/Models/model_base.py | xihuaiwen/chinese_bert | 631afbc76c40b0ac033be2186e717885246f446c | [
"MIT"
] | null | null | null | applications/tensorflow/cnns/training/Models/model_base.py | xihuaiwen/chinese_bert | 631afbc76c40b0ac033be2186e717885246f446c | [
"MIT"
] | null | null | null | applications/tensorflow/cnns/training/Models/model_base.py | xihuaiwen/chinese_bert | 631afbc76c40b0ac033be2186e717885246f446c | [
"MIT"
] | null | null | null | # Copyright 2020 Graphcore Ltd.
import tensorflow as tf
from functools import partial
def custom_dtype_getter(getter, name, dtype, trainable,
master_weight_filter_fn,
shape=None, *args, **kwargs):
master_dtype = master_weight_filter_fn(name)
if dtype != master_dtype and trainable:
var = getter(
name, shape, master_dtype, *args, trainable=trainable, **kwargs
)
return tf.cast(var, dtype=dtype, name=name + "_cast")
else:
return getter(name, shape, dtype, *args, trainable=trainable, **kwargs)
class ModelBase:
def __init__(self, opts, is_training=True):
dtypes = opts["precision"].split(".")
self.dtype = tf.float16 if dtypes[0] == "16" else tf.float32
self.master_weight_filter_fn = (
lambda name: tf.float32 if dtypes[1] == "32" else tf.float16
)
self.custom_dtype_getter = partial(
custom_dtype_getter,
master_weight_filter_fn=self.master_weight_filter_fn,
)
# Apply dataset specific changes
if opts["dataset"] == "imagenet":
self.num_classes = 1000
elif opts["dataset"] == "cifar-10":
self.num_classes = 10
elif opts["dataset"] == "cifar-100":
self.num_classes = 100
else:
raise ValueError("Unknown Dataset {}".format(opts["dataset"]))
def _build_function_list(self):
raise NotImplementedError
def build_whole_graph(self, x):
fn_list = self._build_function_list()
tf.add_to_collection("activations", x)
with tf.variable_scope("all", use_resource=True, custom_getter=self.custom_dtype_getter):
for fn in fn_list:
x = fn(x)
return x
def first_stage(self, x, first_split_name):
self.fn_list = self._build_function_list()
if first_split_name not in [f.keywords["name"] for f in self.fn_list]:
raise ValueError(
"Couldn't find pipeline split called " + first_split_name
)
tf.add_to_collection("activations", x)
with tf.variable_scope(
"all", use_resource=True, custom_getter=self.custom_dtype_getter
):
for fn in self.fn_list:
if fn.keywords["name"] == first_split_name:
break
x = fn(x)
return x
def later_stage(self, x, prev_split_name, end_split_name):
if end_split_name is not None and end_split_name not in [
fn.keywords["name"] for fn in self.fn_list
]:
raise ValueError(
"Couldn't find pipeline split called " + end_split_name
)
with tf.variable_scope(
"all", use_resource=True, custom_getter=self.custom_dtype_getter
):
first_stage = False
for f in self.fn_list:
if (not first_stage and f.keywords["name"] != prev_split_name):
continue
first_stage = True
if f.keywords["name"] == end_split_name:
break
x = f(x)
return x
def __call__(self, x):
return self.build_whole_graph(x)
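# --- Usage sketch (not part of the original file) ---------------------------
# ModelBase is abstract: subclasses implement _build_function_list(), returning
# a list of functools.partial objects whose "name" keyword marks the points at
# which the graph may be split into pipeline stages. The subclass below is
# hypothetical and only illustrates that contract; it assumes TensorFlow 1.x,
# as used by the rest of this file.
if __name__ == "__main__":
    from functools import partial

    class TinyModel(ModelBase):
        def _build_function_list(self):
            def fc(x, name, units):
                # Variables are requested in the compute dtype; the custom
                # getter above keeps fp32 master weights and casts them down.
                with tf.variable_scope(name, use_resource=True):
                    w = tf.get_variable("w", shape=[int(x.shape[-1]), units], dtype=x.dtype)
                    b = tf.get_variable("b", shape=[units], dtype=x.dtype)
                    return tf.matmul(x, w) + b
            return [
                partial(fc, name="fc1", units=128),
                partial(fc, name="logits", units=self.num_classes),
            ]

    opts = {"precision": "16.32", "dataset": "cifar-10"}
    model = TinyModel(opts)
    logits = model(tf.zeros([1, 32 * 32 * 3], dtype=tf.float16))
    print(logits)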
| 35.139785 | 97 | 0.584149 |
4a24d28808d585ebc2b370747222c2624a73176c | 3,621 | py | Python | numpyro/distributions/__init__.py | quattro/numpyro | b7b6e937297ea47c55760446134f84fc82936a9d | [
"Apache-2.0"
] | null | null | null | numpyro/distributions/__init__.py | quattro/numpyro | b7b6e937297ea47c55760446134f84fc82936a9d | [
"Apache-2.0"
] | null | null | null | numpyro/distributions/__init__.py | quattro/numpyro | b7b6e937297ea47c55760446134f84fc82936a9d | [
"Apache-2.0"
] | null | null | null | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from numpyro.distributions.conjugate import (
BetaBinomial,
DirichletMultinomial,
GammaPoisson,
NegativeBinomial2,
NegativeBinomialLogits,
NegativeBinomialProbs,
ZeroInflatedNegativeBinomial2,
)
from numpyro.distributions.continuous import (
LKJ,
Beta,
BetaProportion,
Cauchy,
Chi2,
Dirichlet,
Exponential,
Gamma,
GaussianRandomWalk,
Gumbel,
HalfCauchy,
HalfNormal,
InverseGamma,
Laplace,
LKJCholesky,
Logistic,
LogNormal,
LowRankMultivariateNormal,
MultivariateNormal,
Normal,
Pareto,
SoftLaplace,
StudentT,
Uniform,
Weibull,
)
from numpyro.distributions.directional import (
ProjectedNormal,
SineBivariateVonMises,
VonMises,
)
from numpyro.distributions.discrete import (
Bernoulli,
BernoulliLogits,
BernoulliProbs,
Binomial,
BinomialLogits,
BinomialProbs,
Categorical,
CategoricalLogits,
CategoricalProbs,
Geometric,
GeometricLogits,
GeometricProbs,
Multinomial,
MultinomialLogits,
MultinomialProbs,
OrderedLogistic,
Poisson,
PRNGIdentity,
ZeroInflatedDistribution,
ZeroInflatedPoisson,
)
from numpyro.distributions.distribution import (
Delta,
Distribution,
ExpandedDistribution,
FoldedDistribution,
ImproperUniform,
Independent,
MaskedDistribution,
TransformedDistribution,
Unit,
)
from numpyro.distributions.kl import kl_divergence
from numpyro.distributions.mixtures import MixtureSameFamily
from numpyro.distributions.transforms import biject_to
from numpyro.distributions.truncated import (
LeftTruncatedDistribution,
RightTruncatedDistribution,
TruncatedCauchy,
TruncatedDistribution,
TruncatedNormal,
TruncatedPolyaGamma,
TwoSidedTruncatedDistribution,
)
from . import constraints, transforms
__all__ = [
"biject_to",
"constraints",
"kl_divergence",
"transforms",
"Bernoulli",
"BernoulliLogits",
"BernoulliProbs",
"Beta",
"BetaBinomial",
"BetaProportion",
"Binomial",
"BinomialLogits",
"BinomialProbs",
"Categorical",
"CategoricalLogits",
"CategoricalProbs",
"Cauchy",
"Chi2",
"Delta",
"Dirichlet",
"DirichletMultinomial",
"Distribution",
"Exponential",
"ExpandedDistribution",
"FoldedDistribution",
"Gamma",
"GammaPoisson",
"GaussianRandomWalk",
"Geometric",
"GeometricLogits",
"GeometricProbs",
"Gumbel",
"HalfCauchy",
"HalfNormal",
"ImproperUniform",
"Independent",
"InverseGamma",
"LKJ",
"LKJCholesky",
"Laplace",
"LeftTruncatedDistribution",
"Logistic",
"LogNormal",
"MaskedDistribution",
"MixtureSameFamily",
"Multinomial",
"MultinomialLogits",
"MultinomialProbs",
"MultivariateNormal",
"LowRankMultivariateNormal",
"Normal",
"NegativeBinomialProbs",
"NegativeBinomialLogits",
"NegativeBinomial2",
"OrderedLogistic",
"Pareto",
"Poisson",
"ProjectedNormal",
"PRNGIdentity",
"RightTruncatedDistribution",
"SineBivariateVonMises",
"SoftLaplace",
"StudentT",
"TransformedDistribution",
"TruncatedCauchy",
"TruncatedDistribution",
"TruncatedNormal",
"TruncatedPolyaGamma",
"TwoSidedTruncatedDistribution",
"Uniform",
"Unit",
"VonMises",
"Weibull",
"ZeroInflatedDistribution",
"ZeroInflatedPoisson",
"ZeroInflatedNegativeBinomial2",
]
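# --- Usage sketch (not part of the original file) ---------------------------
# The names re-exported above are normally accessed through the package, e.g.
# `import numpyro.distributions as dist`. The snippet below is illustrative
# only (kept as a comment so nothing runs on import) and assumes JAX is
# installed alongside numpyro:
#
#   import jax
#   import numpyro.distributions as dist
#
#   d = dist.Normal(loc=0.0, scale=1.0)
#   x = d.sample(jax.random.PRNGKey(0), sample_shape=(3,))
#   lp = d.log_prob(x)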
| 21.175439 | 60 | 0.684894 |
4a24d29bbaf05e31a1e4ac4cc8eab395b67d68f3 | 3,113 | py | Python | Python/paste.py | Zarthus/Code-Snippets | ce2025cde910278e3cfdab4a84a2127910b7ca28 | [
"MIT"
] | null | null | null | Python/paste.py | Zarthus/Code-Snippets | ce2025cde910278e3cfdab4a84a2127910b7ca28 | [
"MIT"
] | 1 | 2015-02-01T09:35:23.000Z | 2015-02-01T10:20:22.000Z | Python/paste.py | Zarthus/Code-Snippets | ce2025cde910278e3cfdab4a84a2127910b7ca28 | [
"MIT"
] | 1 | 2019-11-26T11:54:02.000Z | 2019-11-26T11:54:02.000Z | """
paste.py by Zarthus,
Licensed under MIT
"""
import requests
import json
class Paste:
"""
paste.py: Several methods to store text online.
All methods in this class are 'static' and support the 'logger' parameter;
whenever possible, pass it so that errors are logged to the console.
"""
def gist(description, content, filename="ircgist.txt", public=False, logger=None):
"""
Post a gist to https://gist.github.com
description: string, Description for your gist.
content: string, content to paste.
filename: string, filename.ext - name of file and extension
public: boolean, should your gist be visible to public or not.
logger: Logger, an instance of the logger class.
returns link of your gist or False on failure.
If logger is passed an Error will be logged to console.
For more information, reference to https://developer.github.com/v3/gists/#create-a-gist
"""
url = "https://api.github.com/gists"
payload = {
"description": description,
"public": public,
"files": {
filename: {
"content": content
}
}
}
returnurl = ""
try:
r = requests.post(url, data=json.dumps(payload))
if r.ok and "html_url" in r.json:
returnurl = r.json["html_url"]
else:
r.raise_for_status()
except Exception as e:
if logger:
logger.error("Error creating gist '{}': {}".format(filename, str(e)))
if returnurl:
return returnurl
return False
def gist_multifile(description, files, public=False, logger=None):
"""
Upload multiple gists https://gist.github.com
description: string, Description for your gist.
files: dict, following format: {"filename.ext": {"content": "the contents of your file"}}
public: boolean, should your gist be visible to public or not.
logger: Logger, an instance of the logger class.
returns link of your gist or False on failure.
If logger is passed an Error will be logged to console.
For more information, reference to https://developer.github.com/v3/gists/#create-a-gist
"""
url = "https://api.github.com/gists"
payload = {
"description": description,
"public": public,
"files": {
files
}
}
returnurl = ""
try:
r = requests.post(url, data=json.dumps(payload))
if r.ok and "html_url" in r.json:
returnurl = r.json["html_url"]
else:
r.raise_for_status()
except Exception as e:
if logger:
logger.error("Error creating gist multifile: {}".format(str(e)))
if returnurl:
return returnurl
return False
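# --- Usage sketch (not part of the original module) --------------------------
# Minimal, hedged example of calling the helpers above. Note that GitHub has
# restricted anonymous gist creation, so against the live API this may simply
# return False unless authentication is added to the request.
if __name__ == "__main__":
    url = Paste.gist(
        description="example gist",
        content="hello from paste.py",
        filename="example.txt",
        public=False,
    )
    print(url if url else "gist creation failed")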
| 30.223301 | 116 | 0.569547 |
4a24d349776501706f895d41baadf1b5105ef618 | 723 | py | Python | 0x0F-python-object_relational_mapping/4-cities_by_state.py | oluwaseun-ebenezer/holbertonschool-higher_level_programming | e830f969d3ca71abf0a2f6d4f7c64a82337eccd7 | [
"MIT"
] | null | null | null | 0x0F-python-object_relational_mapping/4-cities_by_state.py | oluwaseun-ebenezer/holbertonschool-higher_level_programming | e830f969d3ca71abf0a2f6d4f7c64a82337eccd7 | [
"MIT"
] | null | null | null | 0x0F-python-object_relational_mapping/4-cities_by_state.py | oluwaseun-ebenezer/holbertonschool-higher_level_programming | e830f969d3ca71abf0a2f6d4f7c64a82337eccd7 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
'''Prints all cities and their state in a database.
'''
import sys
import MySQLdb
if __name__ == '__main__':
if len(sys.argv) >= 4:
db_connection = MySQLdb.connect(
host='localhost',
port=3306,
user=sys.argv[1],
passwd=sys.argv[2],
db=sys.argv[3]
)
cursor = db_connection.cursor()
cursor.execute(
'SELECT cities.id, cities.name, states.name FROM cities' +
' INNER JOIN states ON cities.state_id = states.id' +
' ORDER BY cities.id ASC;'
)
results = cursor.fetchall()
for result in results:
print(result)
db_connection.close()
| 26.777778 | 70 | 0.547718 |
4a24d434912e3b460808b2959981543b324bb75f | 596 | py | Python | software/server/V2.0/main.py | NKUSTMCU/MCU | 857135cd83fa55662ba06b44eafe6c7507e4eec5 | [
"MIT"
] | null | null | null | software/server/V2.0/main.py | NKUSTMCU/MCU | 857135cd83fa55662ba06b44eafe6c7507e4eec5 | [
"MIT"
] | null | null | null | software/server/V2.0/main.py | NKUSTMCU/MCU | 857135cd83fa55662ba06b44eafe6c7507e4eec5 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, Response
from camera import VideoCamera
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
def gen(camera):
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@app.route('/video_feed')
def video_feed():
return Response(gen(VideoCamera()),
mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True) | 27.090909 | 74 | 0.610738 |
4a24d456596cebde0564f30a8ecc01c62baf6dac | 1,639 | py | Python | src/predict_job_class.py | samshad/Predict_Job_Type | 0857dbeca5a029f751a0fe8860e4f983333cbcf0 | [
"MIT"
] | null | null | null | src/predict_job_class.py | samshad/Predict_Job_Type | 0857dbeca5a029f751a0fe8860e4f983333cbcf0 | [
"MIT"
] | null | null | null | src/predict_job_class.py | samshad/Predict_Job_Type | 0857dbeca5a029f751a0fe8860e4f983333cbcf0 | [
"MIT"
] | null | null | null | import PyPDF2
import text_cleaner as tc
import pickle
import pandas as pd
def extract_text_from_pdf(file):
f_reader = PyPDF2.PdfFileReader(open(file, 'rb'))
page_count = f_reader.getNumPages()
text = [f_reader.getPage(i).extractText() for i in range(page_count)]
return str(text).replace("\\n", "")
def get_job_class(doc):
with open('Model/vectorizer.pkl', 'rb') as pickle_file:
word_vectorizer = pickle.load(pickle_file)
with open('Model/knn.pkl', 'rb') as pickle_file:
knn = pickle.load(pickle_file)
with open('Model/label_encoder.pkl', 'rb') as pickle_file:
le = pickle.load(pickle_file)
doc = tc.cleaner(doc)
pred_txt = word_vectorizer.transform([doc])
prediction = knn.predict(pred_txt)
#print(prediction)
return le.inverse_transform(prediction)
"""resume_txt = tc.cleaner(extract_text_from_pdf('Data/Resumes/Md Samshad Rahman.pdf'))
pred_txt = word_vectorizer.transform([resume_txt])
print(pred_txt.shape)
prediction = knn.predict(pred_txt)
print(prediction)
print(le.inverse_transform(prediction))
df = pd.read_csv('Data/Resumes/Archive/ResumeDataSet_1.csv')
print(df['category'].value_counts())
# tf = df[df['category'] == 'HR']
# tf = df[df['category'] == 'Java Developer']
tf = df[df['category'] == 'Mechanical Engineer']
# tf = df[df['category'] == 'Business Analyst']
print(tf['category'].value_counts())
for index, row in tf.iterrows():
resume_txt = tc.cleaner(row['resume'])
pred_txt = word_vectorizer.transform([resume_txt])
prediction = knn.predict(pred_txt)
print(prediction)
print(le.inverse_transform(prediction))
"""
| 31.519231 | 87 | 0.707749 |
4a24d459c9127836324f0a9607061d94d5fd9e37 | 19,170 | py | Python | dvc/output/base.py | asford/dvc | 4ed55d00511ea3d9115b76c463e1a466408b11ef | [
"Apache-2.0"
] | null | null | null | dvc/output/base.py | asford/dvc | 4ed55d00511ea3d9115b76c463e1a466408b11ef | [
"Apache-2.0"
] | 81 | 2021-04-13T08:02:09.000Z | 2022-03-30T16:10:17.000Z | dvc/output/base.py | asford/dvc | 4ed55d00511ea3d9115b76c463e1a466408b11ef | [
"Apache-2.0"
] | 2 | 2021-06-14T19:12:25.000Z | 2021-06-14T19:12:29.000Z | import logging
import os
from copy import copy
from typing import Type
from urllib.parse import urlparse
from voluptuous import Any
import dvc.objects as objects
import dvc.prompt as prompt
from dvc.checkout import checkout
from dvc.exceptions import (
CheckoutError,
CollectCacheError,
DvcException,
MergeError,
RemoteCacheRequiredError,
)
from dvc.hash_info import HashInfo
from dvc.objects.db import NamedCache
from dvc.objects.errors import ObjectFormatError
from dvc.objects.stage import stage as ostage
from ..fs.base import BaseFileSystem
logger = logging.getLogger(__name__)
class OutputDoesNotExistError(DvcException):
def __init__(self, path):
msg = f"output '{path}' does not exist"
super().__init__(msg)
class OutputIsNotFileOrDirError(DvcException):
def __init__(self, path):
msg = f"output '{path}' is not a file or directory"
super().__init__(msg)
class OutputAlreadyTrackedError(DvcException):
def __init__(self, path):
msg = f""" output '{path}' is already tracked by SCM (e.g. Git).
You can remove it from Git, then add to DVC.
To stop tracking from Git:
git rm -r --cached '{path}'
git commit -m "stop tracking {path}" """
super().__init__(msg)
class OutputIsStageFileError(DvcException):
def __init__(self, path):
super().__init__(f"DVC file '{path}' cannot be an output.")
class OutputIsIgnoredError(DvcException):
def __init__(self, match):
lines = "\n".join(match.patterns)
super().__init__(f"Path '{match.file}' is ignored by\n{lines}")
class BaseOutput:
IS_DEPENDENCY = False
FS_CLS = BaseFileSystem
PARAM_PATH = "path"
PARAM_CACHE = "cache"
PARAM_CHECKPOINT = "checkpoint"
PARAM_METRIC = "metric"
PARAM_METRIC_TYPE = "type"
PARAM_METRIC_XPATH = "xpath"
PARAM_PLOT = "plot"
PARAM_PLOT_TEMPLATE = "template"
PARAM_PLOT_X = "x"
PARAM_PLOT_Y = "y"
PARAM_PLOT_X_LABEL = "x_label"
PARAM_PLOT_Y_LABEL = "y_label"
PARAM_PLOT_TITLE = "title"
PARAM_PLOT_HEADER = "header"
PARAM_PERSIST = "persist"
PARAM_DESC = "desc"
PARAM_ISEXEC = "isexec"
PARAM_LIVE = "live"
PARAM_LIVE_SUMMARY = "summary"
PARAM_LIVE_HTML = "html"
METRIC_SCHEMA = Any(
None,
bool,
{
PARAM_METRIC_TYPE: Any(str, None),
PARAM_METRIC_XPATH: Any(str, None),
},
)
DoesNotExistError = OutputDoesNotExistError # type: Type[DvcException]
IsNotFileOrDirError = OutputIsNotFileOrDirError # type: Type[DvcException]
IsStageFileError = OutputIsStageFileError # type: Type[DvcException]
IsIgnoredError = OutputIsIgnoredError # type: Type[DvcException]
sep = "/"
def __init__(
self,
stage,
path,
info=None,
fs=None,
cache=True,
metric=False,
plot=False,
persist=False,
checkpoint=False,
live=False,
desc=None,
isexec=False,
):
self._validate_output_path(path, stage)
# This output (and dependency) objects have too many paths/urls
# here is a list and comments:
#
# .def_path - path from definition in DVC file
# .path_info - PathInfo/URLInfo structured resolved path
# .fspath - local only, resolved
# .__str__ - for presentation purposes, def_path/relpath
#
# By "resolved path" we mean one that contains the actual location,
# is absolute, and does not contain remote:// refs.
self.stage = stage
self.repo = stage.repo if stage else None
self.def_path = path
self.hash_info = HashInfo.from_dict(info)
if fs:
self.fs = fs
else:
self.fs = self.FS_CLS(self.repo, {})
self.use_cache = False if self.IS_DEPENDENCY else cache
self.metric = False if self.IS_DEPENDENCY else metric
self.plot = False if self.IS_DEPENDENCY else plot
self.persist = persist
self.checkpoint = checkpoint
self.live = live
self.desc = desc
self.path_info = self._parse_path(fs, path)
if self.use_cache and self.odb is None:
raise RemoteCacheRequiredError(self.path_info)
self.obj = None
self.isexec = False if self.IS_DEPENDENCY else isexec
def _parse_path(self, fs, path):
if fs:
parsed = urlparse(path)
return fs.path_info / parsed.path.lstrip("/")
return self.FS_CLS.PATH_CLS(path)
def __repr__(self):
return "{class_name}: '{def_path}'".format(
class_name=type(self).__name__, def_path=self.def_path
)
def __str__(self):
return self.def_path
@property
def scheme(self):
return self.FS_CLS.scheme
@property
def is_in_repo(self):
return False
@property
def use_scm_ignore(self):
if not self.is_in_repo:
return False
return self.use_cache or self.stage.is_repo_import
@property
def odb(self):
return getattr(self.repo.odb, self.scheme)
@property
def cache_path(self):
return self.odb.hash_to_path_info(self.hash_info.value).url
def get_hash(self):
if not self.use_cache:
return ostage(
self.repo.odb.local,
self.path_info,
self.fs,
self.fs.PARAM_CHECKSUM,
).hash_info
return ostage(
self.odb, self.path_info, self.fs, self.odb.fs.PARAM_CHECKSUM
).hash_info
@property
def is_dir_checksum(self):
return self.hash_info.isdir
@property
def exists(self):
return self.fs.exists(self.path_info)
def changed_checksum(self):
return self.hash_info != self.get_hash()
def changed_cache(self, filter_info=None):
if not self.use_cache or not self.hash_info:
return True
obj = self.get_obj(filter_info=filter_info)
if not obj:
return True
try:
objects.check(self.odb, obj)
return False
except (FileNotFoundError, ObjectFormatError):
return True
def workspace_status(self):
if not self.exists:
return {str(self): "deleted"}
if self.changed_checksum():
return {str(self): "modified"}
if not self.hash_info:
return {str(self): "new"}
return {}
def status(self):
if self.hash_info and self.use_cache and self.changed_cache():
return {str(self): "not in cache"}
return self.workspace_status()
def changed(self):
status = self.status()
logger.debug(str(status))
return bool(status)
@property
def is_empty(self):
return self.fs.is_empty(self.path_info)
def isdir(self):
return self.fs.isdir(self.path_info)
def isfile(self):
return self.fs.isfile(self.path_info)
# pylint: disable=no-member
def ignore(self):
if not self.use_scm_ignore:
return
if self.repo.scm.is_tracked(self.fspath):
raise OutputAlreadyTrackedError(self)
self.repo.scm.ignore(self.fspath)
def ignore_remove(self):
if not self.use_scm_ignore:
return
self.repo.scm.ignore_remove(self.fspath)
# pylint: enable=no-member
def save(self):
if not self.exists:
raise self.DoesNotExistError(self)
if not self.isfile() and not self.isdir():
raise self.IsNotFileOrDirError(self)
if self.is_empty:
logger.warning(f"'{self}' is empty.")
self.ignore()
if self.metric or self.plot:
self.verify_metric()
if not self.use_cache:
self.hash_info = self.get_hash()
if not self.IS_DEPENDENCY:
logger.debug(
"Output '%s' doesn't use cache. Skipping saving.", self
)
return
assert not self.IS_DEPENDENCY
if not self.changed():
logger.debug("Output '%s' didn't change. Skipping saving.", self)
return
self.obj = ostage(
self.odb, self.path_info, self.fs, self.odb.fs.PARAM_CHECKSUM
)
self.hash_info = self.obj.hash_info
self.isexec = self.isfile() and self.fs.isexec(self.path_info)
def set_exec(self):
if self.isfile() and self.isexec:
self.odb.set_exec(self.path_info)
def commit(self, filter_info=None):
if not self.exists:
raise self.DoesNotExistError(self)
assert self.hash_info
if self.use_cache:
obj = ostage(
self.odb,
filter_info or self.path_info,
self.fs,
self.odb.fs.PARAM_CHECKSUM,
)
objects.save(self.odb, obj)
checkout(
filter_info or self.path_info,
self.fs,
obj,
self.odb,
relink=True,
)
self.set_exec()
def dumpd(self):
ret = copy(self.hash_info.to_dict())
ret[self.PARAM_PATH] = self.def_path
if self.IS_DEPENDENCY:
return ret
if self.desc:
ret[self.PARAM_DESC] = self.desc
if not self.use_cache:
ret[self.PARAM_CACHE] = self.use_cache
if isinstance(self.metric, dict):
if (
self.PARAM_METRIC_XPATH in self.metric
and not self.metric[self.PARAM_METRIC_XPATH]
):
del self.metric[self.PARAM_METRIC_XPATH]
if self.metric:
ret[self.PARAM_METRIC] = self.metric
if self.plot:
ret[self.PARAM_PLOT] = self.plot
if self.persist:
ret[self.PARAM_PERSIST] = self.persist
if self.checkpoint:
ret[self.PARAM_CHECKPOINT] = self.checkpoint
if self.isexec:
ret[self.PARAM_ISEXEC] = self.isexec
if self.live:
ret[self.PARAM_LIVE] = self.live
return ret
def verify_metric(self):
raise DvcException(f"verify metric is not supported for {self.scheme}")
def download(self, to, jobs=None):
self.fs.download(self.path_info, to.path_info, jobs=jobs)
def get_obj(self, filter_info=None):
if self.obj:
obj = self.obj
elif self.hash_info:
try:
obj = objects.load(self.odb, self.hash_info)
except FileNotFoundError:
return None
else:
return None
if filter_info and filter_info != self.path_info:
prefix = filter_info.relative_to(self.path_info).parts
obj = obj.filter(self.odb, prefix)
return obj
def checkout(
self,
force=False,
progress_callback=None,
relink=False,
filter_info=None,
allow_missing=False,
checkpoint_reset=False,
**kwargs,
):
if not self.use_cache:
if progress_callback:
progress_callback(
str(self.path_info), self.get_files_number(filter_info)
)
return None
obj = self.get_obj(filter_info=filter_info)
if not obj and (filter_info and filter_info != self.path_info):
# backward compatibility
return None
if self.checkpoint and checkpoint_reset:
if self.exists:
self.remove()
return None
added = not self.exists
try:
modified = checkout(
filter_info or self.path_info,
self.fs,
obj,
self.odb,
force=force,
progress_callback=progress_callback,
relink=relink,
**kwargs,
)
except CheckoutError:
if allow_missing or self.checkpoint:
return None
raise
self.set_exec()
return added, False if added else modified
def remove(self, ignore_remove=False):
self.fs.remove(self.path_info)
if self.scheme != "local":
return
if ignore_remove:
self.ignore_remove()
def move(self, out):
# pylint: disable=no-member
if self.scheme == "local" and self.use_scm_ignore:
self.repo.scm.ignore_remove(self.fspath)
self.fs.move(self.path_info, out.path_info)
self.def_path = out.def_path
self.path_info = out.path_info
self.save()
self.commit()
if self.scheme == "local" and self.use_scm_ignore:
self.repo.scm.ignore(self.fspath)
def get_files_number(self, filter_info=None):
if not self.use_cache or not self.hash_info:
return 0
if not self.hash_info.isdir:
return 1
if not filter_info or filter_info == self.path_info:
return self.hash_info.nfiles or 0
obj = self.get_obj(filter_info=filter_info)
return len(obj) if obj else 0
def unprotect(self):
if self.exists:
self.odb.unprotect(self.path_info)
def get_dir_cache(self, **kwargs):
if not self.is_dir_checksum:
raise DvcException("cannot get dir cache for file checksum")
try:
objects.check(self.odb, self.odb.get(self.hash_info))
except (FileNotFoundError, ObjectFormatError):
self.repo.cloud.pull(
NamedCache.make("local", self.hash_info.value, str(self)),
show_checksums=False,
**kwargs,
)
try:
self.obj = objects.load(self.odb, self.hash_info)
except (FileNotFoundError, ObjectFormatError):
self.obj = None
return self.obj
def collect_used_dir_cache(
self, remote=None, force=False, jobs=None, filter_info=None
):
"""Get a list of `info`s related to the given directory.
- Pull the directory entry from the remote cache if it was changed.
Example:
Given the following commands:
$ echo "foo" > directory/foo
$ echo "bar" > directory/bar
$ dvc add directory
It will return a NamedCache like:
nc = NamedCache()
nc.add(self.scheme, 'c157a79031e1', 'directory/foo')
nc.add(self.scheme, 'd3b07384d113', 'directory/bar')
"""
cache = NamedCache()
try:
self.get_dir_cache(jobs=jobs, remote=remote)
except DvcException:
logger.debug(f"failed to pull cache for '{self}'")
try:
objects.check(self.odb, self.odb.get(self.hash_info))
except (FileNotFoundError, ObjectFormatError):
msg = (
"Missing cache for directory '{}'. "
"Cache for files inside will be lost. "
"Would you like to continue? Use '-f' to force."
)
if not force and not prompt.confirm(msg.format(self.path_info)):
raise CollectCacheError(
"unable to fully collect used cache"
" without cache for directory '{}'".format(self)
)
return cache
path = str(self.path_info)
filter_path = str(filter_info) if filter_info else None
for entry_key, entry_obj in self.obj:
entry_path = os.path.join(path, *entry_key)
if (
not filter_path
or entry_path == filter_path
or entry_path.startswith(filter_path + os.sep)
):
cache.add(self.scheme, entry_obj.hash_info.value, entry_path)
return cache
def get_used_cache(self, **kwargs):
"""Get a dumpd of the given `out`, with an entry including the branch.
The `used_cache` of an output is no more than its `info`.
In case that the given output is a directory, it will also
include the `info` of its files.
"""
if not self.use_cache:
return NamedCache()
if self.stage.is_repo_import:
cache = NamedCache()
(dep,) = self.stage.deps
cache.external[dep.repo_pair].add(dep.def_path)
return cache
if not self.hash_info:
msg = (
"Output '{}'({}) is missing version info. "
"Cache for it will not be collected. "
"Use `dvc repro` to get your pipeline up to date.".format(
self, self.stage
)
)
if self.exists:
msg += (
"\n"
"You can also use `dvc commit {stage.addressing}` "
"to associate existing '{out}' with {stage}.".format(
out=self, stage=self.stage
)
)
logger.warning(msg)
return NamedCache()
ret = NamedCache.make(self.scheme, self.hash_info.value, str(self))
if not self.is_dir_checksum:
return ret
ret.add_child_cache(
self.hash_info.value, self.collect_used_dir_cache(**kwargs),
)
return ret
@classmethod
def _validate_output_path(cls, path, stage=None):
from dvc.dvcfile import is_valid_filename
if is_valid_filename(path):
raise cls.IsStageFileError(path)
if stage:
abs_path = os.path.join(stage.wdir, path)
if stage.repo.fs.dvcignore.is_ignored(abs_path):
check = stage.repo.fs.dvcignore.check_ignore(abs_path)
raise cls.IsIgnoredError(check)
def _check_can_merge(self, out):
if self.scheme != out.scheme:
raise MergeError("unable to auto-merge outputs of different types")
my = self.dumpd()
other = out.dumpd()
ignored = [
self.fs.PARAM_CHECKSUM,
HashInfo.PARAM_SIZE,
HashInfo.PARAM_NFILES,
]
for opt in ignored:
my.pop(opt, None)
other.pop(opt, None)
if my != other:
raise MergeError(
"unable to auto-merge outputs with different options"
)
if not out.is_dir_checksum:
raise MergeError(
"unable to auto-merge outputs that are not directories"
)
def merge(self, ancestor, other):
from dvc.objects.tree import merge
assert other
if ancestor:
self._check_can_merge(ancestor)
ancestor_info = ancestor.hash_info
else:
ancestor_info = None
self._check_can_merge(self)
self._check_can_merge(other)
self.hash_info = merge(
self.odb, ancestor_info, self.hash_info, other.hash_info
)
| 28.484398 | 79 | 0.57663 |
4a24d55254f47deb5cddb4570c50e2d744308e4f | 395 | py | Python | qctrl_api/qctrl_api/wsgi.py | bibek-Neupane/back-end-challenge | d5a7b33adaa59e5ad566ac7435132c990f80a740 | [
"Apache-2.0"
] | null | null | null | qctrl_api/qctrl_api/wsgi.py | bibek-Neupane/back-end-challenge | d5a7b33adaa59e5ad566ac7435132c990f80a740 | [
"Apache-2.0"
] | null | null | null | qctrl_api/qctrl_api/wsgi.py | bibek-Neupane/back-end-challenge | d5a7b33adaa59e5ad566ac7435132c990f80a740 | [
"Apache-2.0"
] | null | null | null | """
WSGI config for qctrl_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'qctrl_api.settings')
application = get_wsgi_application()
| 23.235294 | 78 | 0.787342 |
4a24d565d5685bb8cdf3477da4384c8d2c0ebaeb | 1,879 | py | Python | bintree.py | zhoujungis/Data-Structures-and-Algorithms | b96ff2eeca6dee8e6cfa6dd53f78fb84a2af7acf | [
"Apache-2.0"
] | null | null | null | bintree.py | zhoujungis/Data-Structures-and-Algorithms | b96ff2eeca6dee8e6cfa6dd53f78fb84a2af7acf | [
"Apache-2.0"
] | null | null | null | bintree.py | zhoujungis/Data-Structures-and-Algorithms | b96ff2eeca6dee8e6cfa6dd53f78fb84a2af7acf | [
"Apache-2.0"
] | null | null | null | from prioqueue import PrioQueue
# NOTE: BinTree.preorder/postorder below use SStack (a sequential stack); the
# module name in the import is assumed to match this repo's stack implementation.
from sstack import SStack
class BinTNode:
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
class BinTree:
def __init__(self):
self._root = None
def is_empty(self):
return self._root is None
def root(self):
return self._root
def leftchild(self):
return self._root.left
def rightchild(self):
return self._root.right
def set_root(self, rootnode):
self._root = rootnode
def set_left(self, leftchild):
self._root.left = leftchild
def set_right(self, rightchild):
self._root.right = rightchild
def preorder(self):
t, s = self._root, SStack()
while t or not s.is_empty():
while t:
s.push(t.right)
yield t.data
t = t.left
t = s.pop()
def postorder(self):
t, s = self._root, SStack()
while t or not s.is_empty():
s.push(t)
t = t.left if t.left else t.right
t = s.pop()
yield t.data
if not s.is_empty() and s.top().left == t:
t = s.top().right
else:
t = None
# Huffman tree
class HTNode(BinTNode):
def __lt__(self, othernode):
if not isinstance(othernode, HTNode):
raise ValueError
return self.data < othernode.data
class HuffmanPrioQ(PrioQueue):
def number(self):
return len(self._elems)
def HuffmanTree(weights):
trees = HuffmanPrioQ()
for w in weights:
trees.enqueue(HTNode(w))
while trees.number() > 1:
t1 = trees.dequeue()
t2 = trees.dequeue()
x = t1.data + t2.data
trees.enqueue(HTNode(x, t1, t2))
return trees.dequeue()
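# --- Usage sketch (not part of the original module) --------------------------
# Builds a Huffman tree from a list of weights and reads the total weight back
# from the root node. Only relies on the classes above plus PrioQueue from this
# repo; it assumes PrioQueue.dequeue() returns the smallest element.
if __name__ == "__main__":
    root = HuffmanTree([2, 3, 7, 10, 4])
    print(root.data)  # total weight of all leaves: 26
    t = BinTree()
    t.set_root(BinTNode(1, BinTNode(2), BinTNode(3)))
    print(t.root().data, t.leftchild().data, t.rightchild().data)  # 1 2 3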
| 24.723684 | 53 | 0.53273 |
4a24d58f7170b34c9d582f76324a87311cfb1b87 | 2,769 | py | Python | model.py | dsmoore96/wineorigin | 7c628a4811108fc651347ca04674f76345935884 | [
"Apache-2.0"
] | 1 | 2018-11-29T05:21:17.000Z | 2018-11-29T05:21:17.000Z | model.py | dsmoore96/wineorigin | 7c628a4811108fc651347ca04674f76345935884 | [
"Apache-2.0"
] | 1 | 2018-12-06T08:02:04.000Z | 2018-12-06T08:02:04.000Z | model.py | dsmoore96/wineorigin | 7c628a4811108fc651347ca04674f76345935884 | [
"Apache-2.0"
] | 1 | 2019-02-19T00:59:41.000Z | 2019-02-19T00:59:41.000Z | import gensim
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class CNN_Text(nn.Module):
def __init__(self, args):
super(CNN_Text, self).__init__()
self.args = args
V = args.embed_num
D = args.embed_dim
C = args.class_num
Ci = 1
Co = args.kernel_num
Ks = args.kernel_sizes
# Build word embeddings matrix
#Should you only use most freq words or all of them
use_subset = False
model = gensim.models.Word2Vec.load('wine2vec.model')
if use_subset:
model = gensim.models.Word2Vec.load('wine2vec_subset.model')
matrix_len = len(args.vocab)
weights_matrix = np.zeros((matrix_len, D))
words_found = 0
not_found = 0
for i, word in enumerate(args.vocab.itos):
try:
weights_matrix[i] = model.wv[word]
words_found += 1
except KeyError:
weights_matrix[i] = np.zeros(D)
print(word)
not_found += 1
print("Words found" + str(words_found))
print("Not Found" + str(not_found))
weights = torch.FloatTensor(weights_matrix)
#self.embed = nn.Embedding(V, D)
self.embed = nn.Embedding.from_pretrained(weights)
self.embed.weight.requires_grad = False
# self.convs1 = [nn.Conv2d(Ci, Co, (K, D)) for K in Ks]
self.convs1 = nn.ModuleList([nn.Conv2d(Ci, Co, (K, D)) for K in Ks])
'''
self.conv13 = nn.Conv2d(Ci, Co, (3, D))
self.conv14 = nn.Conv2d(Ci, Co, (4, D))
self.conv15 = nn.Conv2d(Ci, Co, (5, D))
'''
self.dropout = nn.Dropout(args.dropout)
self.fc1 = nn.Linear(len(Ks)*Co, C)
def conv_and_pool(self, x, conv):
x = F.relu(conv(x)).squeeze(3) # (N, Co, W)
x = F.max_pool1d(x, x.size(2)).squeeze(2)
return x
def forward(self, x):
x = self.embed(x) # (N, W, D)
if self.args.static:
x = Variable(x)
x = x.unsqueeze(1) # (N, Ci, W, D)
x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] # [(N, Co, W), ...]*len(Ks)
x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # [(N, Co), ...]*len(Ks)
x = torch.cat(x, 1)
'''
x1 = self.conv_and_pool(x,self.conv13) #(N,Co)
x2 = self.conv_and_pool(x,self.conv14) #(N,Co)
x3 = self.conv_and_pool(x,self.conv15) #(N,Co)
x = torch.cat((x1, x2, x3), 1) # (N,len(Ks)*Co)
'''
x = self.dropout(x) # (N, len(Ks)*Co)
logit = self.fc1(x) # (N, C)
return logit
| 28.84375 | 93 | 0.530878 |
4a24d5e8e4864c0b5df5825fdf7dd75b9ea4e5d1 | 166 | py | Python | notebooks/funnel/config.py | matt-long/aerobic-safety-margins | 2f58775d8e67ea105a217ce89d09e239d208e001 | [
"MIT"
] | null | null | null | notebooks/funnel/config.py | matt-long/aerobic-safety-margins | 2f58775d8e67ea105a217ce89d09e239d208e001 | [
"MIT"
] | null | null | null | notebooks/funnel/config.py | matt-long/aerobic-safety-margins | 2f58775d8e67ea105a217ce89d09e239d208e001 | [
"MIT"
] | null | null | null | cache_catalog_dir = 'data/funnel-catalog'
cache_catalog_prefix = 'funnel-catalog-entry'
cache_format = 'zarr'
# TODO: provide some defaults and control over defaults
| 33.2 | 55 | 0.801205 |
4a24d5f6ffb0567fa893d2af018c946e695cbb7b | 5,395 | py | Python | src/datadog_api_client/v2/model/role_create_request.py | DataDog/datadog-api-client-python | de2fc57dbde9acf4b8c8eef94ac29911227a62a2 | [
"Apache-2.0"
] | 32 | 2021-01-07T15:09:56.000Z | 2022-01-30T05:49:23.000Z | src/datadog_api_client/v2/model/role_create_request.py | DataDog/datadog-api-client-python | de2fc57dbde9acf4b8c8eef94ac29911227a62a2 | [
"Apache-2.0"
] | 228 | 2020-09-03T14:03:54.000Z | 2022-03-31T20:16:12.000Z | src/datadog_api_client/v2/model/role_create_request.py | DataDog/datadog-api-client-python | de2fc57dbde9acf4b8c8eef94ac29911227a62a2 | [
"Apache-2.0"
] | 12 | 2020-09-15T21:36:03.000Z | 2022-03-31T17:13:17.000Z | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v2.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
)
def lazy_import():
from datadog_api_client.v2.model.role_create_data import RoleCreateData
globals()["RoleCreateData"] = RoleCreateData
class RoleCreateRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
"data": (RoleCreateData,), # noqa: E501
}
discriminator = None
attribute_map = {
"data": "data", # noqa: E501
}
read_only_vars = {}
_composed_schemas = {}
@convert_js_args_to_python_args
def __init__(self, data, *args, **kwargs): # noqa: E501
"""RoleCreateRequest - a model defined in OpenAPI
Args:
data (RoleCreateData):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
super().__init__(kwargs)
self._check_pos_args(args)
self.data = data
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, data, *args, **kwargs): # noqa: E501
"""Helper creating a new instance from a response."""
self = super(RoleCreateRequest, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
self.data = data
return self
| 38.535714 | 108 | 0.592586 |
4a24d80d1a515b764fcf4f14121e595d3d5924f7 | 33,117 | py | Python | reactive/etcd.py | exceptorr/layer-etcd | 53d38096a6de8d4bcc18a2cb64a94d904c496660 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | reactive/etcd.py | exceptorr/layer-etcd | 53d38096a6de8d4bcc18a2cb64a94d904c496660 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | reactive/etcd.py | exceptorr/layer-etcd | 53d38096a6de8d4bcc18a2cb64a94d904c496660 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
from charms import layer
from charms.layer import snap
from charms.reactive import endpoint_from_flag
from charms.reactive import when
from charms.reactive import when_any
from charms.reactive import when_not
from charms.reactive import is_state
from charms.reactive import set_state
from charms.reactive import is_flag_set
from charms.reactive import clear_flag
from charms.reactive import remove_state
from charms.reactive import hook
from charms.reactive.helpers import data_changed
from charms.templating.jinja2 import render
from charmhelpers.core.hookenv import log
from charmhelpers.core.hookenv import leader_set
from charmhelpers.core.hookenv import leader_get
from charmhelpers.core.hookenv import storage_get
from charmhelpers.core.hookenv import application_version_set
from charmhelpers.core.hookenv import open_port
from charmhelpers.core.hookenv import close_port
from charmhelpers.core.host import write_file
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.contrib.charmsupport import nrpe
from charms.layer import status
from etcdctl import EtcdCtl
from etcdctl import get_connection_string
from etcd_databag import EtcdDatabag
from etcd_lib import get_ingress_address, get_ingress_addresses
from shlex import split
from subprocess import check_call
from subprocess import check_output
from subprocess import CalledProcessError
from shutil import copyfile
import os
import charms.leadership # noqa
import socket
import time
import traceback
import yaml
import shutil
import random
# Layer Note: the @when_not etcd.installed state checks are relating to
# a boundary that was superimposed by the etcd-24 release which added support
# for snaps. Snapped etcd is now the only supported mechanism by this charm.
# References to this state will be wiped sometime within the next 10 releases
# of the charm.
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
def get_target_etcd_channel():
"""
Check whether etcd is already installed, i.e. whether we are
going through an upgrade. If so, leave the etcd version alone;
if we're a new install, we can set the default channel here.
If the user has specified a version, then just return that.
:return: String snap channel
"""
channel = hookenv.config('channel')
if channel == 'auto':
if snap.is_installed('etcd'):
return False
else:
return '3.4/stable'
else:
return channel
@when('etcd.installed')
def snap_upgrade_notice():
status.blocked('Manual migration required. http://bit.ly/2oznAUZ')
@when_any('etcd.registered', 'etcd.leader.configured')
@when_not('etcd.installed')
@when_not('upgrade.series.in-progress')
def check_cluster_health():
''' report on the cluster health every 5 minutes'''
etcdctl = EtcdCtl()
health = etcdctl.cluster_health()
# Determine if the unit is healthy or unhealthy
if 'unhealthy' in health['status']:
unit_health = "UnHealthy"
else:
unit_health = "Healthy"
# Determine units peer count, and surface 0 by default
try:
peers = len(etcdctl.member_list())
except Exception:
unit_health = "Errored"
peers = 0
bp = "{0} with {1} known peer{2}"
status_message = bp.format(unit_health, peers, 's' if peers != 1 else '')
status.active(status_message)
@when('snap.installed.etcd')
@when_not('etcd.installed')
def set_app_version():
''' Surface the etcd application version on juju status '''
# note - the snap doesn't place an etcd alias on disk. This shall infer
# the version from etcdctl, as the snap distributes both in lockstep.
application_version_set(etcd_version())
@when_not('certificates.available')
def missing_relation_notice():
status.blocked('Missing relation to certificate authority.')
@when('certificates.available')
def prepare_tls_certificates(tls):
common_name = hookenv.unit_public_ip()
sans = set()
sans.add(hookenv.unit_public_ip())
sans.update(get_ingress_addresses('db'))
sans.update(get_ingress_addresses('cluster'))
sans.add(socket.gethostname())
# add cluster peers as alt names when present
cluster = endpoint_from_flag('cluster.joined')
if cluster:
for ip in cluster.get_db_ingress_addresses():
sans.add(ip)
sans = sorted(sans)
certificate_name = hookenv.local_unit().replace('/', '_')
tls.request_server_cert(common_name, sans, certificate_name)
@hook('upgrade-charm')
def remove_states():
# stale state cleanup (pre rev6)
remove_state('etcd.tls.secured')
remove_state('etcd.ssl.placed')
remove_state('etcd.ssl.exported')
remove_state('etcd.nrpe.configured')
# force a config re-render in case template changed
set_state('etcd.rerender-config')
@hook('pre-series-upgrade')
def pre_series_upgrade():
bag = EtcdDatabag()
host.service_pause(bag.etcd_daemon)
status.blocked('Series upgrade in progress')
@hook('post-series-upgrade')
def post_series_upgrade():
bag = EtcdDatabag()
host.service_resume(bag.etcd_daemon)
@when('snap.installed.etcd')
@when('leadership.is_leader')
@when_any('config.changed.port', 'config.changed.management_port')
@when_not('etcd.installed')
@when_not('upgrade.series.in-progress')
def leader_config_changed():
''' The leader executes the runtime configuration update for the cluster,
as it is the controlling unit. Will render config, close and open ports and
restart the etcd service.'''
configuration = hookenv.config()
previous_port = configuration.previous('port')
log('Previous port: {0}'.format(previous_port))
previous_mgmt_port = configuration.previous('management_port')
log('Previous management port: {0}'.format(previous_mgmt_port))
if previous_port and previous_mgmt_port:
bag = EtcdDatabag()
etcdctl = EtcdCtl()
members = etcdctl.member_list()
# Iterate over all the members in the list.
for unit_name in members:
# Grab the previous peer url and replace the management port.
peer_urls = members[unit_name]['peer_urls']
log('Previous peer url: {0}'.format(peer_urls))
old_port = ':{0}'.format(previous_mgmt_port)
new_port = ':{0}'.format(configuration.get('management_port'))
url = peer_urls.replace(old_port, new_port)
# Update the member's peer_urls with the new ports.
log(etcdctl.member_update(members[unit_name]['unit_id'], url))
# Render just the leaders configuration with the new values.
render_config()
address = get_ingress_address('cluster')
leader_set({'leader_address':
get_connection_string([address],
bag.management_port)})
host.service_restart(bag.etcd_daemon)
@when('snap.installed.etcd')
@when_not('leadership.is_leader')
@when_any('config.changed.port', 'config.changed.management_port')
@when_not('etcd.installed')
def follower_config_changed():
''' Follower units need to render the configuration file, close and open
ports, and restart the etcd service. '''
set_state('etcd.rerender-config')
@when('snap.installed.etcd')
@when('config.changed.bind_to_all_interfaces')
@when_not('upgrade.series.in-progress')
def bind_to_all_interfaces_changed():
set_state('etcd.rerender-config')
@when('etcd.rerender-config')
@when_not('upgrade.series.in-progress')
def rerender_config():
''' Config must be updated and service restarted '''
bag = EtcdDatabag()
log('Rendering config file for {0}'.format(bag.unit_name))
render_config()
if host.service_running(bag.etcd_daemon):
host.service_restart(bag.etcd_daemon)
set_app_version()
@when('cluster.joined')
def set_db_ingress_address(cluster):
''' Send db ingress address to peers on the cluster relation '''
address = get_ingress_address('db')
cluster.set_db_ingress_address(address)
@when('db.connected')
@when('etcd.ssl.placed')
@when('cluster.joined')
def send_cluster_connection_details(cluster, db):
''' Need to set the cluster connection string and
the client key and certificate on the relation object. '''
cert = read_tls_cert('client.crt')
key = read_tls_cert('client.key')
ca = read_tls_cert('ca.crt')
etcdctl = EtcdCtl()
# Set the key, cert, and ca on the db relation
db.set_client_credentials(key, cert, ca)
port = hookenv.config().get('port')
# Get all the peers participating in the cluster relation.
members = cluster.get_db_ingress_addresses()
# Append our own address to the membership list, because peers dont self
# actualize
address = get_ingress_address('db')
members.append(address)
members.sort()
# Create a connection string with all the members on the configured port.
connection_string = get_connection_string(members, port)
# Set the connection string on the db relation.
db.set_connection_string(connection_string, version=etcdctl.version())
@when('db.connected')
@when('etcd.ssl.placed')
@when_not('cluster.joined')
def send_single_connection_details(db):
''' '''
cert = read_tls_cert('client.crt')
key = read_tls_cert('client.key')
ca = read_tls_cert('ca.crt')
etcdctl = EtcdCtl()
# Set the key and cert on the db relation
db.set_client_credentials(key, cert, ca)
bag = EtcdDatabag()
# Get all the peers participating in the cluster relation.
address = get_ingress_address('db')
members = [address]
# Create a connection string with this member on the configured port.
connection_string = get_connection_string(members, bag.port)
# Set the connection string on the db relation.
db.set_connection_string(connection_string, version=etcdctl.version())
@when('proxy.connected')
@when('etcd.ssl.placed')
@when_any('etcd.leader.configured', 'cluster.joined')
def send_cluster_details(proxy):
''' Sends the peer cluster string to proxy units so they can join and act
on behalf of the cluster. '''
cert = read_tls_cert('client.crt')
key = read_tls_cert('client.key')
ca = read_tls_cert('ca.crt')
proxy.set_client_credentials(key, cert, ca)
# format a list of cluster participants
etcdctl = EtcdCtl()
peers = etcdctl.member_list()
cluster = []
for peer in peers:
thispeer = peers[peer]
# Potential member doing registration. Default to skip
if 'peer_urls' not in thispeer.keys() or not thispeer['peer_urls']:
continue
peer_string = "{}={}".format(thispeer['name'], thispeer['peer_urls'])
cluster.append(peer_string)
proxy.set_cluster_string(','.join(cluster))
@when('config.changed.channel')
def channel_changed():
''' Ensure that the config is updated if the channel changes. '''
set_state('etcd.rerender-config')
@when('config.changed.channel')
@when_not('etcd.installed')
def snap_install():
channel = get_target_etcd_channel()
snap.install('core')
if channel:
snap.install('etcd', channel=channel, classic=False)
remove_state('etcd.ssl.exported')
@when('etcd.ssl.placed')
@when_not('snap.installed.etcd')
def install_etcd():
''' Attempt resource get on the "etcd" and "etcdctl" resources. If no
resources are provided attempt to install from the archive only on the
16.04 (xenial) series. '''
if is_state('etcd.installed'):
msg = 'Manual upgrade required. run-action snap-upgrade.'
status.blocked(msg)
return
status.maintenance('Installing etcd.')
channel = get_target_etcd_channel()
if channel:
snap.install('etcd', channel=channel, classic=False)
@when('snap.installed.etcd')
@when_not('etcd.service-restart.configured')
@when_not('upgrade.series.in-progress')
def add_systemd_restart_always():
template = 'templates/service-always-restart.systemd-latest.conf'
service = 'snap.etcd.etcd'
try:
# Get the systemd version
cmd = ['systemd', '--version']
output = check_output(cmd).decode('UTF-8')
line = output.splitlines()[0]
words = line.split()
assert words[0] == 'systemd'
systemd_version = int(words[1])
# Check for old version (for xenial support)
if systemd_version < 230:
template = 'templates/service-always-restart.systemd-229.conf'
except Exception:
traceback.print_exc()
hookenv.log('Failed to detect systemd version, using latest template',
level='ERROR')
dest_dir = '/etc/systemd/system/{}.service.d'.format(service)
os.makedirs(dest_dir, exist_ok=True)
copyfile(template, '{}/always-restart.conf'.format(dest_dir))
check_call(['systemctl', 'daemon-reload'])
host.service_restart('{}.service'.format(service))
set_state('etcd.service-restart.configured')
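# Illustrative note: the probe above parses `systemd --version` output whose
# first line typically looks like
#   systemd 229
# so words[1] ('229') is the integer compared against 230 to pick the template.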
@when('snap.installed.etcd')
@when('etcd.ssl.placed')
@when('cluster.joined')
@when_not('leadership.is_leader')
@when_not('etcd.registered')
@when_not('etcd.installed')
@when_not('upgrade.series.in-progress')
def register_node_with_leader(cluster):
'''
Control flow mechanism to perform self registration with the leader.
Before executing self registration, we must adhere to the nature of offline
static turnup rules. If we find a GUID in the member list without peering
information the unit will enter a race condition and must wait for a clean
status output before we can progress to self registration.
'''
etcdctl = EtcdCtl()
bag = EtcdDatabag()
leader_address = leader_get('leader_address')
bag.leader_address = leader_address
try:
# Check if we are already registered. Unregister ourselves if we are so
# we can register from scratch.
peer_url = 'https://%s:%s' % (bag.cluster_address, bag.management_port)
members = etcdctl.member_list(leader_address)
for _, member in members.items():
if member['peer_urls'] == peer_url:
log('Found member that matches our peer URL. Unregistering...')
etcdctl.unregister(member['unit_id'], leader_address)
# Now register.
resp = etcdctl.register(bag.__dict__)
bag.set_cluster(resp['cluster'])
except EtcdCtl.CommandFailed:
log('etcdctl.register failed, will retry')
msg = 'Waiting to retry etcd registration'
status.waiting(msg)
return
render_config(bag)
host.service_restart(bag.etcd_daemon)
open_port(bag.port)
set_state('etcd.registered')
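# Illustrative sketch (shape inferred from how member_list() is used in this
# file, not from the EtcdCtl source): the registration logic above assumes
# entries roughly like
#   {'etcd0': {'name': 'etcd0',
#              'unit_id': '8e9e05c52164694d',
#              'peer_urls': 'https://10.0.0.1:2380'}}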
@when('etcd.ssl.placed')
@when('leadership.is_leader')
@when_not('etcd.leader.configured')
@when_not('etcd.installed')
@when_not('upgrade.series.in-progress')
def initialize_new_leader():
''' Create an initial cluster string to bring up a single member cluster of
etcd, and set the leadership data so the followers can join this one. '''
bag = EtcdDatabag()
bag.token = bag.token
bag.set_cluster_state('new')
address = get_ingress_address('cluster')
cluster_connection_string = get_connection_string([address],
bag.management_port)
bag.set_cluster("{}={}".format(bag.unit_name, cluster_connection_string))
render_config(bag)
host.service_restart(bag.etcd_daemon)
# sorry, some hosts need this. The charm races with systemd and wins.
time.sleep(2)
# Check health status before we say we are good
etcdctl = EtcdCtl()
    # use a local name that does not shadow the imported `status` helper
    health = etcdctl.cluster_health()
    if 'unhealthy' in health:
        status.blocked('Cluster not healthy.')
return
# We have a healthy leader, broadcast initial data-points for followers
open_port(bag.port)
leader_connection_string = get_connection_string([address],
bag.port)
leader_set({'leader_address': leader_connection_string,
'cluster': bag.cluster})
# set registered state since if we ever become a follower, we will not need
# to re-register
set_state('etcd.registered')
# finish bootstrap delta and set configured state
set_state('etcd.leader.configured')
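# Illustrative sketch (hypothetical unit name and address): for unit 'etcd0' on
# 10.0.0.1 with management port 2380 and client port 2379, the cluster string
# set above would be roughly 'etcd0=https://10.0.0.1:2380' and the broadcast
# leader_address roughly 'https://10.0.0.1:2379'.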
@when('snap.installed.etcd')
@when('snap.refresh.set')
@when('leadership.is_leader')
def process_snapd_timer():
''' Set the snapd refresh timer on the leader so all cluster members
(present and future) will refresh near the same time. '''
# Get the current snapd refresh timer; we know layer-snap has set this
# when the 'snap.refresh.set' flag is present.
timer = snap.get(snapname='core', key='refresh.timer').decode('utf-8').strip()
if not timer:
# The core snap timer is empty. This likely means a subordinate timer
# reset ours. Try to set it back to a previously leader-set value,
# falling back to config if needed. Luckily, this should only happen
# during subordinate install, so this should remain stable afterward.
timer = leader_get('snapd_refresh') or hookenv.config('snapd_refresh')
snap.set_refresh_timer(timer)
# Ensure we have the timer known by snapd (it may differ from config).
timer = snap.get(snapname='core', key='refresh.timer').decode('utf-8').strip()
# The first time through, data_changed will be true. Subsequent calls
# should only update leader data if something changed.
if data_changed('etcd_snapd_refresh', timer):
log('setting snapd_refresh timer to: {}'.format(timer))
leader_set({'snapd_refresh': timer})
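# Illustrative note (timer syntax is snapd's; see its refresh.timer docs): the
# value broadcast above is a plain string such as 'fri5,23:00-01:00'; followers
# simply adopt whatever the leader publishes so refreshes happen close together.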
@when('snap.installed.etcd')
@when('snap.refresh.set')
@when('leadership.changed.snapd_refresh')
@when_not('leadership.is_leader')
def set_snapd_timer():
''' Set the snapd refresh.timer on non-leader cluster members. '''
# NB: This method should only be run when 'snap.refresh.set' is present.
# Layer-snap will always set a core refresh.timer, which may not be the
# same as our leader. Gating with 'snap.refresh.set' ensures layer-snap
# has finished and we are free to set our config to the leader's timer.
timer = leader_get('snapd_refresh') or '' # None will cause error
log('setting snapd_refresh timer to: {}'.format(timer))
snap.set_refresh_timer(timer)
@when('tls_client.ca.saved', 'tls_client.server.key.saved',
'tls_client.server.certificate.saved',
'tls_client.client.certificate.saved')
@when_not('etcd.ssl.placed')
def tls_state_control():
''' This state represents all the complexity of handling the TLS certs.
instead of stacking decorators, this state condenses it into a single
state we can gate on before progressing with secure setup. Also handles
ensuring users of the system can access the TLS certificates'''
bag = EtcdDatabag()
if not os.path.isdir(bag.etcd_conf_dir):
hookenv.log('Waiting for etcd conf creation.')
return
cmd = ['chown', '-R', 'root:ubuntu', bag.etcd_conf_dir]
check_call(cmd)
set_state('etcd.ssl.placed')
@when('etcd.ssl.placed')
@when_any('tls_client.ca.written',
'tls_client.server.certificate.written',
'tls_client.client.certificate.written')
@when_not('upgrade.series.in-progress')
def tls_update():
''' Handle changes to the TLS data by ensuring that the service is
restarted.
'''
# ensure config is updated with new certs and service restarted
bag = EtcdDatabag()
render_config(bag)
host.service_restart(bag.etcd_daemon)
# ensure that certs are re-echoed to the db relations
remove_state('etcd.ssl.placed')
remove_state('tls_client.ca.written')
remove_state('tls_client.server.certificate.written')
remove_state('tls_client.client.certificate.written')
@when('snap.installed.etcd')
@when_not('etcd.ssl.exported')
def render_default_user_ssl_exports():
''' Add secure credentials to default user environment configs,
transparently adding TLS '''
opts = layer.options('tls-client')
ca_path = opts['ca_certificate_path']
client_crt = opts['client_certificate_path']
client_key = opts['client_key_path']
etcd_ver = etcd_version()
if etcd_ver == 'n/a':
hookenv.log('Unable to determine version format for etcd SSL config',
level=hookenv.ERROR)
return
major, minor, _ = etcd_ver.split('.')
    # etcd 3.3+ gets the newer ETCDCTL_* variable names; compare as a
    # (major, minor) tuple so e.g. a future 4.0 release is not misclassified
    if (int(major), int(minor)) >= (3, 3):
evars = [
'export ETCDCTL_KEY={}\n'.format(client_key),
'export ETCDCTL_CERT={}\n'.format(client_crt),
'export ETCDCTL_CACERT={}\n'.format(ca_path)
]
else:
evars = [
'export ETCDCTL_KEY_FILE={}\n'.format(client_key),
'export ETCDCTL_CERT_FILE={}\n'.format(client_crt),
'export ETCDCTL_CA_FILE={}\n'.format(ca_path)
]
with open('/home/ubuntu/.bash_aliases', 'w') as fp:
fp.writelines(evars)
with open('/root/.bash_aliases', 'w') as fp:
fp.writelines(evars)
set_state('etcd.ssl.exported')
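# Illustrative sketch (paths are hypothetical; the real ones come from the
# tls-client layer options read above): the generated .bash_aliases ends up
# containing lines along the lines of
#   export ETCDCTL_KEY=/etc/ssl/etcd/client.key
#   export ETCDCTL_CERT=/etc/ssl/etcd/client.crt
#   export ETCDCTL_CACERT=/etc/ssl/etcd/ca.crt
# (or the *_FILE variants for etcd releases older than 3.3).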
def force_rejoin():
"""Wipe local data and rejoin new cluster formed by leader unit
This action is required if leader unit performed snapshot restore. All
other members must remove their local data and previous cluster
identities and join newly formed, restored, cluster.
"""
log('Wiping local storage and rejoining cluster')
conf = EtcdDatabag()
host.service_stop(conf.etcd_daemon)
clear_flag('etcd.registered')
etcd_data = os.path.join(conf.storage_path(), 'member')
if os.path.exists(etcd_data):
shutil.rmtree(etcd_data)
for _ in range(11):
        # We need a randomized back-off because only one unit can join the
        # cluster at a time
time.sleep(random.randint(1, 10))
register_node_with_leader(None)
if is_flag_set('etcd.registered'):
log('Successfully rejoined the cluster')
break
@when('leadership.changed.force_rejoin')
@when_not('leadership.is_leader')
def force_rejoin_requested():
force_rejoin()
check_cluster_health()
@hook('cluster-relation-broken')
def perform_self_unregistration(cluster=None):
''' Attempt self removal during unit teardown. '''
etcdctl = EtcdCtl()
leader_address = leader_get('leader_address')
unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '')
members = etcdctl.member_list()
# Self Unregistration
etcdctl.unregister(members[unit_name]['unit_id'], leader_address)
@hook('data-storage-attached')
def format_and_mount_storage():
''' This allows users to request persistent volumes from the cloud provider
for the purposes of disaster recovery. '''
set_state('data.volume.attached')
# Query juju for the information about the block storage
device_info = storage_get()
block = device_info['location']
bag = EtcdDatabag()
bag.cluster = leader_get('cluster')
# the databag has behavior that keeps the path updated.
# Reference the default path from layer_options.
etcd_opts = layer.options('etcd')
# Split the tail of the path to mount the volume 1 level before
# the data directory.
tail = os.path.split(bag.etcd_data_dir)[0]
if volume_is_mounted(block):
hookenv.log('Device is already attached to the system.')
hookenv.log('Refusing to take action against {}'.format(block))
return
# Format the device in non-interactive mode
cmd = ['mkfs.ext4', device_info['location'], '-F']
hookenv.log('Creating filesystem on {}'.format(device_info['location']))
hookenv.log('With command: {}'.format(' '.join(cmd)))
check_call(cmd)
# halt etcd to perform the data-store migration
host.service_stop(bag.etcd_daemon)
os.makedirs(tail, exist_ok=True)
mount_volume(block, tail)
# handle first run during early-attach storage, pre-config-changed hook.
os.makedirs(bag.etcd_data_dir, exist_ok=True)
# Only attempt migration if directory exists
if os.path.isdir(etcd_opts['etcd_data_dir']):
migrate_path = "{}/".format(etcd_opts['etcd_data_dir'])
output_path = "{}/".format(bag.etcd_data_dir)
cmd = ['rsync', '-azp', migrate_path, output_path]
hookenv.log('Detected existing data, migrating to new location.')
hookenv.log('With command: {}'.format(' '.join(cmd)))
check_call(cmd)
with open('/etc/fstab', 'r') as fp:
contents = fp.readlines()
    # scan fstab for the device; if it is absent, append an entry so the
    # mount persists through reboots
    if not any(block in line for line in contents):
append = "{0} {1} ext4 defaults 0 0".format(block, tail) # noqa
with open('/etc/fstab', 'a') as fp:
fp.writelines([append])
# Finally re-render the configuration and resume operation
render_config(bag)
host.service_restart(bag.etcd_daemon)
def read_tls_cert(cert):
''' Reads the contents of the layer-configured certificate path indicated
by cert. Returns the utf-8 decoded contents of the file '''
# Load the layer options for configured paths
opts = layer.options('tls-client')
# Retain a dict of the certificate paths
cert_paths = {'ca.crt': opts['ca_certificate_path'],
'server.crt': opts['server_certificate_path'],
'server.key': opts['server_key_path'],
'client.crt': opts['client_certificate_path'],
'client.key': opts['client_key_path']}
    # If requesting a cert we don't know about, raise a ValueError
if cert not in cert_paths.keys():
raise ValueError('No known certificate {}'.format(cert))
# Read the contents of the cert and return it in utf-8 encoded text
with open(cert_paths[cert], 'r') as fp:
data = fp.read()
return data
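# Illustrative usage (mirrors how the handlers above call it):
#   ca = read_tls_cert('ca.crt')   # contents of the layer-configured CA cert
#   read_tls_cert('nope.crt')      # raises ValueError('No known certificate ...')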
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def force_update_nrpe_config():
remove_state('etcd.nrpe.configured')
@when('etcd.installed')
@when('nrpe-external-master.available')
@when_not('etcd.nrpe.configured')
def update_nrpe_config(unused=None):
# List of systemd services that will be checked
services = ('snap.etcd.etcd',)
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname, primary=False)
# add our first check, to alert on service failure
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
# add the cron job to populate the cache for our second check
# (we cache the output of 'etcdctl alarm list' to minimise overhead)
with open("templates/check_etcd-alarms.cron") as fp:
write_file(
path="/etc/cron.d/check_etcd-alarms",
content=fp.read().encode(),
owner="root",
perms=0o644,
)
# create an empty output file for the above
write_file(
path="/var/lib/nagios/etcd-alarm-list.txt",
content="",
owner="root",
perms=0o644,
)
# install the NRPE script for the above
with open("templates/check_etcd-alarms.py") as fp:
write_file(
path="/usr/lib/nagios/plugins/check_etcd-alarms.py",
content=fp.read().encode(),
owner="root",
perms=0o755,
)
# define our second check, to alert on etcd alarm status
nrpe_setup.add_check(
"etcd-alarms",
"Verify etcd has no raised alarms",
"/usr/lib/nagios/plugins/check_etcd-alarms.py",
)
nrpe_setup.write()
set_state('etcd.nrpe.configured')
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = ('snap.etcd.etcd',)
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname, primary=False)
for service in services:
nrpe_setup.remove_check(shortname=service)
def volume_is_mounted(volume):
''' Takes a hardware path and returns true/false if it is mounted '''
cmd = ['df', '-t', 'ext4']
out = check_output(cmd).decode('utf-8')
return volume in out
def mount_volume(volume, location):
''' Takes a device path and mounts it to location '''
cmd = ['mount', volume, location]
hookenv.log("Mounting {0} to {1}".format(volume, location))
check_call(cmd)
def unmount_path(location):
''' Unmounts a mounted volume at path '''
cmd = ['umount', location]
hookenv.log("Unmounting {0}".format(location))
check_call(cmd)
def close_open_ports():
''' Close the previous port and open the port from configuration. '''
configuration = hookenv.config()
previous_port = configuration.previous('port')
port = configuration.get('port')
if previous_port is not None and previous_port != port:
log('The port changed; closing {0} opening {1}'.format(previous_port,
port))
close_port(previous_port)
open_port(port)
def install(src, tgt):
''' This method wraps the bash "install" command '''
return check_call(split('install {} {}'.format(src, tgt)))
def render_config(bag=None):
''' Render the etcd configuration template for the given version '''
if not bag:
bag = EtcdDatabag()
move_etcd_data_to_standard_location()
v2_conf_path = "{}/etcd.conf".format(bag.etcd_conf_dir)
v3_conf_path = "{}/etcd.conf.yml".format(bag.etcd_conf_dir)
# probe for 2.x compatibility
if etcd_version().startswith('2.'):
render('etcd2.conf', v2_conf_path, bag.__dict__, owner='root',
group='root')
# default to 3.x template behavior
else:
render('etcd3.conf', v3_conf_path, bag.__dict__, owner='root',
group='root')
if os.path.exists(v2_conf_path):
# v3 will fail if the v2 config is left in place
os.remove(v2_conf_path)
# Close the previous client port and open the new one.
close_open_ports()
remove_state('etcd.rerender-config')
def etcd_version():
''' This method surfaces the version from etcdctl '''
raw_output = None
try:
# try v3
raw_output = check_output(
['/snap/bin/etcd.etcdctl', 'version'],
env={'ETCDCTL_API': '3'}
).decode('utf-8').strip()
if "No help topic for 'version'" in raw_output:
# handle v2
raw_output = check_output(
['/snap/bin/etcd.etcdctl', '--version']
).decode('utf-8').strip()
for line in raw_output.splitlines():
if 'etcdctl version' in line:
# "etcdctl version: 3.0.17" or "etcdctl version 2.3.8"
version = line.split()[-1]
return version
hookenv.log('Unable to find etcd version: {}'.format(raw_output),
level=hookenv.ERROR)
return 'n/a'
except (ValueError, CalledProcessError):
hookenv.log('Failed to get etcd version:\n'
'{}'.format(traceback.format_exc()), level=hookenv.ERROR)
return 'n/a'
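# Illustrative note: the two probes above are expected to print lines such as
#   etcdctl version: 3.0.17    (v3-style output)
#   etcdctl version 2.3.8      (v2-style output)
# and in both cases the last whitespace-separated token is returned.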
def move_etcd_data_to_standard_location():
''' Moves etcd data to the standard location if it's not already located
there. This is necessary when generating new etcd config after etcd has
been upgraded from version 2.3 to 3.x.
'''
bag = EtcdDatabag()
conf_path = bag.etcd_conf_dir + '/etcd.conf.yml'
if not os.path.exists(conf_path):
return
with open(conf_path) as f:
conf = yaml.safe_load(f)
data_dir = conf['data-dir']
desired_data_dir = bag.etcd_data_dir
if data_dir != desired_data_dir:
log('Moving etcd data from %s to %s' % (data_dir, desired_data_dir))
host.service_stop('snap.etcd.etcd')
for filename in os.listdir(data_dir):
os.rename(
data_dir + '/' + filename,
desired_data_dir + '/' + filename
)
os.rmdir(data_dir)
conf['data-dir'] = desired_data_dir
with open(conf_path, 'w') as f:
yaml.dump(conf, f)
host.service_start('snap.etcd.etcd')
| 35.156051 | 86 | 0.681372 |
4a24d820bda9a5ab1d6c6218445a812be86baff2 | 3,988 | py | Python | data/process_data.py | Duratorre/Disaster-Response-Pipeline | d68a51bd0d7d53a7a09259586011b76e8882ac08 | [
"zlib-acknowledgement"
] | null | null | null | data/process_data.py | Duratorre/Disaster-Response-Pipeline | d68a51bd0d7d53a7a09259586011b76e8882ac08 | [
"zlib-acknowledgement"
] | null | null | null | data/process_data.py | Duratorre/Disaster-Response-Pipeline | d68a51bd0d7d53a7a09259586011b76e8882ac08 | [
"zlib-acknowledgement"
] | null | null | null | # import libraries
import sys
import pandas as pd
from sqlalchemy import create_engine
# load messages and categories datasets
def load_data(messages_filepath, categories_filepath):
'''
    The function takes as input the file paths of the messages and categories
    datasets, relative to the working directory, loads the data from those
    files and then merges the two dataframes together
    Input:
    messages_filepath - the file path of the messages dataset
    categories_filepath - the file path of the categories dataset
Output:
df - a pandas dataframe which is the result of the merging between the messages and categories dataframes
'''
# read in the datasets
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
# merge the two datasets
df = messages.merge(categories, how='inner', on=['id'])
return df
def clean_data(df):
'''
The function takes as input the result of load_data and performs one hot encoding
on the categories column of the dataframe
Input:
df - a pandas dataframe which is the result of the merging between the messages and the categories dataframes
Output:
df - a cleaned pandas dataframe, with one hot encoding for the categories column
'''
# extract the different categories from the categories column into a new dataframe
# and expand them into multiple columns
categories = df.categories.str.split(';', expand=True)
# take the first row of the new dataframe
row = categories.iloc[0]
# extract the column names from the first row
category_colnames = row.apply(lambda x: x[:-2])
categories.columns = category_colnames
# convert category values to 0 and 1
for column in categories:
# set each value to be the last character of the string
categories[column] = categories[column].str[-1]
# convert column from string to numeric
categories[column] = categories[column].astype(int)
# replace all possible values greater than 1 with 1
categories = categories.apply(lambda x: [y if y<=1 else 1 for y in x])
# drop categories column from input dataframe and join input dataframe with
# one hot encoded categories dataframe
df.drop(columns=['categories'], inplace=True)
df = df.join(categories)
# remove all duplicates
df.drop_duplicates(inplace=True)
return df
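# Illustrative sketch (hypothetical cell value, not taken from the real CSV): a
# categories string such as 'related-1;request-0;offer-2' becomes the columns
# 'related', 'request', 'offer' with values 1, 0, 1 -- the last character of
# each token is kept and anything greater than 1 is clipped to 1 above.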
def save_data(df, database_filepath):
'''
    This function takes as input a pandas dataframe and the file path of a sqlite database
    and saves the dataframe into the sqlite database
    Input:
    df - a pandas dataframe
    database_filepath - the file path of the sqlite database where the dataframe will be stored
'''
# create connection with the database
engine = create_engine('sqlite:///{}'.format(database_filepath))
    # save the dataframe into the database
df.to_sql('etl_data', engine, index=False, if_exists='replace')
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
| 34.08547 | 113 | 0.694835 |
4a24d8a0a0eddad330093fc7c06367f6d0148a22 | 172 | py | Python | sequenceapi/asgi.py | tiveritz/sequence-api | ba0fb432028eaf878122e4d96d8b1ce234602e47 | [
"MIT"
] | null | null | null | sequenceapi/asgi.py | tiveritz/sequence-api | ba0fb432028eaf878122e4d96d8b1ce234602e47 | [
"MIT"
] | null | null | null | sequenceapi/asgi.py | tiveritz/sequence-api | ba0fb432028eaf878122e4d96d8b1ce234602e47 | [
"MIT"
] | null | null | null | import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sequenceapi.settings')
application = get_asgi_application()
| 21.5 | 71 | 0.831395 |
4a24d8a811a8e09d385b9b0c634339a0ed43cc2d | 1,990 | py | Python | repos/system_upgrade/el7toel8/actors/checkcpu/libraries/cpu.py | fellipeh/leapp-repository | 874e480fa84476fee37da4f184b47f2472748929 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/actors/checkcpu/libraries/cpu.py | fellipeh/leapp-repository | 874e480fa84476fee37da4f184b47f2472748929 | [
"Apache-2.0"
] | 1 | 2020-04-03T07:41:43.000Z | 2020-04-03T07:41:43.000Z | repos/system_upgrade/el7toel8/actors/checkcpu/libraries/cpu.py | pirat89/leapp-repository | aac51ab67ee22413a7ab1da6cec33e54b9357afd | [
"Apache-2.0"
] | null | null | null |
from leapp import reporting
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.common.config import architecture
from leapp.libraries.stdlib import api
from leapp.models import CPUInfo
SUPPORTED_MACHINE_TYPES = [2964, 2965, 3906, 3907]
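# For reference (mapping from general knowledge, not from this codebase):
# 2964 = z13, 2965 = z13s, 3906 = z14, 3907 = z14 ZR1.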
def process():
if not architecture.matches_architecture(architecture.ARCH_S390X):
return
cpuinfo = next(api.consume(CPUInfo), None)
if cpuinfo is None:
raise StopActorExecutionError(message=("Missing information about CPU."))
if not cpuinfo.machine_type:
# this is not expected to happen, but in case...
api.current_logger().warning("The machine (CPU) type is empty.")
if cpuinfo.machine_type not in SUPPORTED_MACHINE_TYPES:
summary = ("The system is not possible to upgrade because of unsupported"
" type of the processor. Based on the official documentation,"
" z13 and z14 processors are supported on the Red Hat Enterprise"
" Linux 8 system for the IBM Z architecture. The supported processors"
" have machine types {}. The detected machine type of the CPU is '{}'."
.format(", ".join([str(i) for i in SUPPORTED_MACHINE_TYPES]), cpuinfo.machine_type))
report = [
reporting.Title("The processor is not supported by the target system."),
reporting.Summary(summary),
reporting.Severity(reporting.Severity.HIGH),
reporting.Tags([reporting.Tags.SANITY]),
reporting.Flags([reporting.Flags.INHIBITOR]),
reporting.ExternalLink(
title="Considerations in adopting RHEL 8",
url=("https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/"
"html-single/considerations_in_adopting_rhel_8/"
"index#changes-in-gcc-in-rhel-8_changes-in-toolchain-since-rhel-7"))
]
reporting.create_report(report)
| 47.380952 | 103 | 0.663317 |
4a24d8d0a34b10b02fa1fe2a018574d1fe473363 | 1,595 | py | Python | Test_Python_code/last/02_Indonesia_Com/total_death_indonesia.py | pdeesawat/PSIT58_test_01 | 631946eacd82503e0697680f06290a4fe10f17f2 | [
"Apache-2.0"
] | null | null | null | Test_Python_code/last/02_Indonesia_Com/total_death_indonesia.py | pdeesawat/PSIT58_test_01 | 631946eacd82503e0697680f06290a4fe10f17f2 | [
"Apache-2.0"
] | null | null | null | Test_Python_code/last/02_Indonesia_Com/total_death_indonesia.py | pdeesawat/PSIT58_test_01 | 631946eacd82503e0697680f06290a4fe10f17f2 | [
"Apache-2.0"
] | null | null | null | """Import Module Plotly To Ploting Graph"""
import plotly.plotly as py
import plotly.graph_objs as go
"""Open and Read CSV from database"""
data = open('Real_Final_database_02.csv')
alldata = data.readlines()
listdata = []
for i in alldata:
listdata.append(i.strip().split(','))
type_z = ['Flood', 'Epidemic', 'Drought', 'Earthquake', 'Storm']
size_fill = [15,20,25,30,35]
fill_colors = ['#00d0f5', '#ff4a2e', '#a36800', '#ad9900', '#8b00db']
trace = []
"""Select and Set variable Data affect that happen in each disaster in Indonesia"""
for i in range(5):
year_x = []
death_z = []
types_y = []
for j in listdata:
if j[0] == 'Indonesia' and j[2] == type_z[i]:
year_x.append(int(j[1]))
death_z.append(int(j[5]))
types_y.append(type_z[i])
trace.append(go.Scatter(x = year_x, y = death_z, name = type_z[i], mode = 'markers',
marker = dict(color = [fill_colors[i] for k in death_z], size = [size_fill[i] for k in death_z])))
data = trace
"""Part of code that adjust layout of graph"""
layout = go.Layout(title='Total Death',
showlegend=True,
height=600,
width=600,
xaxis=dict(tickangle=-45),
yaxis=dict(title='Total Death',
titlefont=dict(color='#ff2323'),
tickfont=dict(color='#ff2323')))
"""Part of plot graph in plotly"""
fig = go.Figure(data=data, layout=layout)
plot_url = py.plot(fig, filename='Total_Death_in_Indonesia')
| 35.444444 | 102 | 0.578056 |
4a24dad199061ca157cbdeb2ce7ebd284534f666 | 549 | py | Python | database_demo.py | DMH2021/website_project | 8acffa0be705a4cd899f979b8b5af494f01a11ec | [
"Apache-2.0"
] | null | null | null | database_demo.py | DMH2021/website_project | 8acffa0be705a4cd899f979b8b5af494f01a11ec | [
"Apache-2.0"
] | null | null | null | database_demo.py | DMH2021/website_project | 8acffa0be705a4cd899f979b8b5af494f01a11ec | [
"Apache-2.0"
] | null | null | null | import sqlite3
def main():
conn = sqlite3.connect("site_data.db")
# Adding new data with the insert statement
cursor = conn.execute("INSERT INTO messages VALUES ('Sam', 'python is cool', 0)")
cursor.close()
conn.commit()
# Querying the database with SELECT statement
cursor = conn.execute("SELECT User, Content from messages")
records = cursor.fetchall()
for record in records:
print('%s says "%s"' % (record[0], record[1]))
cursor.close()
conn.close()
if __name__=="__main__":
main() | 32.294118 | 85 | 0.641166 |
4a24dd7093fb8f03663e3ad4826612cb51b67b7e | 5,312 | py | Python | python/qilinguist/test/test_gettext.py | PrashantKumar-sudo/qibuild | a16ce425cf25127ceff29507feeeeca37af23351 | [
"BSD-3-Clause"
] | null | null | null | python/qilinguist/test/test_gettext.py | PrashantKumar-sudo/qibuild | a16ce425cf25127ceff29507feeeeca37af23351 | [
"BSD-3-Clause"
] | null | null | null | python/qilinguist/test/test_gettext.py | PrashantKumar-sudo/qibuild | a16ce425cf25127ceff29507feeeeca37af23351 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" QiBuild """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import pytest
import subprocess
import qisys.command
WRONG_TRANSLATION = "Wrong translation :\n\n{}\nnot in\n\n{}"
def check_gettext():
""" Check GetText """
gettext = qisys.command.find_program("xgettext", raises=False)
if not gettext:
return False
return True
def test_update(qilinguist_action):
""" Test Update """
if not check_gettext():
return
trad = qilinguist_action.trad
fr_FR_po_file = os.path.join(trad.path, "po", "fr_FR.po")
en_US_po_file = os.path.join(trad.path, "po", "en_US.po")
pot_file = os.path.join(trad.path, "po", "translate.pot")
assert not os.path.exists(fr_FR_po_file)
assert not os.path.exists(en_US_po_file)
assert not os.path.exists(pot_file)
qilinguist_action("update", "translate")
assert os.path.exists(fr_FR_po_file)
assert os.path.exists(en_US_po_file)
assert os.path.exists(pot_file)
def test_release(qilinguist_action):
""" Test Release """
if not check_gettext():
return
trad = qilinguist_action.trad
fr_FR_mo_file = os.path.join(trad.path, "po", "share", "locale", "translate",
"fr_FR", "LC_MESSAGES", "translate.mo")
en_US_mo_file = os.path.join(trad.path, "po", "share", "locale", "translate",
"fr_FR", "LC_MESSAGES", "translate.mo")
assert not os.path.exists(fr_FR_mo_file)
assert not os.path.exists(en_US_mo_file)
qilinguist_action("update", "translate")
qilinguist_action.create_po(trad)
qilinguist_action("release", "translate")
assert os.path.exists(fr_FR_mo_file)
assert os.path.exists(en_US_mo_file)
def test_cplusplus_sdk_workflow(qilinguist_action):
""" Test Cpp SDK Workflow """
if not check_gettext():
return
trad = qilinguist_action.trad
qilinguist_action.create_po(trad)
qilinguist_action("update", "translate")
qilinguist_action("release", "translate")
trad.configure()
trad.build()
# check binary output
binary = os.path.join(trad.sdk_directory, "bin", "translate")
dictPath = os.path.join(trad.path, "po", "share", "locale", "translate")
env = os.environ.copy()
env["LANGUAGE"] = "fr_FR.UTF-8" # for Ubuntu
env["LC_ALL"] = "fr_FR.UTF-8" # for Arch Linux
cmd = [binary, dictPath]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
out, _ = process.communicate()
out_fr = b"""Bonjour, mon nom est NAO.
O\xc3\xb9 est Brian ?
Brian est dans la cuisine.
"""
if out_fr not in out:
pytest.fail(WRONG_TRANSLATION.format(out_fr.decode("utf-8"), out.decode("utf-8")))
env = os.environ.copy()
env["LANGUAGE"] = "en_US.UTF-8"
cmd = [binary, dictPath]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
out, _ = process.communicate()
out_en = b"""Hi, my name is NAO.
Where is Brian?
Brian is in the kitchen.
"""
if out_en not in out:
pytest.fail(WRONG_TRANSLATION.format(out_en.decode("utf-8"), out.decode("utf-8")))
def test_cplusplus_install_workflow(qilinguist_action, tmpdir):
""" Test Cpp Install Workflow """
if not check_gettext():
return
trad = qilinguist_action.trad
qilinguist_action.create_po(trad)
qilinguist_action("update", "translate")
qilinguist_action("release", "translate")
trad.configure()
trad.build()
trad.install(tmpdir.strpath)
# check mo files
fr_mo_file = tmpdir.join("share", "locale", "translate", "fr_FR", "LC_MESSAGES", "translate.mo").strpath
en_mo_file = tmpdir.join("share", "locale", "translate", "en_US", "LC_MESSAGES", "translate.mo").strpath
assert os.path.exists(fr_mo_file)
assert os.path.exists(en_mo_file)
# check binary output
binary = tmpdir.join("bin", "translate").strpath
dict_path = tmpdir.join("share", "locale", "translate").strpath
env = os.environ.copy()
env["LANGUAGE"] = "fr_FR.UTF-8" # for Ubuntu
env["LC_ALL"] = "fr_FR.UTF-8" # for Arch Linux
cmd = [binary, dict_path]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
out, _ = process.communicate()
out_fr = b"""Bonjour, mon nom est NAO.
O\xc3\xb9 est Brian ?
Brian est dans la cuisine.
"""
if out_fr not in out:
pytest.fail(WRONG_TRANSLATION.format(out_fr.decode("utf-8"), out.decode("utf-8")))
env = os.environ.copy()
env["LANGUAGE"] = "en_US.UTF-8"
cmd = [binary, dict_path]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
out, _ = process.communicate()
out_en = b"""Hi, my name is NAO.
Where is Brian?
Brian is in the kitchen.
"""
if out_en not in out:
pytest.fail(WRONG_TRANSLATION.format(out_en.decode("utf-8"), out.decode("utf-8")))
| 36.136054 | 108 | 0.657003 |
4a24de0b33b27569c570076dbb3aa58427d1c5ed | 714 | py | Python | backend/env/lib/python3.8/site-packages/zmq/devices/__init__.py | lubitelpospat/CFM-source | 4e6af33ee68c6f2f05b6952b64a6b3f0591d5b03 | [
"MIT"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | backend/env/lib/python3.8/site-packages/zmq/devices/__init__.py | lubitelpospat/CFM-source | 4e6af33ee68c6f2f05b6952b64a6b3f0591d5b03 | [
"MIT"
] | 26 | 2020-03-24T18:07:06.000Z | 2022-03-12T00:12:27.000Z | backend/env/lib/python3.8/site-packages/zmq/devices/__init__.py | lubitelpospat/CFM-source | 4e6af33ee68c6f2f05b6952b64a6b3f0591d5b03 | [
"MIT"
] | 11 | 2020-06-29T08:40:24.000Z | 2022-02-24T17:39:16.000Z | """0MQ Device classes for running in background threads or processes."""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from zmq import device
from zmq.devices import (
basedevice,
monitoredqueue,
monitoredqueuedevice,
proxydevice,
proxysteerabledevice,
)
from zmq.devices.basedevice import *
from zmq.devices.proxydevice import *
from zmq.devices.proxysteerabledevice import *
from zmq.devices.monitoredqueue import *
from zmq.devices.monitoredqueuedevice import *
__all__ = ['device']
for submod in (
basedevice,
proxydevice,
proxysteerabledevice,
monitoredqueue,
monitoredqueuedevice
):
__all__.extend(submod.__all__)
| 23.8 | 72 | 0.759104 |
4a24de877d28556a5136f1cf9259b141e0f2eddd | 993 | py | Python | test/tbaa.py | KennethNielsen/llvmpy | 70c5957cfd10f1e32a44f28dcb9a4dc72d499c2e | [
"BSD-3-Clause"
] | 140 | 2015-01-07T20:58:12.000Z | 2022-01-21T17:02:21.000Z | test/tbaa.py | KennethNielsen/llvmpy | 70c5957cfd10f1e32a44f28dcb9a4dc72d499c2e | [
"BSD-3-Clause"
] | 19 | 2015-01-15T14:45:49.000Z | 2020-09-04T14:58:23.000Z | test/tbaa.py | KennethNielsen/llvmpy | 70c5957cfd10f1e32a44f28dcb9a4dc72d499c2e | [
"BSD-3-Clause"
] | 12 | 2015-01-12T01:49:32.000Z | 2020-07-10T22:30:38.000Z | from llvm.core import *
from llvm.tbaa import *
from llvm.tests.support import TestCase
import unittest
class TestTBAABuilder(TestCase):
def test_tbaa_builder(self):
mod = Module.new('test_tbaa_builder')
fty = Type.function(Type.void(), [Type.pointer(Type.float())])
foo = mod.add_function(fty, 'foo')
bb = foo.append_basic_block('entry')
bldr = Builder.new(bb)
tbaa = TBAABuilder.new(mod, "tbaa.root")
float = tbaa.get_node('float', const=False)
const_float = tbaa.get_node('const float', float, const=True)
tbaa = TBAABuilder.new(mod, "tbaa.root")
old_const_float = const_float
del const_float
const_float = tbaa.get_node('const float', float, const=True)
self.assertIs(old_const_float, const_float)
ptr = bldr.load(foo.args[0])
ptr.set_metadata('tbaa', const_float)
bldr.ret_void()
print(mod)
if __name__ == '__main__':
unittest.main()
| 27.583333 | 70 | 0.64149 |
4a24debf930c7b00843f8f4976befa0e24ac9170 | 1,192 | py | Python | Mouse.py | VDHARV/hand-tracking | 03653f5b0a0a6f0f362047d86c94b0624e8e6a43 | [
"MIT"
] | null | null | null | Mouse.py | VDHARV/hand-tracking | 03653f5b0a0a6f0f362047d86c94b0624e8e6a43 | [
"MIT"
] | null | null | null | Mouse.py | VDHARV/hand-tracking | 03653f5b0a0a6f0f362047d86c94b0624e8e6a43 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from HandDetector import HandDetector
import autopy
vid = cv2.VideoCapture(0)
wScr, hScr = autopy.screen.size()
wCam, hCam = 640, 480
vid.set(3, wCam)
vid.set(4, hCam)
detector = HandDetector(detectionCon=0.75, trackCon=0.75)
while True:
success, img = vid.read()
img = detector.find_hands(img)
landmark_list = detector.find_position(img)
if landmark_list:
distance = np.linalg.norm(np.array(landmark_list[8][1:]) - np.array(landmark_list[12][1:]))
finger_up = detector.finger_up(landmark_list)
x1, y1 = landmark_list[8][1:]
x2, y2 = landmark_list[12][1:]
x3 = np.interp(x1, (0, wCam), (0, wScr))
y3 = np.interp(y1, (0, hCam), (0, hScr))
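        # Map the fingertip from camera coordinates (wCam x hCam) to screen
        # coordinates (wScr x hScr); the x axis is mirrored below via
        # `wScr - x3` so the cursor moves like a mirror image of the hand.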
if finger_up[0] and not finger_up[2]:
try:
autopy.mouse.smooth_move(wScr - x3, y3)
cv2.circle(img, tuple(landmark_list[8][1:]), 15, (250, 0, 250), cv2.FILLED)
            except Exception:
continue
if distance < 40:
cv2.circle(img, tuple(landmark_list[12][1:]), 15, (250, 0, 250), cv2.FILLED)
autopy.mouse.click()
cv2.imshow('Mouse', img)
cv2.waitKey(1)
| 29.073171 | 99 | 0.600671 |
4a24def846cf1e0e9cae719122fcad6eccd44014 | 27,242 | py | Python | api/base/views.py | kounoAkihiro/SV-COS-osf.io | 0a9a68bbf9cf254d2e900d49b20d8a8e6e359c21 | [
"Apache-2.0"
] | null | null | null | api/base/views.py | kounoAkihiro/SV-COS-osf.io | 0a9a68bbf9cf254d2e900d49b20d8a8e6e359c21 | [
"Apache-2.0"
] | 16 | 2020-03-24T16:30:32.000Z | 2022-03-03T22:39:45.000Z | api/base/views.py | kounoAkihiro/SV-COS-osf.io | 0a9a68bbf9cf254d2e900d49b20d8a8e6e359c21 | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
from distutils.version import StrictVersion
from django_bulk_update.helper import bulk_update
from django.conf import settings as django_settings
from django.db import transaction
from django.db.models import F
from django.http import JsonResponse
from django.contrib.contenttypes.models import ContentType
from rest_framework import generics
from rest_framework import permissions as drf_permissions
from rest_framework import status
from rest_framework.decorators import api_view, throttle_classes
from rest_framework.exceptions import ValidationError, NotFound
from rest_framework.mixins import ListModelMixin
from rest_framework.response import Response
from api.base import permissions as base_permissions
from api.base import utils
from api.base.exceptions import RelationshipPostMakesNoChanges
from api.base.filters import ListFilterMixin
from api.base.parsers import JSONAPIRelationshipParser
from api.base.parsers import JSONAPIRelationshipParserForRegularJSON
from api.base.requests import EmbeddedRequest
from api.base.serializers import (
MaintenanceStateSerializer,
LinkedNodesRelationshipSerializer,
LinkedRegistrationsRelationshipSerializer,
)
from api.base.throttling import RootAnonThrottle, UserRateThrottle
from api.base.utils import is_bulk_request, get_user_auth
from api.nodes.utils import get_file_object
from api.nodes.permissions import ContributorOrPublic
from api.nodes.permissions import ContributorOrPublicForRelationshipPointers
from api.nodes.permissions import ReadOnlyIfRegistration
from api.users.serializers import UserSerializer
from framework.auth.oauth_scopes import CoreScopes
from osf.models import Contributor, MaintenanceState, BaseFileNode
from waffle.models import Flag, Switch, Sample
from waffle import flag_is_active, sample_is_active
class JSONAPIBaseView(generics.GenericAPIView):
def __init__(self, **kwargs):
assert getattr(self, 'view_name', None), 'Must specify view_name on view.'
assert getattr(self, 'view_category', None), 'Must specify view_category on view.'
self.view_fqn = ':'.join([self.view_category, self.view_name])
super(JSONAPIBaseView, self).__init__(**kwargs)
def _get_embed_partial(self, field_name, field):
"""Create a partial function to fetch the values of an embedded field. A basic
example is to include a Node's children in a single response.
:param str field_name: Name of field of the view's serializer_class to load
results for
:return function object -> dict:
"""
if getattr(field, 'field', None):
field = field.field
def partial(item):
# resolve must be implemented on the field
v, view_args, view_kwargs = field.resolve(item, field_name, self.request)
if not v:
return None
request = EmbeddedRequest(self.request)
if not hasattr(request._request, '_embed_cache'):
request._request._embed_cache = {}
cache = request._request._embed_cache
request.parents.setdefault(type(item), {})[item._id] = item
view_kwargs.update({
'request': request,
'is_embedded': True,
})
# Setup a view ourselves to avoid all the junk DRF throws in
            # v is a function that hides everything; v.cls is the actual view class
view = v.cls()
view.args = view_args
view.kwargs = view_kwargs
view.request = request
view.request.parser_context['kwargs'] = view_kwargs
view.format_kwarg = view.get_format_suffix(**view_kwargs)
if not isinstance(view, ListModelMixin):
try:
item = view.get_object()
except Exception as e:
with transaction.atomic():
ret = view.handle_exception(e).data
return ret
_cache_key = (v.cls, field_name, view.get_serializer_class(), (type(item), item.id))
if _cache_key in cache:
# We already have the result for this embed, return it
return cache[_cache_key]
            # Cache serializers. to_representation of a serializer should NOT augment its
            # fields, so resetting the context should be sufficient for reuse.
if not view.get_serializer_class() in cache:
cache[view.get_serializer_class()] = view.get_serializer_class()(many=isinstance(view, ListModelMixin), context=view.get_serializer_context())
ser = cache[view.get_serializer_class()]
try:
ser._context = view.get_serializer_context()
if not isinstance(view, ListModelMixin):
ret = ser.to_representation(item)
else:
queryset = view.filter_queryset(view.get_queryset())
page = view.paginate_queryset(getattr(queryset, '_results_cache', None) or queryset)
ret = ser.to_representation(page or queryset)
if page is not None:
request.parser_context['view'] = view
request.parser_context['kwargs'].pop('request')
view.paginator.request = request
ret = view.paginator.get_paginated_response(ret).data
except Exception as e:
with transaction.atomic():
ret = view.handle_exception(e).data
# Allow request to be gc'd
ser._context = None
# Cache our final result
cache[_cache_key] = ret
return ret
return partial
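    # Illustrative sketch (hypothetical request, not part of this module): for
    # GET /v2/nodes/<node_id>/?embed=children the context built below gains a
    # 'children' partial; calling it with the node renders the children list
    # view inline, and the result surfaces under the resource's "embeds" key.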
def get_serializer_context(self):
"""Inject request into the serializer context. Additionally, inject partial functions
(request, object -> embed items) if the query string contains embeds. Allows
multiple levels of nesting.
"""
context = super(JSONAPIBaseView, self).get_serializer_context()
if self.kwargs.get('is_embedded'):
embeds = []
else:
embeds = self.request.query_params.getlist('embed') or self.request.query_params.getlist('embed[]')
fields_check = self.get_serializer_class()._declared_fields.copy()
if 'fields[{}]'.format(self.serializer_class.Meta.type_) in self.request.query_params:
# Check only requested and mandatory fields
sparse_fields = self.request.query_params['fields[{}]'.format(self.serializer_class.Meta.type_)]
for field in fields_check.copy().keys():
if field not in ('type', 'id', 'links') and field not in sparse_fields:
fields_check.pop(field)
for field in fields_check:
if getattr(fields_check[field], 'field', None):
fields_check[field] = fields_check[field].field
for field in fields_check:
if getattr(fields_check[field], 'always_embed', False) and field not in embeds:
embeds.append(unicode(field))
if getattr(fields_check[field], 'never_embed', False) and field in embeds:
embeds.remove(field)
embeds_partials = {}
for embed in embeds:
embed_field = fields_check.get(embed)
embeds_partials[embed] = self._get_embed_partial(embed, embed_field)
context.update({
'enable_esi': (
utils.is_truthy(self.request.query_params.get('esi', django_settings.ENABLE_ESI)) and
self.request.accepted_renderer.media_type in django_settings.ESI_MEDIA_TYPES
),
'embed': embeds_partials,
'envelope': self.request.query_params.get('envelope', 'data'),
})
return context
class LinkedNodesRelationship(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, generics.CreateAPIView):
""" Relationship Endpoint for Linked Node relationships
Used to set, remove, update and retrieve the ids of the linked nodes attached to this collection. For each id, there
exists a node link that contains that node.
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 201
This requires both edit permission on the collection, and for the user that is
making the request to be able to read the nodes requested. Data can contain any number of
node identifiers. This will create a node_link for all node_ids in the request that
do not currently have a corresponding node_link in this collection.
###Update
Method: PUT || PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 200
This requires both edit permission on the collection and for the user that is
making the request to be able to read the nodes requested. Data can contain any number of
node identifiers. This will replace the contents of the node_links for this collection with
the contents of the request. It will delete all node links that don't have a node_id in the data
array, create node links for the node_ids that don't currently have a node id, and do nothing
for node_ids that already have a corresponding node_link. This means a update request with
{"data": []} will remove all node_links in this collection
###Destroy
Method: DELETE
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 204
This requires edit permission on the node. This will delete any node_links that have a
corresponding node_id in the request.
"""
permission_classes = (
ContributorOrPublicForRelationshipPointers,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ReadOnlyIfRegistration,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NODE_LINKS_WRITE]
serializer_class = LinkedNodesRelationshipSerializer
parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON, )
def get_object(self):
object = self.get_node(check_object_permissions=False)
auth = utils.get_user_auth(self.request)
obj = {
'data': [
pointer for pointer in
object.linked_nodes.filter(is_deleted=False, type='osf.node')
if pointer.can_view(auth)
], 'self': object,
}
self.check_object_permissions(self.request, obj)
return obj
def perform_destroy(self, instance):
data = self.request.data['data']
auth = utils.get_user_auth(self.request)
current_pointers = {pointer._id: pointer for pointer in instance['data']}
collection = instance['self']
for val in data:
if val['id'] in current_pointers:
collection.rm_pointer(current_pointers[val['id']], auth)
def create(self, *args, **kwargs):
try:
ret = super(LinkedNodesRelationship, self).create(*args, **kwargs)
except RelationshipPostMakesNoChanges:
return Response(status=status.HTTP_204_NO_CONTENT)
return ret
class LinkedRegistrationsRelationship(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, generics.CreateAPIView):
""" Relationship Endpoint for Linked Registrations relationships
Used to set, remove, update and retrieve the ids of the linked registrations attached to this collection. For each id, there
exists a node link that contains that registration.
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_registrations", # required
"id": <node_id> # required
}]
}
Success: 201
This requires both edit permission on the collection, and for the user that is
making the request to be able to read the registrations requested. Data can contain any number of
node identifiers. This will create a node_link for all node_ids in the request that
do not currently have a corresponding node_link in this collection.
###Update
Method: PUT || PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_registrations", # required
"id": <node_id> # required
}]
}
Success: 200
This requires both edit permission on the collection and for the user that is
making the request to be able to read the registrations requested. Data can contain any number of
node identifiers. This will replace the contents of the node_links for this collection with
the contents of the request. It will delete all node links that don't have a node_id in the data
array, create node links for the node_ids that don't currently have a node id, and do nothing
for node_ids that already have a corresponding node_link. This means a update request with
{"data": []} will remove all node_links in this collection
###Destroy
Method: DELETE
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_registrations", # required
"id": <node_id> # required
}]
}
Success: 204
This requires edit permission on the node. This will delete any node_links that have a
corresponding node_id in the request.
"""
permission_classes = (
ContributorOrPublicForRelationshipPointers,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ReadOnlyIfRegistration,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NODE_LINKS_WRITE]
serializer_class = LinkedRegistrationsRelationshipSerializer
parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON, )
def get_object(self):
object = self.get_node(check_object_permissions=False)
auth = utils.get_user_auth(self.request)
obj = {
'data': [
pointer for pointer in
object.linked_nodes.filter(is_deleted=False, type='osf.registration')
if pointer.can_view(auth)
], 'self': object,
}
self.check_object_permissions(self.request, obj)
return obj
def perform_destroy(self, instance):
data = self.request.data['data']
auth = utils.get_user_auth(self.request)
current_pointers = {pointer._id: pointer for pointer in instance['data']}
collection = instance['self']
for val in data:
if val['id'] in current_pointers:
collection.rm_pointer(current_pointers[val['id']], auth)
else:
raise NotFound(detail='Pointer with id "{}" not found in pointers list'.format(val['id'], collection))
def create(self, *args, **kwargs):
try:
ret = super(LinkedRegistrationsRelationship, self).create(*args, **kwargs)
except RelationshipPostMakesNoChanges:
return Response(status=status.HTTP_204_NO_CONTENT)
return ret
@api_view(('GET',))
@throttle_classes([RootAnonThrottle, UserRateThrottle])
def root(request, format=None, **kwargs):
"""
The documentation for the Open Science Framework API can be found at [developer.osf.io](https://developer.osf.io).
The contents of this endpoint are variable and subject to change without notification.
"""
if request.user and not request.user.is_anonymous:
user = request.user
current_user = UserSerializer(user, context={'request': request}).data
else:
current_user = None
flags = [name for name in Flag.objects.values_list('name', flat=True) if flag_is_active(request, name)]
samples = [name for name in Sample.objects.values_list('name', flat=True) if sample_is_active(name)]
switches = list(Switch.objects.filter(active=True).values_list('name', flat=True))
kwargs = request.parser_context['kwargs']
return_val = {
'meta': {
'message': 'Welcome to the OSF API.',
'version': request.version,
'current_user': current_user,
'active_flags': flags + samples + switches,
},
'links': {
'nodes': utils.absolute_reverse('nodes:node-list', kwargs=kwargs),
'users': utils.absolute_reverse('users:user-list', kwargs=kwargs),
'collections': utils.absolute_reverse('collections:collection-list', kwargs=kwargs),
'registrations': utils.absolute_reverse('registrations:registration-list', kwargs=kwargs),
'institutions': utils.absolute_reverse('institutions:institution-list', kwargs=kwargs),
'licenses': utils.absolute_reverse('licenses:license-list', kwargs=kwargs),
'schemas': utils.absolute_reverse('schemas:registration-schema-list', kwargs=kwargs),
'addons': utils.absolute_reverse('addons:addon-list', kwargs=kwargs),
},
}
if utils.has_admin_scope(request):
return_val['meta']['admin'] = True
return Response(return_val)
@api_view(('GET',))
@throttle_classes([RootAnonThrottle, UserRateThrottle])
def status_check(request, format=None, **kwargs):
maintenance = MaintenanceState.objects.all().first()
return Response({
'maintenance': MaintenanceStateSerializer(maintenance).data if maintenance else None,
})
def error_404(request, format=None, *args, **kwargs):
return JsonResponse(
{'errors': [{'detail': 'Not found.'}]},
status=404,
content_type='application/vnd.api+json; application/json',
)
class BaseContributorDetail(JSONAPIBaseView, generics.RetrieveAPIView):
# overrides RetrieveAPIView
def get_object(self):
node = self.get_node()
user = self.get_user()
# May raise a permission denied
self.check_object_permissions(self.request, user)
try:
return node.contributor_set.get(user=user)
except Contributor.DoesNotExist:
raise NotFound('{} cannot be found in the list of contributors.'.format(user))
class BaseContributorList(JSONAPIBaseView, generics.ListAPIView, ListFilterMixin):
ordering = ('-modified',)
def get_default_queryset(self):
node = self.get_node()
return node.contributor_set.all().include('user__guids')
def get_queryset(self):
queryset = self.get_queryset_from_request()
# If bulk request, queryset only contains contributors in request
if is_bulk_request(self.request):
contrib_ids = []
for item in self.request.data:
try:
contrib_ids.append(item['id'].split('-')[1])
except AttributeError:
raise ValidationError('Contributor identifier not provided.')
except IndexError:
raise ValidationError('Contributor identifier incorrectly formatted.')
queryset[:] = [contrib for contrib in queryset if contrib._id in contrib_ids]
return queryset
class BaseNodeLinksDetail(JSONAPIBaseView, generics.RetrieveAPIView):
pass
class BaseNodeLinksList(JSONAPIBaseView, generics.ListAPIView):
ordering = ('-modified',)
def get_queryset(self):
auth = get_user_auth(self.request)
query = self.get_node()\
.node_relations.select_related('child')\
.filter(is_node_link=True, child__is_deleted=False)\
.exclude(child__type='osf.collection')
return sorted(
[
node_link for node_link in query
if node_link.child.can_view(auth) and not node_link.child.is_retracted
], key=lambda node_link: node_link.child.modified, reverse=True,
)
class BaseLinkedList(JSONAPIBaseView, generics.ListAPIView):
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NULL]
# subclass must set
serializer_class = None
view_category = None
view_name = None
ordering = ('-modified',)
# TODO: This class no longer exists
# model_class = Pointer
def get_queryset(self):
auth = get_user_auth(self.request)
return (
self.get_node().linked_nodes
.filter(is_deleted=False)
.annotate(region=F('addons_osfstorage_node_settings__region___id'))
.exclude(region=None)
.exclude(type='osf.collection', region=None)
.can_view(user=auth.user, private_link=auth.private_link)
.order_by('-modified')
)
class WaterButlerMixin(object):
path_lookup_url_kwarg = 'path'
provider_lookup_url_kwarg = 'provider'
def bulk_get_file_nodes_from_wb_resp(self, files_list):
"""Takes a list of file data from wb response, touches/updates metadata for each, and returns list of file objects.
This function mirrors all the actions of get_file_node_from_wb_resp except the create and updates are done in bulk.
The bulk_update and bulk_create do not call the base class update and create so the actions of those functions are
done here where needed
"""
node = self.get_node(check_object_permissions=False)
content_type = ContentType.objects.get_for_model(node)
objs_to_create = defaultdict(lambda: [])
file_objs = []
for item in files_list:
attrs = item['attributes']
base_class = BaseFileNode.resolve_class(
attrs['provider'],
BaseFileNode.FOLDER if attrs['kind'] == 'folder'
else BaseFileNode.FILE,
)
# mirrors BaseFileNode get_or_create
try:
file_obj = base_class.objects.get(target_object_id=node.id, target_content_type=content_type, _path='/' + attrs['path'].lstrip('/'))
except base_class.DoesNotExist:
# create method on BaseFileNode appends provider, bulk_create bypasses this step so it is added here
file_obj = base_class(target=node, _path='/' + attrs['path'].lstrip('/'), provider=base_class._provider)
objs_to_create[base_class].append(file_obj)
else:
file_objs.append(file_obj)
file_obj.update(None, attrs, user=self.request.user, save=False)
bulk_update(file_objs)
for base_class in objs_to_create:
base_class.objects.bulk_create(objs_to_create[base_class])
file_objs += objs_to_create[base_class]
return file_objs
def get_file_node_from_wb_resp(self, item):
"""Takes file data from wb response, touches/updates metadata for it, and returns file object"""
attrs = item['attributes']
file_node = BaseFileNode.resolve_class(
attrs['provider'],
BaseFileNode.FOLDER if attrs['kind'] == 'folder'
else BaseFileNode.FILE,
).get_or_create(self.get_node(check_object_permissions=False), attrs['path'])
file_node.update(None, attrs, user=self.request.user)
return file_node
def fetch_from_waterbutler(self):
node = self.get_resource(check_object_permissions=False)
path = self.kwargs[self.path_lookup_url_kwarg]
provider = self.kwargs[self.provider_lookup_url_kwarg]
return self.get_file_object(node, path, provider)
def get_resource(self, check_object_permissions):
"""
Overwrite on view if your file is not on a node.
"""
return self.get_node(check_object_permissions=check_object_permissions)
def get_file_object(self, target, path, provider, check_object_permissions=True):
obj = get_file_object(target=target, path=path, provider=provider, request=self.request)
if provider == 'osfstorage':
if check_object_permissions:
self.check_object_permissions(self.request, obj)
return obj
class DeprecatedView(JSONAPIBaseView):
""" Mixin for deprecating old views
Subclasses must define `max_version`
"""
@property
def max_version(self):
raise NotImplementedError()
def __init__(self, *args, **kwargs):
super(DeprecatedView, self).__init__(*args, **kwargs)
self.is_deprecated = False
def determine_version(self, request, *args, **kwargs):
version, scheme = super(DeprecatedView, self).determine_version(request, *args, **kwargs)
if StrictVersion(version) > StrictVersion(self.max_version):
self.is_deprecated = True
raise NotFound(detail='This route has been deprecated. It was last available in version {}'.format(self.max_version))
return version, scheme
def finalize_response(self, request, response, *args, **kwargs):
response = super(DeprecatedView, self).finalize_response(request, response, *args, **kwargs)
if self.is_deprecated:
# Already has the error message
return response
if response.status_code == 204:
response.status_code = 200
response.data = {}
deprecation_warning = 'This route is deprecated and will be unavailable after version {}'.format(self.max_version)
if response.data.get('meta', False):
if response.data['meta'].get('warnings', False):
response.data['meta']['warnings'].append(deprecation_warning)
else:
response.data['meta']['warnings'] = [deprecation_warning]
else:
response.data['meta'] = {'warnings': [deprecation_warning]}
return response
| 40.903904 | 158 | 0.640261 |
4a24df3ef2fd23b8c38ecd3babff675841817fe5 | 73 | py | Python | cfp/context_processors.py | JulienPalard/PonyConf | e462fb4bc42a2e7ade4dd230d928b0cecc05fecb | ["Apache-2.0"] | 11 | 2016-06-15T12:05:18.000Z | 2017-08-02T14:12:41.000Z | cfp/context_processors.py | JulienPalard/PonyConf | e462fb4bc42a2e7ade4dd230d928b0cecc05fecb | ["Apache-2.0"] | 110 | 2016-07-06T20:04:57.000Z | 2017-12-01T20:51:52.000Z | cfp/context_processors.py | JulienPalard/PonyConf | e462fb4bc42a2e7ade4dd230d928b0cecc05fecb | ["Apache-2.0"] | 10 | 2016-08-28T14:13:35.000Z | 2017-06-08T07:27:29.000Z |
def conference(request):
return {'conference': request.conference}
| 14.6 | 45 | 0.726027 |
4a24e040983c016ade11b4a8e3bf53a39de60d08 | 403 | py | Python | thefuck/rules/history.py | frankhli843/thedarn | 9e00f854c248156fba820f39b2834e8273583984 | ["MIT"] | null | null | null | thefuck/rules/history.py | frankhli843/thedarn | 9e00f854c248156fba820f39b2834e8273583984 | ["MIT"] | null | null | null | thefuck/rules/history.py | frankhli843/thedarn | 9e00f854c248156fba820f39b2834e8273583984 | ["MIT"] | null | null | null |
from thedarn.utils import get_close_matches, get_closest, \
get_valid_history_without_current
def match(command):
return len(get_close_matches(command.script,
get_valid_history_without_current(command)))
def get_new_command(command):
return get_closest(command.script,
get_valid_history_without_current(command))
priority = 9999
| 25.1875 | 77 | 0.704715 |
4a24e16f24474e2c0f45f0333b645ab1772c6a17 | 8,261 | py | Python | egs/public_dataset/kiritan/local/prepare_data.py | bobwzy/SVS_system | 5fd711edd02d102bfebafe8a8863fba3321cdecc | ["Apache-2.0"] | null | null | null | egs/public_dataset/kiritan/local/prepare_data.py | bobwzy/SVS_system | 5fd711edd02d102bfebafe8a8863fba3321cdecc | ["Apache-2.0"] | null | null | null | egs/public_dataset/kiritan/local/prepare_data.py | bobwzy/SVS_system | 5fd711edd02d102bfebafe8a8863fba3321cdecc | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# Copyright 2020 The Johns Hopkins University (author: Jiatong Shi)
import argparse
import librosa
import numpy as np
import os
import pyworld as pw
import soundfile as sf
def pack_zero(number, length=4):
number = str(number)
return "0" * (length - len(number)) + number
def same_split(alignment):
size = 2
while len(alignment) / size > 330:
size += 1
segments = []
start = 0
for i in range(size - 1):
index = round(len(alignment) / size) * (i + 1)
while index < len(alignment) and alignment[index] != alignment[index + 1]:
index += 1
segments.append(alignment[start:index])
start = index + 1
segments.append(alignment[start:])
return segments, size
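# Build utterance segments around silence ("pau" by default) regions, keeping up to five frames of surrounding
# silence; segments longer than 450 frames are further divided by same_split().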
def make_segment(alignment, sil="pau"):
segment_info = {}
start_id = 1
silence_start = []
silence_end = []
for i in range(len(alignment)):
if len(silence_start) == len(silence_end) and alignment[i] == sil:
silence_start.append(i)
elif len(silence_start) != len(silence_end) and alignment[i] != sil:
silence_end.append(i)
else:
continue
if len(silence_start) != len(silence_end):
silence_end.append(len(alignment) - 1)
if silence_start[0] != 0:
if silence_end[0] - silence_start[0] > 5:
segment_info[pack_zero(start_id)] = {
"alignment": alignment[: silence_start[0] + 5],
"start": 0,
}
else:
segment_info[pack_zero(start_id)] = {
"alignment": alignment[: silence_end[0]],
"start": 0,
}
start_id += 1
for i in range(len(silence_start) - 1):
if silence_end[i] - silence_start[i] > 5:
start = silence_end[i] - 5
else:
start = silence_start[i]
if silence_end[i + 1] - silence_start[i + 1] > 5:
end = silence_start[i + 1] + 5
else:
end = silence_end[i + 1]
if end - start > 450:
segments, size = same_split(alignment[start:end])
pre_size = 0
for i in range(size):
segment_info[pack_zero(start_id)] = {
"alignment": segments[i],
"start": start + pre_size,
}
start_id += 1
pre_size += len(segments[i])
continue
segment_info[pack_zero(start_id)] = {
"alignment": alignment[start:end],
"start": start,
}
start_id += 1
if silence_end[-1] != len(alignment) - 1:
if silence_end[-1] - silence_start[-1] > 5:
segment_info[pack_zero(start_id)] = {
"alignment": alignment[silence_end[-1] - 5 :],
"start": silence_end[-1] - 5,
}
else:
segment_info[pack_zero(start_id)] = {
"alignment": alignment[silence_start[-1] :],
"start": silence_start[-1],
}
return segment_info
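# Read a label file (start, end, phone per line), quantize it into a frame-level phone sequence, and return the
# derived segments together with the phone inventory.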
def load_label(label_file, s_type="s", sr=48000, frame_shift=0.03, sil="pau"):
label_data = open(label_file, "r")
label_data = label_data.read().split("\n")
quantized_align = []
for label in label_data:
label = label.split(" ")
if len(label) < 3:
continue
if s_type == "s":
length = (float(label[1]) - float(label[0])) / frame_shift
else:
length = (float(label[1]) - float(label[0])) / (frame_shift * 10e7)
quantized_align.extend([label[-1]] * round(length))
segment = make_segment(quantized_align, sil=sil)
return segment, list(set(quantized_align))
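# For each label/wav pair: cut the audio into segments, extract beats with librosa and F0 with pyworld, and save
# alignments, pitch, beats and wav clips under the output directory.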
def process(args):
f0_max = 1100.0
f0_min = 50.0
frame_shift = args.shift_size / 1000
hop_length = int(args.sr * frame_shift)
lab_list = os.listdir(args.labdir)
phone_set = []
idscp = {}
index = 1
for lab in lab_list:
lab_id = lab[:-4]
idscp[lab_id] = index
segments, phone = load_label(
os.path.join(args.labdir, lab),
s_type=args.label_type,
sr=args.sr,
frame_shift=frame_shift,
sil=args.sil,
)
for p in phone:
if p not in phone_set:
phone_set.append(p)
wav_path = os.path.join(args.wavdir, lab_id + "." + args.wav_extention)
if args.wav_extention == "raw":
signal, osr = sf.read(
wav_path,
subtype="PCM_16",
channels=1,
samplerate=args.sr,
endian="LITTLE",
)
else:
signal, osr = librosa.load(wav_path, sr=None)
if osr != args.sr:
signal = librosa.resample(signal, osr, args.sr)
song_align = os.path.join(args.outdir, "alignment")
song_wav = os.path.join(args.outdir, "wav_info", str(index))
song_pitch_beat = os.path.join(args.outdir, "pitch_beat_extraction", str(index))
if not os.path.exists(song_align):
os.makedirs(song_align)
if not os.path.exists(song_wav):
os.makedirs(song_wav)
if not os.path.exists(song_pitch_beat):
os.makedirs(song_pitch_beat)
print("processing {}".format(song_wav))
for seg in segments.keys():
alignment = segments[seg]["alignment"]
start = segments[seg]["start"]
name = seg
seg_signal = signal[
int(start * hop_length) : int(
start * hop_length + len(alignment) * hop_length
)
]
"""extract beats"""
tempo, beats = librosa.beat.beat_track(
y=seg_signal, sr=args.sr, hop_length=hop_length
)
# times = librosa.frames_to_time(beats, sr=args.sr)
# frames = librosa.time_to_frames(
# times, sr=args.sr, hop_length=hop_length, n_fft=n_fft
# )
np.save(os.path.join(song_pitch_beat, name) + "_beats", np.array(beats))
"""extract pitch"""
seg_signal = seg_signal.astype("double")
_f0, t = pw.harvest(
seg_signal,
args.sr,
f0_floor=f0_min,
f0_ceil=f0_max,
frame_period=frame_shift * 1000,
)
_f0 = pw.stonemask(seg_signal, _f0, t, args.sr)
np.save(os.path.join(song_pitch_beat, name) + "_pitch", np.array(_f0))
alignment_id = np.zeros((len(alignment)))
for i in range(len(alignment)):
alignment_id[i] = phone_set.index(alignment[i])
np.save(
os.path.join(song_align, pack_zero(index) + name),
np.array(alignment_id),
)
sf.write(
os.path.join(song_wav, name) + ".wav", seg_signal, samplerate=args.sr
)
print("saved {}".format(os.path.join(song_wav, name) + ".wav"))
index += 1
with open(os.path.join(args.outdir, "phone_set.txt"), "w") as f:
for p_id, p in enumerate(phone_set):
f.write(str(p_id) + " " + p)
f.write("\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("wavdir", type=str, help="wav data directory")
parser.add_argument("labdir", type=str, help="label data directory")
parser.add_argument("outdir", type=str, help="output directory")
parser.add_argument(
"--window_size", type=int, default=50, help="window size in miliseconds"
)
parser.add_argument(
"--shift_size", type=float, default=12.5, help="shift size in miliseconds"
)
parser.add_argument("--sr", type=int, default=22050)
parser.add_argument("--sil", type=str, default="pau")
parser.add_argument(
"--label_type",
type=str,
default="s",
help="label resolution - sample based or second based",
)
parser.add_argument("--label_extention", type=str, default=".txt")
parser.add_argument("--wav_extention", type=str, default="wav")
args = parser.parse_args()
process(args)
| 32.912351 | 88 | 0.546544 |
4a24e24dafeb1c05c09171c774684841dfa7227f | 4,858 | py | Python | test/functional/p2p_node_network_limited.py | lihuanghai/bitcoin | 624da15f8c55219f4ca3e0877a17799990299504 | ["MIT"] | 20 | 2019-04-03T06:30:39.000Z | 2019-11-07T08:57:50.000Z | test/functional/p2p_node_network_limited.py | lihuanghai/bitcoin | 624da15f8c55219f4ca3e0877a17799990299504 | ["MIT"] | 1 | 2017-01-08T20:32:43.000Z | 2017-01-08T20:32:43.000Z | test/functional/p2p_node_network_limited.py | lihuanghai/bitcoin | 624da15f8c55219f4ca3e0877a17799990299504 | ["MIT"] | 1 | 2019-09-02T00:49:46.000Z | 2019-09-02T00:49:46.000Z |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, msg_getdata, msg_verack
from test_framework.mininode import NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_WITNESS, P2PInterface, wait_until, mininode_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, disconnect_nodes, connect_nodes_bi, sync_blocks
class P2PIgnoreInv(P2PInterface):
firstAddrnServices = 0
def on_inv(self, message):
# The node will send us invs for other blocks. Ignore them.
pass
def on_addr(self, message):
self.firstAddrnServices = message.addrs[0].nServices
def wait_for_addr(self, timeout=5):
test_function = lambda: self.last_message.get("addr")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, int(blockhash, 16)))
self.send_message(getdata_request)
class NodeNetworkLimitedTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-prune=550', '-addrmantest'], [], []]
def disconnect_all(self):
disconnect_nodes(self.nodes[0], 1)
disconnect_nodes(self.nodes[1], 0)
disconnect_nodes(self.nodes[2], 1)
disconnect_nodes(self.nodes[2], 0)
disconnect_nodes(self.nodes[0], 2)
disconnect_nodes(self.nodes[1], 2)
def setup_network(self):
super(NodeNetworkLimitedTest, self).setup_network()
self.disconnect_all()
def run_test(self):
node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
node.wait_for_verack()
expected_services = NODE_BLOOM | NODE_WITNESS | NODE_NETWORK_LIMITED
self.log.info("Check that node has signalled expected services.")
assert_equal(node.nServices, expected_services)
self.log.info("Check that the localservices is as expected.")
assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
connect_nodes_bi(self.nodes, 0, 1)
blocks = self.nodes[1].generate(292)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Make sure we can max retrieve block at tip-288.")
node.send_getdata_for_block(blocks[1]) # last block in valid range
node.wait_for_block(int(blocks[1], 16), timeout=3)
self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
node.send_getdata_for_block(blocks[0]) # first block outside of the 288+2 limit
node.wait_for_disconnect(5)
self.log.info("Check local address relay, do a fresh connection.")
self.nodes[0].disconnect_p2ps()
node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
node1.wait_for_verack()
node1.send_message(msg_verack())
node1.wait_for_addr()
#must relay address with NODE_NETWORK_LIMITED
assert_equal(node1.firstAddrnServices, 1036)
self.nodes[0].disconnect_p2ps()
node1.wait_for_disconnect()
# connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
# because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
connect_nodes_bi(self.nodes, 0, 2)
try:
sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
except:
pass
        # node2 must remain at height 0
assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)
# now connect also to node 1 (non pruned)
connect_nodes_bi(self.nodes, 1, 2)
# sync must be possible
sync_blocks(self.nodes)
# disconnect all peers
self.disconnect_all()
# mine 10 blocks on node 0 (pruned node)
self.nodes[0].generate(10)
# connect node1 (non pruned) with node0 (pruned) and check if the can sync
connect_nodes_bi(self.nodes, 0, 1)
# sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
sync_blocks([self.nodes[0], self.nodes[1]])
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
| 41.521368 | 123 | 0.692466 |
4a24e361bbb2c0427e637a240b5a89a02711895c | 2,067 | py | Python | mindspore/ops/_op_impl/tbe/bias_add_grad.py | TommyLike/mindspore | 401dabb786a9097d6dd84f391657d266b04e9a37 | ["Apache-2.0"] | 1 | 2020-05-23T07:08:46.000Z | 2020-05-23T07:08:46.000Z | mindspore/ops/_op_impl/tbe/bias_add_grad.py | liyong126/mindspore | 930a1fb0a8fa9432025442c4f4732058bb7af592 | ["Apache-2.0"] | 7 | 2020-03-30T08:31:56.000Z | 2020-04-01T09:54:39.000Z | mindspore/ops/_op_impl/tbe/bias_add_grad.py | liyong126/mindspore | 930a1fb0a8fa9432025442c4f4732058bb7af592 | ["Apache-2.0"] | 1 | 2020-03-30T17:07:43.000Z | 2020-03-30T17:07:43.000Z |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""BiasAddGrad op"""
from mindspore.ops.op_info_register import op_info_register
@op_info_register("""{
"op_name": "BiasAddGrad",
"imply_type": "TBE",
"fusion_type": "COMMREDUCE",
"async_flag": false,
"binfile_name": "biasaddgrad.so",
"compute_cost": 10,
"kernel_name": "biasaddgrad",
"partial_flag": true,
"attr": [
{
"name": "data_format",
"param_type": "required",
"type": "str",
"value": "all"
}
],
"inputs": [
{
"index": 0,
"dtype": [
"float16","float16","float","float"
],
"format": [
"FRACTAL_NZ","DefaultFormat","FRACTAL_NZ","DefaultFormat"
],
"name": "out_backprop",
"need_compile": false,
"param_type": "required",
"shape": "all"
}
],
"outputs": [
{
"index": 0,
"dtype": [
"float16","float16","float","float"
],
"format": [
"DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat"
],
"name": "output",
"need_compile": false,
"param_type": "required",
"shape": "all"
}
]
}""")
def _bias_add_grad_tbe():
"""BiasAddGrad TBE register"""
return
| 29.112676 | 79 | 0.524915 |
4a24e4c732fe1b524b09a8afd6d17bf495fb3543 | 4,326 | py | Python | BESI_LOGGING_R/pebble_connect.py | nh4ar/besi-relay-station | 71093a566aee6c0847f3b6c0f1c88cf3429f292a | ["MIT"] | null | null | null | BESI_LOGGING_R/pebble_connect.py | nh4ar/besi-relay-station | 71093a566aee6c0847f3b6c0f1c88cf3429f292a | ["MIT"] | null | null | null | BESI_LOGGING_R/pebble_connect.py | nh4ar/besi-relay-station | 71093a566aee6c0847f3b6c0f1c88cf3429f292a | ["MIT"] | null | null | null |
from libpebble2.communication import PebbleConnection
import logging
from libpebble2.communication.transports.serial import SerialTransport as ST
import libpebble2.exceptions
from libpebble2.protocol import *
from libpebble2.services.appmessage import AppMessageService, CString, Uint8
from libpebble2.services.data_logging import DataLoggingService
from time import sleep
import subprocess
import sys
import redis
import os
from serial.serialutil import SerialException
import argparse
import uuid
import time
import rNTPTime
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("pebble_id", type=int)
parser.add_argument("streaming_port", type=str)
args = parser.parse_args()
#magic number for pebble app
SESSION_TAG = 0x54
running = True
# if the Pebble is connected but the download returns no data, count this up to detect charging status
charging_status = 0
#redis_ip = os.environ["REDIS_IP"]
#relay_id = os.environ["RELAY_ID"]
#r = redis.StrictRedis(host=redis_ip, port=6379, db=0)
#for app restart
APP_UUID = "16ab285518a942f8be2c8e224691092a"
def restart_app_on_watch(pebble,appUUID):
current_app_uuid = pebble.send_and_read(AppRunState(data=AppRunStateRequest()), AppRunState).data.uuid
# Re-start the watchapp
pebble.send_packet(AppRunState(command = 0x01, data=AppRunStateStop(uuid = uuid.UUID(appUUID))))
print("Restart Pebble App!")
time.sleep(1)
pebble.send_packet(AppRunState(command = 0x01, data=AppRunStateStart(uuid = uuid.UUID(appUUID))))
time.sleep(1)
#print(current_app_uuid)
if current_app_uuid != uuid.UUID(appUUID):
# Re-start the watchapp
pebble.send_packet(AppRunState(command = 0x01, data=AppRunStateStop(uuid = uuid.UUID(appUUID))))
print("Pebble App Closed!")
time.sleep(5)
pebble.send_packet(AppRunState(command = 0x01, data=AppRunStateStart(uuid = uuid.UUID(appUUID))))
time.sleep(2)
return
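# Read the base station IP address and relay station ID from the local config file.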
def readConfigFile():
# get BS IP and RS port # from config file
configFileName = r'/root/besi-relay-station/BESI_LOGGING_R/config'
fconfig = open(configFileName)
for line in fconfig:
if line[0] == "#":
pass
else:
splitLine = line.split("=")
try:
if splitLine[0] == "BaseStation_IP":
BaseStation_IP2 = str(splitLine[1]).rstrip()
except:
print "Error reading IP Address"
try:
if splitLine[0] == "relayStation_ID":
relayStation_ID2 = int(splitLine[1])
except:
print "Error reading Port"
default_settings = ''
fconfig.close()
return BaseStation_IP2, relayStation_ID2
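# Report a suspected charging state to the base station (connection alive but no data coming from the watch).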
def check_charging(address):
message = []
message.append("Charging")
message.append(str("-1"))
temp = rNTPTime.sendUpdate(address, message, 5)
return
def get_id(sessions):
for session in sessions:
if session['log_tag'] == SESSION_TAG:
infomsg = "FOUND ID " + str(session['session_id'])
logging.info(infomsg)
return session['session_id']
return -1
logging.info("Starting pebble connection")
#get info from config file
hostIP, BASE_PORT = readConfigFile()
server_address = (hostIP, BASE_PORT)
pebble = PebbleConnection(ST("/dev/rfcomm0"), log_packet_level=logging.DEBUG)
pebble.connect()
pebble.pump_reader()
try:
while running:
try:
logging.info("Attempting to connect to pebble")
pebble.run_async()
logging.info("Pebble connection success")
#restart app on watch
appUUID = APP_UUID[:]
restart_app_on_watch(pebble,appUUID)
break
except libpebble2.exceptions.TimeoutError:
logging.info("Pebble timeouted, retrying..")
continue
while pebble.connected:
data_srv = DataLoggingService(pebble,5000)
data_srv.set_send_enable(True)
logging.info("Looking for target session")
# Update target session id
target_session_id = get_id(data_srv.list())
# if we could not find it retry
while target_session_id == -1:
logging.info("target session not found")
sleep(3)
target_session_id = get_id(data_srv.list())
# start the data stream. If this returns then the stream has stopped
(session_info, data) = data_srv.download(target_session_id)
# logging.info("info="+str(session_info))
# logging.info("data="+str(data))
if (str(data)=='None'):
check_charging(server_address)
logging.info("stream closed")
sleep(1)
except SerialException:
print("Pebble disconnected unexpectedly")
#pebble.close()
exit(2)
| 27.909677 | 103 | 0.746879 |