content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M)
---|---|---|
def eiffel_artifact_created_event():
"""Eiffel artifact created event."""
return {
"meta": {
"id": "7c2b6c13-8dea-4c99-a337-0490269c374d",
"time": 1575981274307,
"type": "EiffelArtifactCreatedEvent",
"version": "3.0.0",
},
"links": [],
"data": {"identity": "pkg:artifact/created/[email protected]"},
} | 0ef2e5adadb58b92c94bac42c9880728573b159e | 8,000 |
def _hash(input_data, initVal=0):
"""
hash() -- hash a variable-length key into a 32-bit value
k : the key (the unaligned variable-length array of bytes)
len : the length of the key, counting by bytes
level : can be any 4-byte value
Returns a 32-bit value. Every bit of the key affects every bit of
the return value. Every 1-bit and 2-bit delta achieves avalanche.
About 36+6len instructions.
The best hash table sizes are powers of 2. There is no need to do
mod a prime (mod is so slow!). If you need less than 32 bits,
use a bitmask. For example, if you need only 10 bits, do
h = (h & hashmask(10));
In which case, the hash table should have hashsize(10) elements.
If you are hashing n strings (ub1 **)k, do it like this:
for (i=0, h=0; i<n; ++i) h = hash( k[i], len[i], h);
By Bob Jenkins, 1996. [email protected]. You may use this
code any way you wish, private, educational, or commercial. It's free.
See http://burtleburtle.net/bob/hash/evahash.html
Use for hash table lookup, or anything where one collision in 2^32 is
acceptable. Do NOT use for cryptographic purposes.
"""
data = bytes(input_data, encoding='ascii')
len_pos = len(data)
length = len(data)
if length == 0:
return 0
a = 0x9e3779b9
b = 0x9e3779b9
c = initVal
p = 0
while len_pos >= 12:
a += ((data[p + 0]) + ((data[p + 1]) << 8) + ((data[p + 2]) << 16) + ((data[p + 3]) << 24))
b += ((data[p + 4]) + ((data[p + 5]) << 8) + ((data[p + 6]) << 16) + ((data[p + 7]) << 24))
c += ((data[p + 8]) + ((data[p + 9]) << 8) + ((data[p + 10]) << 16) + ((data[p + 11]) << 24))
q = _mix(a, b, c)
a = q[0]
b = q[1]
c = q[2]
p += 12
len_pos -= 12
c += length
if len_pos >= 11:
c += (data[p + 10]) << 24
if len_pos >= 10:
c += (data[p + 9]) << 16
if len_pos >= 9:
c += (data[p + 8]) << 8
# the first byte of c is reserved for the length
if len_pos >= 8:
b += (data[p + 7]) << 24
if len_pos >= 7:
b += (data[p + 6]) << 16
if len_pos >= 6:
b += (data[p + 5]) << 8
if len_pos >= 5:
b += (data[p + 4])
if len_pos >= 4:
a += (data[p + 3]) << 24
if len_pos >= 3:
a += (data[p + 2]) << 16
if len_pos >= 2:
a += (data[p + 1]) << 8
if len_pos >= 1:
a += (data[p + 0])
q = _mix(a, b, c)
a = q[0]
b = q[1]
c = q[2]
return rshift_zero_padded(c, 0) | c4f1b0ee22ca940d360090b965125e71c272ad4c | 8,001 |
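A minimal usage sketch for the Jenkins hash above (hedged: it assumes the module-level `_mix` and `rshift_zero_padded` helpers referenced in the function are available; the key strings are illustrative only):
# Hypothetical usage sketch of _hash.
h = _hash("example-key")                     # 32-bit hash of an ASCII string
bucket = h & ((1 << 10) - 1)                 # keep only 10 bits, i.e. h & hashmask(10)
h_chained = _hash("second-key", initVal=h)   # chain hashes, like hash(k[i], len[i], h)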
import logging
from configparser import NoOptionError
def read_config_option(key, expected_type=None, default_value=None):
"""Read the specified value from the configuration file.
Args:
key: the name of the key to read from the config file.
expected_type: read the config option as the specified type (if specified)
default_value: if the key doesn't exist, just return the default value.
If the default value is not specified, the function will throw whatever
error was raised by the configuration parser
"""
logging.info("Reading config option {} with expected type {}".format(key, expected_type))
try:
if not expected_type:
value = conf_parser.get("Settings", key)
            if key == "password":
logging.info("Got configuration for key {}: ****".format(key))
else:
logging.info("Got configuration for key {}: {}".format(key, value))
return conf_parser.get("Settings", key)
elif expected_type is bool:
return conf_parser.getboolean("Settings", key)
except (ValueError, NoOptionError) as e:
        if default_value is not None:
return default_value
else:
raise | effc94b89dd8b1e0765c71bd4c0c03760715db1d | 8,002 |
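For context, `read_config_option` relies on a module-level `conf_parser`; a minimal setup sketch follows (the file name, section, and keys are illustrative assumptions, not taken from the snippet):
# Hypothetical module-level setup assumed by read_config_option.
from configparser import ConfigParser
conf_parser = ConfigParser()
conf_parser.read("config.ini")   # expects a [Settings] section
base_url = read_config_option("base_url", default_value="http://localhost")
verify = read_config_option("verify_ssl", expected_type=bool, default_value=False)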
def simple_password(request):
"""
Checks a password
"""
if request.method == "POST":
form = PasswordForm(data=request.POST)
if form.is_valid():
# TODO: set session with better param
request.session["simple_auth"] = True
return redirect(form.cleaned_data["url"] or "/")
else:
form = PasswordForm()
return render(request, "simple_auth/password_form.html",
{"form": form}) | 4a70bb5578a528ed1646ce028e1db76e77ef7d91 | 8,003 |
from typing import Any
import logging
def removeKeys(array: dict = None, remove: Any = None) -> dict:
"""
    Removes from array every key whose value equals the given remove value.
:param array: dict[Any: Any]
:param remove: Any
:return:
- sorted_dict - dict[Any: Any]
"""
if remove is None:
remove = []
try:
sorted_dict = {}
for item_key in array:
if array[item_key] != remove:
sorted_dict[item_key] = array[item_key]
return sorted_dict
except Exception as e:
logging.exception(e) | 1b98821000642c79fbb71a9c0dc7163c4a95fa26 | 8,004 |
import numpy as np
from trimesh.path.creation import box_outline
from trimesh.path.util import concatenate
def affine2boxmesh(affines):
"""
:param affines: (n_parts, 6), range (0, 1)
:return:
"""
n_parts = len(affines)
colors = [[0, 0, 255, 255], # blue
[0, 255, 0, 255], # green
[255, 0, 0, 255], # red
[255, 255, 0, 255], # yellow
[0, 255, 255, 255], # cyan
[255, 0, 255, 255], # Magenta
[160, 32, 240, 255], # purple
[255, 255, 240, 255]] # ivory
shape_box = []
for idx in range(n_parts):
part_trans = affines[idx, :3]
part_size = affines[idx, 3:]
trans_mat = np.eye(4)
# translate to center of axis aligned bounds
trans_mat[:3, 3] = part_trans
part_box = box_outline(transform=trans_mat,
extents=part_size
)
shape_box.append(part_box)
shape_box = concatenate(shape_box)
return shape_box | 3d6568e6e533bdb31cdceb244666f376d73dad1e | 8,005 |
def _select_index_code(code):
    """
    Return 0 (Shenzhen, sz) for codes starting with '3', otherwise 1 (Shanghai, sh).
    """
code = str(code)
if code[0] == '3':
return 0
return 1 | 697d8e5ca1744c897b7eebbb7b9b0a3b45faec3d | 8,006 |
def get_install_agent_cmd():
"""Get OS specific command to install Telegraf agent."""
agent_pkg_deb = "https://packagecloud.io/install/repositories/" \
"wavefront/telegraf/script.deb.sh"
agent_pkg_rpm = "https://packagecloud.io/install/repositories/" \
"wavefront/telegraf/script.rpm.sh"
dist = system.check_os()
cmd = None
if not dist:
print("Error: Unsupported OS version. Please contact"
" [email protected].")
return cmd
if dist.strip().startswith(("Oracle Linux Server", "Fedora",
"Amazon Linux", "CentOS",
"Red Hat Enterprise Linux")):
cmd = "curl -s %s | bash" % (agent_pkg_rpm)
cmd += " && yum -y -q install telegraf"
elif dist.strip().startswith("Ubuntu"):
cmd = "curl -s %s | bash" % (agent_pkg_deb)
cmd += ' && apt-get -y -qq -o Dpkg::Options::="--force-confold"' \
' install telegraf'
elif dist.strip().lower().startswith("debian"):
cmd = "curl -s %s | bash" % (agent_pkg_deb)
cmd += ' && apt-get -o Dpkg::Options::="--force-confnew"' \
' -y install telegraf'
elif dist.strip().startswith(("openSUSE", "SUSE Linux Enterprise Server",
"SLES")):
cmd = "curl -s %s | bash" % (agent_pkg_rpm)
cmd += ' && zypper install telegraf'
else:
message.print_warn("Error: Unsupported OS version: %s." % (dist))
return cmd | 14373024b3b6046badcedf686a38423f126f02a2 | 8,007 |
from typing import Tuple
from typing import DefaultDict
import collections
def unpack(manifests: LocalManifestLists) -> Tuple[ServerManifests, bool]:
"""Convert `manifests` to `ServerManifests` for internal processing.
    The second element of the returned tuple is an error flag: it is `True` if
    any resource is defined more than once across the input files, for
    instance if two files define the same namespace or the same deployment,
    and `False` otherwise.
    The primary use case is to convert the manifests we read from local files
    into the format Square uses internally for the server manifests as well.
    Inputs:
        manifests: LocalManifestLists
    Returns:
        (ServerManifests, bool): flattened version of `manifests` and an error flag.
"""
# Compile a dict that shows which meta manifest was defined in which file.
# We will shortly use this information to determine if all resources were
# defined exactly once across all files.
all_meta: DefaultDict[MetaManifest, list] = collections.defaultdict(list)
for fname in manifests:
for meta, _ in manifests[fname]:
all_meta[meta].append(fname)
# Find out if all meta manifests were unique. If not, log the culprits and
# return with an error.
unique = True
for meta, fnames in all_meta.items():
if len(fnames) > 1:
unique = False
tmp = [str(_) for _ in fnames]
logit.error(
f"Duplicate ({len(tmp)}x) manifest {meta}. "
f"Defined in {str.join(', ', tmp)}"
)
if not unique:
return ({}, True)
# Compile the input manifests into a new dict with the meta manifest as key.
out = {k: v for fname in manifests for k, v in manifests[fname]}
return (out, False) | b7f3c3f1388b9d3791a18f2da97dd40cf131ecaa | 8,008 |
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the switch from config."""
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
host = config[CONF_HOST]
token = config[CONF_TOKEN]
name = config[CONF_NAME]
model = config.get(CONF_MODEL)
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
devices = []
unique_id = None
if model is None:
try:
miio_device = Device(host, token)
device_info = await hass.async_add_executor_job(miio_device.info)
model = device_info.model
unique_id = f"{model}-{device_info.mac_address}"
_LOGGER.info(
"%s %s %s detected",
model,
device_info.firmware_version,
device_info.hardware_version,
)
except DeviceException:
raise PlatformNotReady
if model in ["090615.switch.switch01"]:
plug = PtxSwitch(host, token, model=model)
device = XiaomiPTXSwitch(name, plug, model, unique_id, 1)
devices.append(device)
hass.data[DATA_KEY][host] = device
elif model in ["090615.switch.switch02"]:
plug = PtxSwitch(host, token, model=model)
device = XiaomiPTXSwitch(name, plug, model, unique_id, 1)
devices.append(device)
hass.data[DATA_KEY][host] = device
plug2 = PtxSwitch(host, token, model=model)
device2 = XiaomiPTXSwitch(name, plug2, model, unique_id, 2)
devices.append(device2)
hass.data[DATA_KEY][host] = device2
elif model in ["090615.switch.switch03"]:
plug = PtxSwitch(host, token, model=model)
device = XiaomiPTXSwitch(name, plug, model, unique_id, 1)
devices.append(device)
hass.data[DATA_KEY][host] = device
plug2 = PtxSwitch(host, token, model=model)
device2 = XiaomiPTXSwitch(name, plug2, model, unique_id, 2)
devices.append(device2)
hass.data[DATA_KEY][host] = device2
plug3 = PtxSwitch(host, token, model=model)
device3 = XiaomiPTXSwitch(name, plug3, model, unique_id, 3)
devices.append(device3)
hass.data[DATA_KEY][host] = device3
else:
_LOGGER.error(
"Unsupported device found! Please create an issue at "
"https://github.com/volshebniks/python-miio-ptx/issues "
"and provide the following data: %s",
model,
)
return False
async_add_entities(devices, update_before_add=True) | 0ef4f94c2b69bb2674ffcace56a85927c2c6d1d1 | 8,009 |
def process_generate_metric_alarms_event(event):
"""Handles a new event request
Placeholder copied from alert_controller implementation
"""
LOG.info(str(event))
return create_response(200, body="Response to HealthCheck") | 45f3ac5fa73048ec1f76bb5188a4e1de0eaca62d | 8,010 |
def get_query_segment_info(collection_name, timeout=None, using="default"):
"""
Notifies Proxy to return segments information from query nodes.
:param collection_name: A string representing the collection to get segments info.
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return: QuerySegmentInfo:
        QuerySegmentInfo is the growing segments' information in the query cluster.
:rtype: QuerySegmentInfo
:example:
>>> from pymilvus import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
>>> connections.connect(alias="default")
>>> _DIM = 128
>>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
>>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
>>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="get collection entities num")
>>> collection = Collection(name="test_get_segment_info", schema=schema)
    >>> import pandas as pd
    >>> import random
    >>> int64_series = pd.Series(data=list(range(10, 20)), index=list(range(10)))
    >>> float_vector_series = [[random.random() for _ in range(_DIM)] for _ in range(10)]
>>> data = pd.DataFrame({"int64" : int64_series, "float_vector": float_vector_series})
>>> collection.insert(data)
>>> collection.load() # load collection to memory
>>> res = utility.get_query_segment_info("test_get_segment_info")
"""
return _get_connection(using).get_query_segment_info(collection_name, timeout=timeout) | 521119f98a43d1abc303028f9bfa150dbfba098b | 8,011 |
def IoU(pred, gt, n_classes, all_iou=False):
"""Computes the IoU by class and returns mean-IoU"""
# print("IoU")
iou = []
for i in range(n_classes):
if np.sum(gt == i) == 0:
            iou.append(np.nan)
continue
TP = np.sum(np.logical_and(pred == i, gt == i))
FP = np.sum(np.logical_and(pred == i, gt != i))
FN = np.sum(np.logical_and(pred != i, gt == i))
iou.append(TP / (TP + FP + FN))
# nanmean: if a class is not present in the image, it's a NaN
result = [np.nanmean(iou), iou] if all_iou else np.nanmean(iou)
return result | 9635472121b13c9ce04e38fdfaee8bf29774a17a | 8,012 |
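A small worked example of the IoU computation above (assuming numpy is imported as np in the surrounding module):
import numpy as np
pred = np.array([0, 0, 1, 1])
gt   = np.array([0, 1, 1, 1])
IoU(pred, gt, n_classes=2)                  # class 0: 1/2, class 1: 2/3 -> mean ~0.583
IoU(pred, gt, n_classes=2, all_iou=True)    # also returns the per-class list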
def from_torchvision(vision_transform, p=1):
    """Takes in an arbitrary torchvision transform and wraps it so that it can be
    applied to a list of images of shape HxWxC.
    Returns a callable class that takes in a list of images and a target as input.
    NOTE:
    Due to implementation difficulties, in order to apply the same
    randomized transform to EACH image, it is best to pass in
    a deterministic transform like the functional transforms
    in torchvision and then pass in a p value for the wrapper
    to roll a number and apply the transform with that probability.
    Additionally, it's also possible to wrap a torchvision functional transform
    as long as it's a function that takes in an image as its only argument,
    i.e. one can write something like:
    lambda x: some_functional_transform(x, ...)
    """
return TorchvisionWrapper(vision_transform, p=p) | b2af1a672d24171d9b80cd4eeb6d53fc80d09f53 | 8,013 |
import requests
def is_valid(inputted):
    """
    Verify the QueryDict received from our frontend's POST method, checking the
    fields before persisting the data to the database.
    :param inputted: QueryDict received via POST
    :return: A boolean value
    """
for key in inputted.keys():
if key != 'complemento':
if inputted.get(key) is None:
return False
elif len(inputted.get(key)) == 0:
return False
else:
pass
if key == 'estado':
if inputted.get(key) not in country_uf.values():
return False
else:
pass
if key == 'cep':
if len(inputted.get(key)) != 8:
return False
else:
cep = inputted.get(key)
try:
int(cep)
except (TypeError, ValueError):
return False
else:
url = f"https://viacep.com.br/ws/{cep}/json/"
response = requests.get(url)
response = response.json()
if 'erro' in response.keys():
return False
return True | ff52ad53f2073ff3659d16601528984f7282940a | 8,014 |
def get_ports(context, project_id=None):
"""Returns all ports of VMs in EOS-compatible format.
:param project_id: globally unique neutron tenant identifier
"""
session = context.session
model = db_models.AristaProvisionedVms
if project_id:
all_ports = (session.query(model).
filter(model.project_id == project_id,
model.host_id.isnot(None),
model.vm_id.isnot(None),
model.network_id.isnot(None),
model.port_id.isnot(None)))
else:
all_ports = (session.query(model).
filter(model.project_id.isnot(None),
model.host_id.isnot(None),
model.vm_id.isnot(None),
model.network_id.isnot(None),
model.port_id.isnot(None)))
ports = {}
for port in all_ports:
if port.port_id not in ports:
ports[port.port_id] = port.eos_port_representation()
ports[port.port_id]['hosts'].append(port.host_id)
return ports | 17d8dadde3dde78286f746435454b454d5589bd2 | 8,015 |
import hashlib
import hmac
def _HMAC(K, C, Mode=hashlib.sha1):
"""
Generate an HMAC value.
The default mode is to generate an HMAC-SHA-1 value w/ the SHA-1 algorithm.
:param K: shared secret between client and server.
Each HOTP generator has a different and unique secret K.
:type K: bytes
:param C: 8-byte counter value, the moving factor.
This counter MUST be synchronized between the HOTP generator
(client) and the HOTP validator (server).
:type C: bytes
:param Mode: The algorithm to use when generating the HMAC value
:type Mode: hashlib.sha1, hashlib.sha256, hashlib.sha512, or hashlib.md5
:return: HMAC result. If HMAC-SHA-1, result is 160-bits (20-bytes) long.
:rtype: bytes
"""
return hmac.new(K, C, Mode).digest() | db9bf26c52427acc259f3cb1590c7c13b0d0dd9e | 8,016 |
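A usage sketch in the HOTP setting the docstring describes; the secret is the RFC 4226 test key and the counter is packed as the standard 8-byte big-endian value:
import struct
K = b"12345678901234567890"        # shared secret (RFC 4226 test value)
C = struct.pack(">Q", 0)           # 8-byte counter / moving factor
digest = _HMAC(K, C)               # 20-byte HMAC-SHA-1 digest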
def get_reference(planet_name):
"""
Return reference for a given planet's orbit fit
Args:
planet_name (str): name of planet. no space
Returns:
reference (str): Reference of orbit fit
"""
planet_name = planet_name.lower()
if planet_name not in post_dict:
raise ValueError("Invalid planet name '{0}'".format(planet_name))
filename, reference = post_dict[planet_name]
return reference | b700509300bdb2f6595e2a7c44a0d84e04d795f8 | 8,017 |
def DefaultPortIfAvailable():
"""Returns default port if available.
Raises:
EmulatorArgumentsError: if port is not available.
Returns:
int, default port
"""
if portpicker.is_port_free(_DEFAULT_PORT):
return _DEFAULT_PORT
else:
raise EmulatorArgumentsError(
'Default emulator port [{}] is already in use'.format(_DEFAULT_PORT)) | 40c065946d8f9ee6c50f7df40f2be6644a472414 | 8,018 |
def mock_hub(hass):
"""Mock hub."""
mock_integration(hass, MockModule(DOMAIN))
hub = mock.MagicMock()
hub.name = "hub"
hass.data[DOMAIN] = {DEFAULT_HUB: hub}
return hub | b4495ca6fbb7638aedf86406f94c00566e376b1b | 8,019 |
def deferred_setting(name, default):
"""
    Returns a function that calls setting(name, default)
"""
return lambda: setting(name, default) | 286acec75f7a5a1e0217dc4cee7b7b5d1ba8e742 | 8,020 |
def extends_dict(target, source):
    """Copy every key/value from source into target when the key is not present in target;
    nested dicts are merged recursively and lists are extended."""
for key, value in source.items():
if key not in target:
target[key] = value
elif type(target[key]) is dict:
extends_dict(target[key], value)
elif type(target[key]) is list:
target[key] += value
return target | 5a68dde5e3bb7dbb81ad61c3698614f56dd5efd7 | 8,021 |
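A small example of how the merge behaves: missing keys are copied, nested dicts are merged recursively, and lists are extended:
target = {"a": 1, "opts": {"x": 1}, "tags": ["t1"]}
source = {"b": 2, "opts": {"y": 2}, "tags": ["t2"]}
extends_dict(target, source)
# -> {"a": 1, "opts": {"x": 1, "y": 2}, "tags": ["t1", "t2"], "b": 2}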
def get_maps_interface_class(zep_inp):
"""
    Takes the input of zephyrus and returns the maps of
    interfaces to classes and vice versa.
"""
interface_to_classes = {}
class_to_interfaces = {}
for i in zep_inp["components"].keys():
class_name = i.split(settings.SEPARATOR)[-1]
interfaces = zep_inp["components"][i]["provides"][0]["ports"]
class_to_interfaces[class_name] = interfaces
for k in interfaces:
if k in interface_to_classes:
interface_to_classes[k].append(class_name)
else:
interface_to_classes[k] = [class_name]
return (interface_to_classes,class_to_interfaces) | 9a14de30677abe0fa8cf0bb3907cd1f32a8f33de | 8,022 |
def plextv_resources_base_fixture():
"""Load base payload for plex.tv resources and return it."""
return load_fixture("plex/plextv_resources_base.xml") | f252099bd6457af208c4d96b8024d3ce28d84cd9 | 8,023 |
from netrc import netrc
from requests.auth import HTTPDigestAuth
def _auth(machine='desi.lbl.gov'):
"""Get authentication credentials.
"""
n = netrc()
    try:
        u, _, p = n.authenticators(machine)
    except TypeError:
        raise ValueError('Unable to get user/pass from $HOME/.netrc for {}'.format(machine))
return HTTPDigestAuth(u,p) | 4ef27e589416f54dd76522b3312a2aa24441e200 | 8,024 |
import re
def relative_date_add(date_rule: str, strict: bool = False) -> float:
"""Change the string in date rule format to the number of days. E.g 1d to 1, 1y to 365, 1m to 30, -1w to -7"""
    days = ''
    res = re.search(DateRuleReg, date_rule)
    if res is not None:
        date_str = res.group(1)
if date_str[0] == '-':
num = float(date_str[1:-1])
days = '-'
else:
num = float(date_str[:-1])
rule = date_str[-1:]
if rule in DictDayRule:
scale = DictDayRule[rule]
days = days + str(num * scale)
d = float(days)
return d
else:
raise MqValueError('There are no valid day rule for the point provided.')
if strict:
raise MqValueError(f'invalid date rule {date_rule}')
return 0 | 9180ed2ec99302679f7d0e2ee9ca57c4e2e6c48c | 8,025 |
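A usage sketch, assuming the `DateRuleReg` pattern captures the signed token and `DictDayRule` maps the rule letters as the docstring suggests ({'d': 1, 'w': 7, 'm': 30, 'y': 365}):
relative_date_add('1d')     # -> 1.0
relative_date_add('-1w')    # -> -7.0
relative_date_add('2m')     # -> 60.0
relative_date_add('foo')    # -> 0, or raises MqValueError if strict=True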
def to_frames_using_nptricks(src: np.ndarray, window_size: int, stride: int) -> np.ndarray:
    """
    A primitive implementation that splits an np.ndarray into frames. It uses
    `np.lib.stride_tricks.as_strided` for the split, which makes it faster than
    the indexing-based `to_frames_using_index`.
    Parameters
    ----------
    src: np.ndarray
        source array to be split.
    window_size: int
        sliding window size.
    stride: int
        stride, an int greater than 0.
    Returns
    -------
    frames: np.ndarray
        the shape of frames is `(num_frames, window_size, *src.shape[1:])`, where num_frames is `(src.shape[0] - window_size) // stride + 1`.
    """
    assert stride > 0, 'stride must be a positive integer. stride={}'.format(stride)
num_frames = (src.shape[0] - window_size) // stride + 1
ret_shape = (num_frames, window_size, *src.shape[1:])
strides = (stride * src.strides[0], *src.strides)
return np.lib.stride_tricks.as_strided(src, shape=ret_shape, strides=strides) | b10b077cf0fbf2b0e491e7f1cba9033cfadf10c5 | 8,026 |
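A short framing example (assumes numpy imported as np): a length-10 signal framed into windows of 4 with stride 2 yields 4 overlapping frames:
import numpy as np
src = np.arange(10)
frames = to_frames_using_nptricks(src, window_size=4, stride=2)
# frames.shape == (4, 4); frames[0] -> [0 1 2 3], frames[1] -> [2 3 4 5], ...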
def global_fit(
model_constructor,
pdf_transform=False,
default_rtol=1e-10,
default_atol=1e-10,
default_max_iter=int(1e7),
learning_rate=1e-6,
):
"""
Wraps a series of functions that perform maximum likelihood fitting in the
`two_phase_solver` method found in the `fax` python module. This allows for
the calculation of gradients of the best-fit parameters with respect to upstream
parameters that control the underlying model, i.e. the event yields (which are
then parameterized by weights or similar).
Args:
model_constructor: Function that takes in the parameters of the observable,
and returns a model object (and background-only parameters)
Returns:
global_fitter: Callable function that performs global fits.
Differentiable :)
"""
adam_init, adam_update, adam_get_params = optimizers.adam(learning_rate)
def make_model(model_pars):
m, bonlypars = model_constructor(model_pars)
bounds = m.config.suggested_bounds()
exp_bonly_data = m.expected_data(bonlypars, include_auxdata=True)
def expected_logpdf(pars): # maps pars to bounded space if pdf_transform = True
return (
m.logpdf(to_bounded_vec(pars, bounds), exp_bonly_data)
if pdf_transform
else m.logpdf(pars, exp_bonly_data)
)
def global_fit_objective(pars): # NLL
return -expected_logpdf(pars)[0]
return global_fit_objective
def global_bestfit_minimized(hyper_param):
nll = make_model(hyper_param)
def bestfit_via_grad_descent(i, param): # gradient descent
g = jax.grad(nll)(param)
# param = param - g * learning_rate
param = adam_get_params(adam_update(i, g, adam_init(param)))
return param
return bestfit_via_grad_descent
global_solve = twophase.two_phase_solver(
param_func=global_bestfit_minimized,
default_rtol=default_rtol,
default_atol=default_atol,
default_max_iter=default_max_iter,
)
def global_fitter(init, hyper_pars):
solve = global_solve(init, hyper_pars)
return solve.value
return global_fitter | 46917c0a4e6469759184a4aaa8199c57573360b0 | 8,027 |
import os
def get_connection_string(storage_account_name):
"""
Checks the environment for variable named AZ_<STORAGE_ACCOUNT_NAME> and
returns the corresponding connection string. Raises a
``ConnectionStringNotFound`` exception if environment variable is missing
"""
conn_string = os.environ.get("AZ_" + storage_account_name.upper(), None)
if conn_string is None:
raise ConnectionStringError(
"Environment variable AZ_" + storage_account_name.upper() + " not found!")
else:
return conn_string | 0f776e98741132dcce60ced478ab8c4514c5a9f8 | 8,028 |
from typing import Optional
from typing import List
def reorder_task(
token: 'auth.JWT',
task_id: 'typevars.ObjectID',
before_id: 'Optional[typevars.ObjectID]' = None,
after_id: 'Optional[typevars.ObjectID]' = None
) -> 'List[models.Task]':
"""Change the position of the task in the list."""
if before_id is None and after_id is None:
raise util_errors.APIError(
'One of before_id or after_id must be provided', 400)
if task_id == before_id or task_id == after_id:
raise util_errors.APIError(
'Task cannot be before or after itself', 400)
before = None
after = None
(task, before, after) = auth.load_owned_objects(
models.Task, token, 'get tasks', task_id, before_id, after_id)
if before is None:
before = after.before
if after is None:
after = before.after
if (
(before is not None and before.after is not after) or
(after is not None and after.before is not before)):
raise util_errors.APIError(
'Before and after tasks are not adjacent', 400)
mutated = [before, after, task, task.before, task.after]
if before is not None and task.parent is not before.parent:
mutated.extend([task.parent, before.parent])
check_reparent(task, before.parent)
elif after is not None and task.parent is not after.parent:
mutated.extend([task.parent, after.parent])
check_reparent(task, after.parent)
if task.before is not None:
task.before.after = task.after
elif task.after is not None:
task.after.before = None
task.before = before
task.after = after
db.DB.session.commit()
return [m for m in set(mutated) if m is not None] | b9e30b6d5929614d8385bd478e0e0a1f2663723f | 8,029 |
import numpy as np
def get_ldpc_code_params(ldpc_design_filename):
"""
Extract parameters from LDPC code design file.
Parameters
----------
ldpc_design_filename : string
Filename of the LDPC code design file.
Returns
-------
ldpc_code_params : dictionary
Parameters of the LDPC code.
"""
ldpc_design_file = open(ldpc_design_filename)
ldpc_code_params = {}
[n_vnodes, n_cnodes] = [int(x) for x in ldpc_design_file.readline().split(' ')]
[max_vnode_deg, max_cnode_deg] = [int(x) for x in ldpc_design_file.readline().split(' ')]
vnode_deg_list = np.array([int(x) for x in ldpc_design_file.readline().split(' ')[:-1]], np.int32)
cnode_deg_list = np.array([int(x) for x in ldpc_design_file.readline().split(' ')[:-1]], np.int32)
cnode_adj_list = -np.ones([n_cnodes, max_cnode_deg], int)
vnode_adj_list = -np.ones([n_vnodes, max_vnode_deg], int)
for vnode_idx in range(n_vnodes):
vnode_adj_list[vnode_idx, 0:vnode_deg_list[vnode_idx]] = \
np.array([int(x)-1 for x in ldpc_design_file.readline().split('\t')])
for cnode_idx in range(n_cnodes):
cnode_adj_list[cnode_idx, 0:cnode_deg_list[cnode_idx]] = \
np.array([int(x)-1 for x in ldpc_design_file.readline().split('\t')])
cnode_vnode_map = -np.ones([n_cnodes, max_cnode_deg], int)
vnode_cnode_map = -np.ones([n_vnodes, max_vnode_deg], int)
cnode_list = np.arange(n_cnodes)
vnode_list = np.arange(n_vnodes)
for cnode in range(n_cnodes):
for i, vnode in enumerate(cnode_adj_list[cnode, 0:cnode_deg_list[cnode]]):
cnode_vnode_map[cnode, i] = cnode_list[np.where(vnode_adj_list[vnode, :] == cnode)]
for vnode in range(n_vnodes):
for i, cnode in enumerate(vnode_adj_list[vnode, 0:vnode_deg_list[vnode]]):
vnode_cnode_map[vnode, i] = vnode_list[np.where(cnode_adj_list[cnode, :] == vnode)]
cnode_adj_list_1d = cnode_adj_list.flatten().astype(np.int32)
vnode_adj_list_1d = vnode_adj_list.flatten().astype(np.int32)
cnode_vnode_map_1d = cnode_vnode_map.flatten().astype(np.int32)
vnode_cnode_map_1d = vnode_cnode_map.flatten().astype(np.int32)
pmat = np.zeros([n_cnodes, n_vnodes], int)
for cnode_idx in range(n_cnodes):
pmat[cnode_idx, cnode_adj_list[cnode_idx, :]] = 1
ldpc_code_params['n_vnodes'] = n_vnodes
ldpc_code_params['n_cnodes'] = n_cnodes
ldpc_code_params['max_cnode_deg'] = max_cnode_deg
ldpc_code_params['max_vnode_deg'] = max_vnode_deg
ldpc_code_params['cnode_adj_list'] = cnode_adj_list_1d
ldpc_code_params['cnode_vnode_map'] = cnode_vnode_map_1d
ldpc_code_params['vnode_adj_list'] = vnode_adj_list_1d
ldpc_code_params['vnode_cnode_map'] = vnode_cnode_map_1d
ldpc_code_params['cnode_deg_list'] = cnode_deg_list
ldpc_code_params['vnode_deg_list'] = vnode_deg_list
ldpc_design_file.close()
return ldpc_code_params | a2702a3fb5faf67d56fa08ae7ab627e3142fb006 | 8,030 |
def find_collection(*, collection, name):
"""
Looks through the pages of a collection for a resource with the specified name.
Returns it, or if not found, returns None
"""
if isinstance(collection, ProjectCollection):
try:
# try to use search if it is available
# call list() to collapse the iterator, otherwise the NotFound
# won't show up until collection_list is used
collection_list = list(collection.search(search_params={
"name": {
"value": name,
"search_method": "EXACT"
}
}))
except NotFound:
# Search must not be available yet
collection_list = collection.list()
else:
collection_list = collection.list()
matching_resources = [resource for resource in collection_list if resource.name == name]
if len(matching_resources) > 1:
raise ValueError("Found multiple collections with name '{}'".format(name))
if len(matching_resources) == 1:
result = matching_resources.pop()
print('Found existing: {}'.format(result))
return result
else:
return None | a6532f2f63b682822f96e51d7ab86e7bc240d922 | 8,031 |
from sklearn.feature_extraction import DictVectorizer
def terms_documents_matrix_ticcl_frequency(in_files):
"""Returns a terms document matrix and related objects of a corpus
A terms document matrix contains frequencies of wordforms, with wordforms
along one matrix axis (columns) and documents along the other (rows).
Inputs:
in_files: list of ticcl frequency files (one per document in the
corpus)
Returns:
corpus: a sparse terms documents matrix
vocabulary: the vectorizer object containing the vocabulary (i.e., all word forms
in the corpus)
"""
vocabulary = DictVectorizer()
corpus = vocabulary.fit_transform(ticcl_frequency(in_files))
return corpus, vocabulary | 25e6cf8ca1696ebb1d5d7f72ddd90fe091e22030 | 8,032 |
def cvt_continue_stmt(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base:
"""continue_stmt: 'continue'"""
#-# Continue
assert ctx.is_REF, [node]
return ast_cooked.ContinueStmt() | 1eefd660e9023aa69957cf1004369d6495048437 | 8,033 |
def nth_even(n):
"""Function I wrote that returns the nth even number."""
return (n * 2) - 2 | 26e1465a039352917647ae650d653ed9842db7f6 | 8,034 |
def _has__of__(obj):
"""Check whether an object has an __of__ method for returning itself
in the context of a container."""
    # It is necessary to check both the type (or we get into cycles) and the
    # presence of the method, because with mixins of Base pre- or
    # post-class-creation (as done in, e.g., zopefoundation/Persistence)
    # either check alone can fail.
return isinstance(obj, ExtensionClass.Base) and hasattr(type(obj), '__of__') | 638b6ed823acf2a46ae5a5cda6d3565fad498364 | 8,035 |
def grayscale(img):
"""
Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')
"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread() | 3c3b0508850c5cdaf2617ed37c6f5eea79be64d0 | 8,036 |
import xml.etree.ElementTree as ET
def get_pageinfo(response, tracktype='recenttracks'):
"""Check how many pages of tracks the user have."""
xmlpage = ET.fromstring(response)
totalpages = xmlpage.find(tracktype).attrib.get('totalPages')
return int(totalpages) | ed5c05bcc648d4a22c5f1b51b196743bf6883dff | 8,037 |
def MIN(*args):
"""Return the minimum of a range or list of Number or datetime"""
return _group_function(min, *args) | 044d1d433901f6ed3308aad711a6bf7eca4e2301 | 8,038 |
def GF(order, irreducible_poly=None, primitive_element=None, verify_irreducible=True, verify_primitive=True, mode="auto", target="cpu"):
"""
Factory function to construct a Galois field array class of type :math:`\\mathrm{GF}(p^m)`.
The created class will be a subclass of :obj:`galois.FieldArray` with metaclass :obj:`galois.FieldMeta`.
The :obj:`galois.FieldArray` inheritance provides the :obj:`numpy.ndarray` functionality. The :obj:`galois.FieldMeta` metaclass
provides a variety of class attributes and methods relating to the finite field.
Parameters
----------
order : int
The order :math:`p^m` of the field :math:`\\mathrm{GF}(p^m)`. The order must be a prime power.
irreducible_poly : int, galois.Poly, optional
Optionally specify an irreducible polynomial of degree :math:`m` over :math:`\\mathrm{GF}(p)` that will
define the Galois field arithmetic. An integer may be provided, which is the integer representation of the
irreducible polynomial. Default is `None` which uses the Conway polynomial :math:`C_{p,m}` obtained from :func:`galois.conway_poly`.
primitive_element : int, galois.Poly, optional
Optionally specify a primitive element of the field :math:`\\mathrm{GF}(p^m)`. A primitive element is a generator of
the multiplicative group of the field. For prime fields :math:`\\mathrm{GF}(p)`, the primitive element must be an integer
and is a primitive root modulo :math:`p`. For extension fields :math:`\\mathrm{GF}(p^m)`, the primitive element is a polynomial
of degree less than :math:`m` over :math:`\\mathrm{GF}(p)` or its integer representation. The default is `None` which uses
:obj:`galois.primitive_root(p)` for prime fields and :obj:`galois.primitive_element(irreducible_poly)` for extension fields.
verify_irreducible : bool, optional
Indicates whether to verify that the specified irreducible polynomial is in fact irreducible. The default is
`True`. For large irreducible polynomials that are already known to be irreducible (and may take a long time to verify),
this argument can be set to `False`.
verify_primitive : bool, optional
Indicates whether to verify that the specified primitive element is in fact a generator of the multiplicative group.
The default is `True`.
mode : str, optional
The type of field computation, either `"auto"`, `"jit-lookup"`, or `"jit-calculate"`. The default is `"auto"`.
The "jit-lookup" mode will use Zech log, log, and anti-log lookup tables for efficient calculation. The "jit-calculate"
mode will not store any lookup tables, but instead perform field arithmetic on the fly. The "jit-calculate" mode is
designed for large fields that cannot or should not store lookup tables in RAM. Generally, "jit-calculate" mode will
be slower than "jit-lookup". The "auto" mode will determine whether to use "jit-lookup" or "jit-calculate" based on the field's
size. In "auto" mode, field's with `order <= 2**16` will use the "jit-lookup" mode.
target : str, optional
The `target` keyword argument from :func:`numba.vectorize`, either `"cpu"`, `"parallel"`, or `"cuda"`.
Returns
-------
galois.FieldMeta
A new Galois field array class that is a subclass of :obj:`galois.FieldArray` with :obj:`galois.FieldMeta` metaclass.
Examples
--------
Construct a Galois field array class with default irreducible polynomial and primitive element.
.. ipython:: python
# Construct a GF(2^m) class
GF256 = galois.GF(2**8)
# Notice the irreducible polynomial is primitive
print(GF256.properties)
poly = GF256.irreducible_poly
Construct a Galois field specifying a specific irreducible polynomial.
.. ipython:: python
# Field used in AES
GF256_AES = galois.GF(2**8, irreducible_poly=galois.Poly.Degrees([8,4,3,1,0]))
print(GF256_AES.properties)
# Construct a GF(p) class
GF571 = galois.GF(571); print(GF571.properties)
# Construct a very large GF(2^m) class
GF2m = galois.GF(2**100); print(GF2m.properties)
# Construct a very large GF(p) class
GFp = galois.GF(36893488147419103183); print(GFp.properties)
See :obj:`galois.FieldArray` for more examples of what Galois field arrays can do.
"""
if not isinstance(order, int):
raise TypeError(f"Argument `order` must be an integer, not {type(order)}.")
p, k = prime_factors(order)
if not len(p) == len(k) == 1:
s = " + ".join([f"{pp}**{kk}" for pp, kk in zip(p, k)])
raise ValueError(f"Argument `order` must be a prime power, not {order} = {s}.")
p, m = p[0], k[0]
if m == 1:
        if irreducible_poly is not None:
raise ValueError(f"Argument `irreducible_poly` can only be specified for prime fields, not the extension field GF({p}^{m}).")
return GF_prime(p, primitive_element=primitive_element, verify_primitive=verify_primitive, target=target, mode=mode)
else:
return GF_extension(p, m, primitive_element=primitive_element, irreducible_poly=irreducible_poly, verify_primitive=verify_primitive, verify_irreducible=verify_irreducible, target=target, mode=mode) | d09dea199559aad111e6aa30a2c391da9ae6b551 | 8,039 |
import typing
def remove_fields_with_value_none(fields: typing.Dict) -> typing.Dict:
"""
Remove keys whose value is none
:param fields: the fields to clean
:return: a copy of fields, without the none values
"""
fields = dict((key, value) for key, value in fields.items() if
value is not None) # Strip out none values
return fields | 22d7ac2a77248809c691bdb98f5f6ebaaf6d4f2b | 8,040 |
def make_values(params, point):
    """Return a dictionary with the values replaced by the values in point,
    where point is a list of the values corresponding to the sorted params."""
    values = {}
    for i, k in enumerate(sorted(params)):
        values[k] = point[i]
return values | 8287b49e54cb08802350a3a15805dc20def10ece | 8,041 |
import pandas as pd
import plotly.graph_objects as go
def elbow_method(data):
"""
This function will compute elbow method and generate elbow visualization
:param data: 2 columns dataframe for cluster analysis
:return: Plotly Figures
"""
distortions = []
K = range(1, 10)
for k in K:
elbow_kmean = model_kmeans(data, k)
distortions.append(elbow_kmean.inertia_)
elbow = pd.DataFrame({'k': K,
'inertia': distortions})
fig = go.Figure(data=go.Scatter(x=elbow['k'], y=elbow['inertia']))
    fig.update_layout(title='Elbow Method for finding the best K value in KMeans',
xaxis_title='K',
yaxis_title='Inertia')
return fig | 5420ce252f8a89ae3540ce37cbfd4f31f0cbe93e | 8,042 |
from typing import Callable
def sa_middleware(key: str = DEFAULT_KEY) -> 'Callable':
""" SQLAlchemy asynchronous middleware factory. """
@middleware
async def sa_middleware_(request: 'Request', handler: 'Callable')\
-> 'StreamResponse':
if key in request:
raise DuplicateRequestKeyError(key)
Session = request.config_dict.get(key)
async with Session() as request[key]:
return await handler(request)
return sa_middleware_ | df4da137e45fcaa2962626a4f3676d9f7b9ecce9 | 8,043 |
from keras import backend as K
def dice_loss(y_true, y_pred):
"""
dice_loss
"""
smooth = 1.
intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
dice_coef = (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + \
K.sum(K.square(y_pred),-1) + smooth)
return 1 - dice_coef | 863f69071375f37fed3c8910e52c1ffbda14cd71 | 8,044 |
def webdriver_init(mobile):
"""
Initialize a mobile/desktop web driver.
    This initializes a web driver with a default user agent chosen according to the
    mobile flag. Default user agents are defined by MOBILE_USER_AGENT and DESKTOP_USER_AGENT.
    :param mobile: The mobile flag
    :type mobile: bool
:return: A web driver
:rtype: WebDriver
"""
if mobile:
return webdriver_init_with_caps(MOBILE_USER_AGENT)
else:
return webdriver_init_with_caps(DESKTOP_USER_AGENT) | 49215bfd5363b9e7e82329b42b97ad04402b7edb | 8,045 |
import hashlib
def calculate_file_hash(f, alg, buf_size):
    """Hash a file-like object in chunks of buf_size bytes (e.g. 64 KB),
    which is needed for large files."""
h = hashlib.new(alg)
for chunk in iter(lambda: f.read(buf_size), b""):
h.update(chunk)
return h.hexdigest() | 6361ef8f18f5ae66e1d51503426c77f7505e10be | 8,046 |
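Usage sketch (the file name is illustrative): hash a large file in 64 KB chunks without loading it into memory:
with open("large_file.bin", "rb") as f:
    digest = calculate_file_hash(f, "sha256", 64 * 1024)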
def update(A, B, DA, DB, f, k, delta_t):
"""Apply the Gray-Scott update formula"""
# compute the diffusion part of the update
diff_A = DA * apply_laplacian(A)
diff_B = DB * apply_laplacian(B)
# Apply chemical reaction
reaction = A*B**2
diff_A -= reaction
diff_B += reaction
# Apply birth/death
diff_A += f * (1-A)
diff_B -= (k+f) * B
A += diff_A * delta_t
B += diff_B * delta_t
return A, B | 75c2004ea089d5b3a9f4ec71fc27510d1c0dc5c0 | 8,047 |
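A minimal driver sketch for the update rule above (hedged: it assumes numpy as np and the apply_laplacian helper used inside update; the parameter values are common Gray-Scott settings, not taken from this snippet):
import numpy as np
A = np.ones((64, 64))              # chemical A starts saturated
B = np.zeros((64, 64))
B[28:36, 28:36] = 1.0              # seed a small square of B in the middle
for _ in range(100):
    A, B = update(A, B, DA=0.16, DB=0.08, f=0.060, k=0.062, delta_t=1.0)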
import typing
import hashlib
def sha512(data: typing.Optional[bytes] = None):
"""Returns a sha512 hash object; optionally initialized with a string."""
if data is None:
return hashlib.sha512()
return hashlib.sha512(data) | 067fffc4c006d9c46e5037b07b86149ac15bb573 | 8,048 |
import os
def _circle_ci_pr():
"""Get the current CircleCI pull request (if any).
Returns:
Optional[int]: The current pull request ID.
"""
try:
return int(os.getenv(env.CIRCLE_CI_PR_NUM, ''))
except ValueError:
return None | ae8739a4abbc04181fd17dcd8963beed71db3597 | 8,049 |
def get_entropy_of_maxes():
"""
    Specialized code for retrieving the guesses and confidence of the largest model of
    each type on the images with the highest entropy.
:return: dict containing the models predictions and confidence, as well as the correct label under "y".
"""
high_entropy_list = get_high_entropy_mnist_test()
d = {}
images = []
values = []
for i in high_entropy_list:
images.append(i[0])
values.append(i[1])
d["y"] = np.array(values)
d["d"] = []
d["f"] = []
model_paths = ["ffnn_models", "dropout_models"]
for model in model_paths:
pred = model_predictor(model + "/model_50000", np.array(images), np.array(values))[0]
for i in pred:
d[model[0]].append((np.argmax(i), i))
return d | 9a43ac44a61776d25d3b46a7fb733d95720e3beb | 8,050 |
from typing import List
from typing import Dict
from typing import Any
def get_all_links() -> List[Dict[str, Any]]:
"""Returns all links as an iterator"""
return get_entire_collection(LINKS_COLLECTION) | 1bda0ac68f778c77914163dd7855491cc04a2c97 | 8,051 |
from keras.models import Sequential
from keras.layers import Dense, Flatten
def agent(states, actions):
    """
    Create a DNN (deep neural network) using Keras.
    """
model = Sequential()
model.add(Flatten(input_shape=(1, states)))
model.add(Dense(24, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(actions, activation='linear'))
return model | b9f8415955cc1b01dbe46b94dd84ab3daa6811c2 | 8,052 |
import rpm
def get_package_nvr_from_spec(spec_file):
    """
    Return the NVR (name-version-release, without the dist tag) for a given spec file
    :param spec_file: The path to a spec file
    :type spec_file: str
    :return: the NVR that should be built for that spec file
    :rtype: str
    """
# Get the dep name & version
spec = rpm.spec(spec_file)
package_nvr = spec.sourceHeader[rpm.RPMTAG_NVR]
# split the dist from the end of the nvr
package_nvr = package_nvr[:package_nvr.rfind('.')]
return package_nvr | 9f8c5e5451d9b7fd3721881645688781b221e08d | 8,053 |
def rbo_ext(S, T, p):
"""Extrapolated RBO value as defined in equation (30).
Implementation handles uneven lists but not ties.
"""
if len(S) > len(T):
L, S = S, T
else:
L, S = T, S
l, s = len(L), len(S)
xl = overlap(L, S, l)
xs = overlap(L, S, s)
sum1 = sum(overlap(L, S, d) / d * p ** d for d in range(1, l + 1))
sum2 = sum(xs * (d - s) / (s * d) * p ** d for d in range(s + 1, l + 1))
return (1 - p) / p * (sum1 + sum2) + ((xl - xs) / l + xs / s) * p ** l | 367c979f8ece073e86e9fdc48a26bb393f0236be | 8,054 |
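A usage sketch (assumes the overlap helper used inside rbo_ext is available); the score lies in [0, 1], with higher values for more similar rankings:
S = ["a", "b", "c", "d"]
T = ["b", "a", "d", "e", "f"]
score = rbo_ext(S, T, p=0.9)       # extrapolated rank-biased overlap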
def index(request):
""" View of index page """
title = _("Home")
posts = Post.objects.all().order_by('-timestamp')[:5]
return render(request, 'dashboard/index.html', locals()) | a152bfad756be809c56e6dc203cb7cf9d29f4868 | 8,055 |
def incremental_str_maker(str_format='{:03d}'):
"""Make a function that will produce a (incrementally) new string at every call."""
i = 0
def mk_next_str():
nonlocal i
i += 1
return str_format.format(i)
return mk_next_str | 41ce6e7d7ba69922f92e73ee516e9c09fdbe0713 | 8,056 |
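Usage sketch: build a generator of incrementing names (the format string here is illustrative):
new_name = incremental_str_maker(str_format="file_{:03d}.txt")
new_name()   # -> 'file_001.txt'
new_name()   # -> 'file_002.txt'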
def get_time_slider_range(highlighted=True,
withinHighlighted=True,
highlightedOnly=False):
"""Return the time range from Maya's time slider.
Arguments:
highlighted (bool): When True if will return a selected frame range
(if there's any selection of more than one frame!) otherwise it
will return min and max playback time.
withinHighlighted (bool): By default Maya returns the highlighted range
end as a plus one value. When this is True this will be fixed by
removing one from the last number.
Returns:
list: List of two floats of start and end frame numbers.
"""
if highlighted is True:
gPlaybackSlider = mel.eval("global string $gPlayBackSlider; "
"$gPlayBackSlider = $gPlayBackSlider;")
if cmds.timeControl(gPlaybackSlider, query=True, rangeVisible=True):
highlightedRange = cmds.timeControl(gPlaybackSlider,
query=True,
rangeArray=True)
if withinHighlighted:
highlightedRange[-1] -= 1
return highlightedRange
if not highlightedOnly:
return [cmds.playbackOptions(query=True, minTime=True),
cmds.playbackOptions(query=True, maxTime=True)] | f05ca2bfec8bcfb41be9a0fa83a724d180dc545f | 8,057 |
def update_IW(hyp_D_prev, xikk, xk, Pik_old):
"""
Do an update of Norm-IW conjugate in an exponential form.
"""
suff_D = get_suff_IW_conj(xikk, xk, Pik_old)
hyp_D = hyp_D_prev + suff_D
Dik = get_E_IW_hyp(hyp_D)
return Dik, hyp_D | d787edf13e18cdbc1c6ff3a65168f32ff8c28b1f | 8,058 |
def compute_state(observations, configuration):
"""
:param observations:
:param configuration:
:return StateTensor:
"""
StateTensorType = configuration.STATE_TYPE
return StateTensorType([observations]) | 44a08caa02137438359c4cd764fff1700b6252b2 | 8,059 |
def supports_transfer_syntax(transfer_syntax: pydicom.uid.UID) -> bool:
"""Return ``True`` if the handler supports the `transfer_syntax`.
Parameters
----------
transfer_syntax : uid.UID
The Transfer Syntax UID of the *Pixel Data* that is to be used with
the handler.
"""
return transfer_syntax in SUPPORTED_TRANSFER_SYNTAXES | 65f85a47afc5002ed33e4ad787317d67b4dab218 | 8,060 |
def enhance_user(user, json_safe=False):
"""
Adds computed attributes to AD user results
Args:
user: A dictionary of user attributes
json_safe: If true, converts binary data into base64,
And datetimes into human-readable strings
Returns:
An enhanced dictionary of user attributes
"""
if "memberOf" in user.keys():
user["memberOf"] = sorted(user["memberOf"], key=lambda dn: dn.lower())
if "showInAddressBook" in user.keys():
user["showInAddressBook"] = sorted(user["showInAddressBook"], key=lambda dn: dn.lower())
if "lastLogonTimestamp" in user.keys():
user["lastLogonTimestamp"] = _get_last_logon(user["lastLogonTimestamp"])
if "lockoutTime" in user.keys():
user["lockoutTime"] = convert_ad_timestamp(user["lockoutTime"], json_safe=json_safe)
if "pwdLastSet" in user.keys():
user["pwdLastSet"] = convert_ad_timestamp(user["pwdLastSet"], json_safe=json_safe)
if "userAccountControl" in user.keys():
user["userAccountControl"] = int(user["userAccountControl"])
user["disabled"] = user["userAccountControl"] & 2 != 0
user["passwordExpired"] = user["userAccountControl"] & 8388608 != 0
user["passwordNeverExpires"] = user["userAccountControl"] & 65536 != 0
user["smartcardRequired"] = user["userAccountControl"] & 262144 != 0
if "whenCreated" in user.keys():
user["whenCreated"] = convert_ad_timestamp(user["whenCreated"], json_safe=json_safe)
if "msExchRecipientTypeDetails" in user.keys():
user["msExchRecipientTypeDetails"] = int(user["msExchRecipientTypeDetails"])
user["remoteExchangeMailbox"] = user["msExchRecipientTypeDetails"] in remote_exchange_mailbox_values
user["exchangeMailbox"] = user["msExchRecipientTypeDetails"] in exchange_mailbox_values.keys()
if user["exchangeMailbox"]:
user["exchangeMailboxType"] = exchange_mailbox_values[user["msExchRecipientTypeDetails"]]
return user | 4b6fd08440c9c92d074e639f803b242f044f2ea3 | 8,061 |
import datetime
import random
def create_data(namespace_id, ocs_client):
"""Creates sample data for the script to use"""
double_type = SdsType(id='doubleType', sdsTypeCode=SdsTypeCode.Double)
datetime_type = SdsType(
id='dateTimeType', sdsTypeCode=SdsTypeCode.DateTime)
pressure_property = SdsTypeProperty(id='pressure', sdsType=double_type)
temperature_property = SdsTypeProperty(id=SAMPLE_FIELD_TO_CONSOLIDATE_TO,
sdsType=double_type)
ambient_temperature_property = SdsTypeProperty(id=SAMPLE_FIELD_TO_CONSOLIDATE,
sdsType=double_type)
time_property = SdsTypeProperty(id='time', sdsType=datetime_type,
isKey=True)
sds_type_1 = SdsType(
id=SAMPLE_TYPE_ID_1,
description='This is a sample Sds type for storing Pressure type '
'events for Data Views',
sdsTypeCode=SdsTypeCode.Object,
properties=[pressure_property, temperature_property, time_property])
sds_type_2 = SdsType(
id=SAMPLE_TYPE_ID_2,
description='This is a new sample Sds type for storing Pressure type '
'events for Data Views',
sdsTypeCode=SdsTypeCode.Object,
properties=[pressure_property, ambient_temperature_property, time_property])
print('Creating SDS Types...')
ocs_client.Types.getOrCreateType(namespace_id, sds_type_1)
ocs_client.Types.getOrCreateType(namespace_id, sds_type_2)
stream1 = SdsStream(
id=SAMPLE_STREAM_ID_1,
name=SAMPLE_STREAM_NAME_1,
description='A Stream to store the sample Pressure events',
typeId=SAMPLE_TYPE_ID_1)
stream2 = SdsStream(
id=SAMPLE_STREAM_ID_2,
name=SAMPLE_STREAM_NAME_2,
description='A Stream to store the sample Pressure events',
typeId=SAMPLE_TYPE_ID_2)
print('Creating SDS Streams...')
ocs_client.Streams.createOrUpdateStream(namespace_id, stream1)
ocs_client.Streams.createOrUpdateStream(namespace_id, stream2)
sample_start_time = datetime.datetime.now() - datetime.timedelta(hours=1)
sample_end_time = datetime.datetime.now()
values1 = []
values2 = []
def value_with_time(timestamp, value, field_name, value2):
"""Formats a JSON data object"""
return f'{{"time": "{timestamp}", "pressure": {str(value)}, "{field_name}": {str(value2)}}}'
print('Generating values...')
for i in range(1, 30, 1):
timestamp = (sample_start_time + datetime.timedelta(minutes=i * 2)
).isoformat(timespec='seconds')
val1 = value_with_time(timestamp, random.uniform(
0, 100), SAMPLE_FIELD_TO_CONSOLIDATE_TO, random.uniform(50, 70))
val2 = value_with_time(timestamp, random.uniform(
0, 100), SAMPLE_FIELD_TO_CONSOLIDATE, random.uniform(50, 70))
values1.append(val1)
values2.append(val2)
print('Sending values...')
ocs_client.Streams.insertValues(
namespace_id,
SAMPLE_STREAM_ID_1,
str(values1).replace("'", ""))
ocs_client.Streams.insertValues(
namespace_id,
SAMPLE_STREAM_ID_2,
str(values2).replace("'", ""))
return (sample_start_time, sample_end_time) | c0b01e36e350d152d758a744735688d15826be06 | 8,062 |
def enhanced_feature_extractor_digit(datum):
"""Feature extraction playground for digits.
You should return a util.Counter() of features
for this datum (datum is of type samples.Datum).
## DESCRIBE YOUR ENHANCED FEATURES HERE...
"""
features = basic_feature_extractor_digit(datum)
"*** YOUR CODE HERE ***"
util.raise_not_defined()
return features | b98ef2caf6b51176fae18ae36dd0c316ab2d8ee7 | 8,063 |
from typing import Union
def linear_resample(x: Union[ivy.Array, ivy.NativeArray], num_samples: int, axis: int = -1, f: ivy.Framework = None)\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Performs linear re-sampling on input image.
:param x: Input array
:type x: array
:param num_samples: The number of interpolated samples to take.
:type num_samples: int
:param axis: The axis along which to perform the resample. Default is last dimension.
:type axis: int, optional
:param f: Machine learning framework. Inferred from inputs if None.
:type f: ml_framework, optional
:return: The array after the linear resampling.
"""
return _cur_framework(x, f=f).linear_resample(x, num_samples, axis) | bd6b54ee5cafe5409eb6aa47da57db2ad3b7fff2 | 8,064 |
import os
def mysql_settings():
    """Return a list of dicts of settings for connecting to MySQL.
Will return the correct settings, depending on which of the environments it
is running in. It attempts to set variables in the following order, where
later environments override earlier ones.
1. Local
2. Github Actions
"""
if "GITHUB_ACTIONS" in os.environ:
instances = 2
user = password = db = "python_agent"
base_port = 8080
else:
instances = 1
user = db = USER
password = ""
base_port = 3306
settings = [
{
"user": user,
"password": password,
"name": db,
"host": "127.0.0.1",
"port": base_port + instance_num,
"namespace": str(os.getpid()),
}
for instance_num in range(instances)
]
return settings | 0fb9a0c0bdc71079da46fbc5f5ed35fd19c985df | 8,065 |
def update_milestones(repo, username=None, namespace=None):
"""Update the milestones of a project."""
repo = flask.g.repo
form = pagure.forms.ConfirmationForm()
error = False
if form.validate_on_submit():
redirect = flask.request.args.get("from")
milestones = flask.request.form.getlist("milestones")
miles = {}
keys = []
for idx in milestones:
milestone = flask.request.form.get(
"milestone_%s_name" % (idx), None
)
date = flask.request.form.get("milestone_%s_date" % (idx), None)
active = (
True
if flask.request.form.get("milestone_%s_active" % (idx))
else False
)
if milestone and milestone.strip():
milestone = milestone.strip()
if milestone in miles:
flask.flash(
"Milestone %s is present multiple times" % milestone,
"error",
)
error = True
break
miles[milestone] = {
"date": date.strip() if date else None,
"active": active,
}
keys.append(milestone)
if not error:
try:
repo.milestones = miles
repo.milestones_keys = keys
flask.g.session.add(repo)
flask.g.session.commit()
flask.flash("Milestones updated")
except SQLAlchemyError as err: # pragma: no cover
flask.g.session.rollback()
flask.flash(str(err), "error")
if redirect == "issues":
return flask.redirect(
flask.url_for(
"ui_ns.view_issues",
username=username,
repo=repo.name,
namespace=namespace,
)
)
return flask.redirect(
flask.url_for(
"ui_ns.view_settings",
username=username,
repo=repo.name,
namespace=namespace,
)
+ "#roadmap-tab"
) | 4869d70a4d8bd85436639dd3214f208822f7241b | 8,066 |
def installedState(item_pl):
"""Checks to see if the item described by item_pl (or a newer version) is
currently installed
All tests must pass to be considered installed.
Returns 1 if it looks like this version is installed
Returns 2 if it looks like a newer version is installed.
Returns 0 otherwise.
"""
foundnewer = False
if item_pl.get('softwareupdatename'):
availableAppleUpdates = appleupdates.softwareUpdateList()
munkicommon.display_debug2(
'Available Apple updates:\n%s' % availableAppleUpdates)
if item_pl['softwareupdatename'] in availableAppleUpdates:
munkicommon.display_debug1(
'%s is in available Apple Software Updates' %
item_pl['softwareupdatename'])
# return 0 so we're marked as needing to be installed
return 0
else:
munkicommon.display_debug1(
'%s is not in available Apple Software Updates' %
item_pl['softwareupdatename'])
# return 1 so we're marked as not needing to be installed
return 1
# does 'installs' exist and is it non-empty?
if item_pl.get('installs', None):
installitems = item_pl['installs']
for item in installitems:
try:
comparison = compareItemVersion(item)
if comparison in (-1, 0):
return 0
elif comparison == 2:
# this item is newer
foundnewer = True
except munkicommon.Error, errmsg:
# some problem with the installs data
munkicommon.display_error(errmsg)
return 0
# if there is no 'installs' key, then we'll use receipt info
# to determine install status.
elif 'receipts' in item_pl:
receipts = item_pl['receipts']
for item in receipts:
try:
comparison = compareReceiptVersion(item)
if comparison in (-1, 0):
# not there or older
return 0
elif comparison == 2:
foundnewer = True
except munkicommon.Error, errmsg:
# some problem with the receipts data
munkicommon.display_error(errmsg)
return 0
# if we got this far, we passed all the tests, so the item
# must be installed (or we don't have enough info...)
if foundnewer:
return 2
else:
return 1 | b3ec76863f75c87ed11a7984eb36384379c96229 | 8,067 |
import numpy as np
def trapezoid_vectors(t, depth, big_t, little_t):
"""Trapezoid shape, in the form of vectors, for model.
Parameters
----------
t : float
Vector of independent values to evaluate trapezoid model.
depth : float
Depth of trapezoid.
big_t : float
Full trapezoid duration.
little_t : float
Ingress/egress duration.
Returns
-------
output : float
Vector of trapezoid model values.
"""
output = np.full_like(t, 1.0)
t = np.abs(t)
big_t_half = big_t * 0.5
little_t_half = little_t * 0.5
one_minus_depth = 1.0 - depth
output = np.where(t <= big_t_half - little_t_half, one_minus_depth, output)
return np.where(
np.logical_and(t > big_t_half - little_t_half,
t < big_t_half + little_t_half),
one_minus_depth + ((depth / little_t) *
(t - big_t_half + little_t_half)),
output) | 759c7cf946bf9ea998644bea9b28f46dea5a6e55 | 8,068 |
def get_rules(clf, class_names, feature_names):
"""
Extracts the rules from a decision tree classifier.
The keyword arguments correspond to the objects returned by
tree.build_tree.
Keyword arguments:
clf: A sklearn.tree.DecisionTreeClassifier.
class_names: A list(str) containing the class names.
feature_names: A list(str) containing the feature names.
Returns:
A list(str) where each element is a rule describing a leaf node.
"""
tree = clf.tree_
rules = traverse(tree, 0, class_names, feature_names, [], [], [], [])
rules = prune_rules(rules, feature_names)
n_rules = len(rules)
print('\tExtracted', n_rules, 'rule' + ('s.' if n_rules > 1 else '.'))
rules_str = []
for (features, thresholds, decisions, class_name) in rules:
rule = lists2rule(features, thresholds, decisions, class_name)
rules_str.append(rule)
return rules_str | a4d9bc1964553d384f1c795e2ad4834a532ddbba | 8,069 |
import os
import subprocess
def save_repo(repo, target="/run/install"):
"""copy a repo to the place where the installer will look for it later."""
newdir = mkdir_seq(os.path.join(target, "DD-"))
log.debug("save_repo: copying %s to %s", repo, newdir)
subprocess.call(["cp", "-arT", repo, newdir])
return newdir | 678d35effe4cc72ce7a46cf6af9ff0b62a277607 | 8,070 |
import pandas as pd
def file_parser(localpath = None, url = None, sep = " ", delimiter = "\t"):
"""
DOCSTRING:
INPUT:
> 'localpath' : String (str). Ideally expects a local object with a read() method (such as a file handle or StringIO).
By default, 'localpath=dummy_file' parameter can be passed to auto-detect and parse one of our dummy 'Payments' file in Amazon format. Acceptable input file extensions include .CSV, .TSV and .TXT. Needs to be passed in within quotes, either single or double quotes. Default 'dummy_file' doesn't require additional quotes.
> 'url' : [OPTIONAL] String (str). If supplied with value, 'localpath' needs to be left at default 'None' or else shall output an error message. Expected file type contained within URL should be either in .CSV, .TSV and .TXT format. Needs to be passed in within quotes, either single or double quotes. Default 'url=ur' can be passed w/o additional quotes for fetching dummy data.
> 'sep' : [OPTIONAL] String (str). Optional, and isn't expected to be modified unless critical. Powered by Python’s builtin parsing sniffer tool.
In addition, separators longer than 1 character and different from '\s+' will be interpreted as regular expressions and will also force the use of the Python parsing engine. Note that regex separators are prone to ignoring quoted data. [Regex example: '\r\t'].
> 'delimiter' : [OPTIONAL] String (str). Parameter isn't expected to be modified (Like setting to 'None') unless critical. Alternative argument name for previous argument 'sep', so a careful choice needs to be made.
OUTPUT:
Shall result into a Pandas DataFrame or TextParser for further data processing.
"""
# Checking existence of 'filepath' or 'url' parameter before parsing:
if localpath == None and url == None:
return "Please input EITHER local file path to 'localpath' parameter OR any valid readable URL to 'url' parameter"
elif localpath != None and url == None:
if localpath.lower().endswith((".txt", ".csv", ".tsv")):
data = pd.read_csv(localpath, sep = sep, delimiter=delimiter, parse_dates=[0], infer_datetime_format=True)
return data
else:
return "This file format is not supported. Kindly refer to our functional flow documentation for further assistance!"
elif localpath == None and url != None:
data = pd.read_csv(url, sep = sep, delimiter=delimiter, parse_dates=[0], infer_datetime_format=True)
return data
else:
return "Please pass valid input for processing." | a1f8cc5fceffdc2a20f745afbce44645634221bd | 8,071 |
def is_list(var, *, debug=False):
"""
is this a list or tuple? (DOES NOT include str)
"""
print_debug(debug, "is_list: got type %s" % (type(var)))
return isinstance(var, (list, tuple)) | 8c24f02aea597c6d17218a468ec59481d873a950 | 8,072 |
import logging
import numpy as np
import scipy.optimize as scopt
def zonotope_minimize(avfun, avdom, avdfun):
"""
Minimize a response surface defined on a zonotope.
:param function avfun: A function of the active variables.
:param ActiveVariableDomain avdom: Contains information about the domain of
`avfun`.
:param function avdfun: Returns the gradient of `avfun`.
:return: ystar, The estimated minimizer of `avfun`.
:rtype: ndarray
:return: fstar, The estimated minimum of `avfun`.
:rtype: float
**See Also**
optimizers.av_minimize
**Notes**
This function wraps the scipy.optimize implementation of SLSQP with linear
inequality constraints derived from the zonotope.
"""
n = avdom.subspaces.W1.shape[1]
logging.getLogger(__name__).debug('Zonotope minimization in {:d} vars.'.format(n))
opts = {'disp':False, 'maxiter':1e4, 'ftol':1e-9}
# a bit of globalization
curr_state = np.random.get_state()
np.random.seed(42)
minf = 1e100
minres = []
for i in range(10):
y0 = np.random.normal(size=(1, n))
cons = avdom.constraints
result = scopt.minimize(avfun, y0, constraints=cons, method='SLSQP', \
jac=avdfun, options=opts)
if not result.success:
raise Exception('SLSQP failed with message: {}.'.format(result.message))
if result.fun < minf:
minf = result.fun
minres = result
logging.getLogger(__name__).debug('\tMinimum {:6.4f}.'.format(minf))
np.random.set_state(curr_state)
ystar, fstar = minres.x, minres.fun
return ystar, fstar | af42a5a8e2c73c9fa0f4f7dcfdd159809e0f7a10 | 8,073 |
def node_to_truncated_gr(node, bin_width=0.1):
"""
Parses truncated GR node to an instance of the
:class: openquake.hazardlib.mfd.truncated_gr.TruncatedGRMFD
"""
# Parse to float dictionary
if not all([node.attrib[key]
for key in ["minMag", "maxMag", "aValue", "bValue"]]):
return None
tgr = dict((key, float_(node.attrib[key])) for key in node.attrib)
return mfd.truncated_gr.TruncatedGRMFD(min_mag=tgr["minMag"],
max_mag=tgr["maxMag"],
bin_width=bin_width,
a_val=tgr["aValue"],
b_val=tgr["bValue"]) | 0fd81e01140ec7b1a38f28c527139b61cc1c3a92 | 8,074 |
def nt(node, tag):
""" returns text of the tag or None if the
tag does not exist """
if node.find(tag) is not None and node.find(tag).text is not None:
return node.find(tag).text
else:
return None | 7ca5f83cf18f918f594374fa2aa875415238eef6 | 8,075 |
def set_user_favorites(username, **_):
"""
Sets the user's Favorites
Variables:
username => Name of the user you want to set the favorites for
Arguments:
None
Data Block:
{ # Dictionary of
"alert": [
"<name_of_query>": # Named queries
"*:*", # The actual query to run
...
}
Result example:
{
"success": true # Was saving the favorites successful ?
}
"""
data = request.json
favorites = {
"alert": [],
"search": [],
"signature": [],
"submission": [],
"error": []
}
for key in data:
if key not in favorites:
return make_api_response("", err="Invalid favorite type (%s)" % key, status_code=400)
favorites.update(data)
return make_api_response({"success": STORAGE.user_favorites.save(username, data)}) | 391ff8e9736bb2baddbf388c5a203cf9d2d7bdcc | 8,076 |
def delete_token(token_id):
"""Revoke a specific token in the application auth database.
:type token_id: str
:param token_id: Token identifier
:rtype: tuple
:return: None, status code
"""
client_data = g.client_data
if not valid_token_id(token_id):
raise MalformedTokenIdException
token = current_app.auth_db.lookup_token(token_id)
if token is None:
raise TokenNotFoundException
if not isinstance(token, Token):
raise InternalServerErrorException("auth_db.lookup_token did not return a token object")
if "admin" in client_data.roles:
current_app.auth_db.revoke_token(token_id)
else:
if token.client_id != client_data.client_id:
raise InadequateRolesException("Cannot revoke a token which you do not own")
current_app.auth_db.revoke_token(token_id)
return "", 204 | 204f4ae2c0dc7c704f05baa86930bc7962d1b639 | 8,077 |
from typing import Optional
async def update_country(identifier: Optional[str] = None, name: Optional[str] = None, capital: Optional[str] = None,
country: UpdateCountryModel = Body(...), current_user: AdminModel = Depends(get_current_user)):
"""
Update a country by name or capital name:
- **current user** should be admin
- **name**: country name
- **capital**: capital name of the country
"""
variables = locals()
options = {'identifier': '_id', 'name': 'name', 'capital': 'capital'}
for key in variables.keys():
if variables[key] is not None:
return await update_object({options[key]: variables[key]}, country, 'countries')
raise HTTPException(status_code=404, detail='Set some parameters') | 7805ca1d8e99e21b2558258e890f11fb4f697df9 | 8,078 |
def twoSum(nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
"""method-1 time O(n), traverse all, get rest"""
for i in range(len(nums)):
res = target - nums[i]
if res in nums:
return [i, nums.index(res)]
else:
return [] | 8c5cd095c7800fa5da698dffa0d76d3c00a8a3c1 | 8,079 |
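# Hedged usage sketch for twoSum above (LeetCode-style behaviour):
print(twoSum([2, 7, 11, 15], 9))   # [0, 1]
print(twoSum([3, 3], 6))           # [0, 1]
print(twoSum([1, 2], 7))           # []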
import numpy as np
def transform_image(image, code_vectors):
"""
Quantize image using the code_vectors (aka centroids)
Return a new image by replacing each RGB value in image with the nearest code vector
(nearest in euclidean distance sense)
returns:
numpy array of shape image.shape
"""
assert image.shape[2] == 3 and len(image.shape) == 3, \
'Image should be a 3-D array with size (?,?,3)'
assert code_vectors.shape[1] == 3 and len(code_vectors.shape) == 2, \
'code_vectors should be a 2-D array with size (?,3)'
# TODO
# - replace each pixel (a 3-dimensional point) by its nearest code vector
N, M, D = image.shape
flatten = image.reshape(N*M, D)
# Assign every pixel to its nearest code vector
distances = squared_euclidean_distances(x=flatten, y=code_vectors)
assignment = np.argmin(distances, axis=1)
compressed = code_vectors[assignment].reshape(N, M, D)
return compressed | 19aae8ca188de03f4b916f52d724048c757e936f | 8,080 |
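# Hedged usage sketch for transform_image above: quantize a tiny random RGB
# image with two code vectors. The squared_euclidean_distances helper used
# inside transform_image is assumed to be importable; numpy is assumed.
import numpy as np
image = np.random.rand(4, 4, 3)
code_vectors = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
quantized = transform_image(image, code_vectors)
print(quantized.shape)             # (4, 4, 3); every pixel snapped to a code vector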
def create_linking_context_from_compilation_outputs(
*,
actions,
additional_inputs = [],
alwayslink = False,
compilation_outputs,
feature_configuration,
label,
linking_contexts = [],
module_context,
name = None,
swift_toolchain,
user_link_flags = []):
"""Creates a linking context from the outputs of a Swift compilation.
On some platforms, this function will spawn additional post-compile actions
for the module in order to add their outputs to the linking context. For
    example, if the toolchain requires a "module-wrap" invocation to embed
the `.swiftmodule` into an object file for debugging purposes, or if it
extracts auto-linking information from the object files to generate a linker
command line parameters file, those actions will be created here.
Args:
actions: The context's `actions` object.
additional_inputs: A `list` of `File`s containing any additional files
that are referenced by `user_link_flags` and therefore need to be
propagated up to the linker.
alwayslink: If True, any binary that depends on the providers returned
by this function will link in all of the library's object files,
even if some contain no symbols referenced by the binary.
compilation_outputs: A `CcCompilationOutputs` value containing the
object files to link. Typically, this is the second tuple element in
the value returned by `swift_common.compile`.
feature_configuration: A feature configuration obtained from
`swift_common.configure_features`.
label: The `Label` of the target being built. This is used as the owner
of the linker inputs created for post-compile actions (if any), and
the label's name component also determines the name of the artifact
unless it is overridden by the `name` argument.
linking_contexts: A `list` of `CcLinkingContext`s containing libraries
from dependencies.
name: A string that is used to derive the name of the library or
libraries linked by this function. If this is not provided or is a
falsy value, the name component of the `label` argument is used.
module_context: The module context returned by `swift_common.compile`
containing information about the Swift module that was compiled.
Typically, this is the first tuple element in the value returned by
`swift_common.compile`.
swift_toolchain: The `SwiftToolchainInfo` provider of the toolchain.
user_link_flags: A `list` of strings containing additional flags that
will be passed to the linker for any binary that links with the
returned linking context.
Returns:
A tuple of `(CcLinkingContext, CcLinkingOutputs)` containing the linking
context to be propagated by the caller's `CcInfo` provider and the
artifact representing the library that was linked, respectively.
"""
extra_linking_contexts = [
cc_info.linking_context
for cc_info in swift_toolchain.implicit_deps_providers.cc_infos
]
if module_context and module_context.swift:
post_compile_linker_inputs = []
# Ensure that the .swiftmodule file is embedded in the final library or
# binary for debugging purposes.
if should_embed_swiftmodule_for_debugging(
feature_configuration = feature_configuration,
module_context = module_context,
):
post_compile_linker_inputs.append(
ensure_swiftmodule_is_embedded(
actions = actions,
feature_configuration = feature_configuration,
label = label,
swiftmodule = module_context.swift.swiftmodule,
swift_toolchain = swift_toolchain,
),
)
# Invoke an autolink-extract action for toolchains that require it.
if is_action_enabled(
action_name = swift_action_names.AUTOLINK_EXTRACT,
swift_toolchain = swift_toolchain,
):
autolink_file = derived_files.autolink_flags(
actions = actions,
target_name = label.name,
)
register_autolink_extract_action(
actions = actions,
autolink_file = autolink_file,
feature_configuration = feature_configuration,
module_name = module_context.name,
object_files = compilation_outputs.objects,
swift_toolchain = swift_toolchain,
)
post_compile_linker_inputs.append(
cc_common.create_linker_input(
owner = label,
user_link_flags = depset(
["@{}".format(autolink_file.path)],
),
additional_inputs = depset([autolink_file]),
),
)
extra_linking_contexts.append(
cc_common.create_linking_context(
linker_inputs = depset(post_compile_linker_inputs),
),
)
if not name:
name = label.name
return cc_common.create_linking_context_from_compilation_outputs(
actions = actions,
feature_configuration = get_cc_feature_configuration(
feature_configuration,
),
cc_toolchain = swift_toolchain.cc_toolchain_info,
compilation_outputs = compilation_outputs,
name = name,
user_link_flags = user_link_flags,
linking_contexts = linking_contexts + extra_linking_contexts,
alwayslink = alwayslink,
additional_inputs = additional_inputs,
disallow_static_libraries = False,
disallow_dynamic_library = True,
grep_includes = None,
) | 4918932b7f47495a59226de64cb4c110227a9c8e | 8,081 |
from miniworld.util import ConcurrencyUtil
def wait_until_uds_reachable(uds_path, return_sock=False):
""" Wait until the unix domain socket at `uds_path` is reachable.
Returns
-------
socket.socket
"""
sock = ConcurrencyUtil.wait_until_fun_returns_true(lambda x: x[0] is True, uds_reachable, uds_path,
return_sock=return_sock)[1]
return sock | 574053ed1b4ccacda37bec58740ad497e690746a | 8,082 |
import pandas as pd
def get_relation_count_df(
dataset: Dataset,
merge_subsets: bool = True,
add_labels: bool = True,
) -> pd.DataFrame:
"""Create a dataframe with relation counts.
:param dataset:
The dataset.
:param add_labels:
Whether to add relation labels to the dataframe.
:param merge_subsets:
Whether to merge subsets, i.e., train/validation/test.
:param add_labels:
Whether to add entity / relation labels.
:return:
A dataframe with columns (relation_id, count, relation_label?, subset?)
"""
return _common(
dataset=dataset,
triple_func=triple_analysis.get_relation_counts,
merge_subsets=merge_subsets,
add_labels=add_labels,
) | a15c22d0790c346cac3842f84a80ee7c27c4471a | 8,083 |
def get_tests(run_id):
"""
    Endpoint for fetching information about the tests in a test run
    Output parameter: test_id
"""
client = APIClient('https://testrail.homecred.it')
client.user = '[email protected]'
client.password = 'Qwerty_22'
tests = client.send_get('get_tests/%s' % run_id)
return tests | a188e8a04dc72e485d5c519c09fcdbd8f2f18f31 | 8,084 |
from datetime import datetime
def create_ffs():
"""
Create a new Powergate Filecoin Filesystem (FFS)
"""
powergate = PowerGateClient(app.config["POWERGATE_ADDRESS"])
ffs = powergate.ffs.create()
creation_date = datetime.now().replace(microsecond=0)
# TODO salt token id
filecoin_file_system = Ffs(
ffs_id=ffs.id, token=ffs.token, creation_date=creation_date, user_id=current_user.id,
)
db.session.add(filecoin_file_system)
# Create new FFS wallet and add entry in log table
address = powergate.ffs.addrs_list(ffs.token)
obj = MessageToDict(address)
wallet = obj["addrs"][0]["addr"]
wallet = Wallets(created=creation_date,
address=wallet,
ffs=ffs.id,
user_id=current_user.id,)
db.session.add(wallet)
db.session.commit()
new_ffs = Ffs.query.filter_by(ffs_id=ffs.id).first()
return new_ffs | 4163d080ae0ec7e2de2090b2ee6112fdbec89d75 | 8,085 |
def other(player):
"""Return the other player, for a player PLAYER numbered 0 or 1.
>>> other(0)
1
>>> other(1)
0
"""
return 1 - player | 08503c35276cf86efa15631bb6b893d72cbae4d5 | 8,086 |
def _explored_parameters_in_group(traj, group_node):
"""Checks if one the parameters in `group_node` is explored.
:param traj: Trajectory container
:param group_node: Group node
:return: `True` or `False`
"""
explored = False
for param in traj.f_get_explored_parameters():
if param in group_node:
explored = True
break
return explored | 71cbafbad0dcc3fa9294c0bede5f6a09941d452b | 8,087 |
import psycopg2
from psycopg2.extras import execute_values
def _execute(query,
data=None,
config_file=DEFAULT_CONFIG_FILE):
"""Execute SQL query on a postgres db"""
# Connect to an existing database.
postgres_db_credentials = postgres_db(config_file)
conn = psycopg2.connect(dbname=postgres_db_credentials["dbname"],
user=postgres_db_credentials["user"],
password=postgres_db_credentials["password"],
host=postgres_db_credentials["host"],
port=postgres_db_credentials["port"])
# Open a cursor to perform database operations.
cur = conn.cursor()
if data is None:
cur.execute(query)
elif isinstance(data, list):
execute_values(cur, query, data, template=None, page_size=100)
else:
cur.execute(query, data)
conn.commit()
if cur.description is None:
result = None
elif len(cur.description) == 1:
result, = cur.fetchone()
else:
result = cur.fetchall()
cur.close()
conn.close()
return result | 84884b6a0902ce7fe964b145f3124a1699f72453 | 8,088 |
from pathlib import Path
def _construct_out_filename(fname, group_name):
"""
Construct a specifically formatted output filename.
The vrt will be placed adjacent to the HDF5 file, as
such write access is required.
"""
basedir = fname.absolute().parent
basename = fname.with_suffix('.vrt').name.replace(
'wagl',
group_name
)
out_fname = basedir.joinpath(Path(basename))
return out_fname | 117bb8470ab65f0b9fb11bb3151ae653e5e28d23 | 8,089 |
import json
def _deposit_need_factory(name, **kwargs):
"""Generate a JSON argument string from the given keyword arguments.
The JSON string is always generated the same way so that the resulting Need
is equal to any other Need generated with the same name and kwargs.
"""
    if kwargs:
        # drop None-valued kwargs so that equivalent needs serialize identically
        kwargs = {key: value for key, value in kwargs.items() if value is not None}
if not kwargs:
argument = None
else:
argument = json.dumps(kwargs, separators=(',', ':'), sort_keys=True)
return ParameterizedActionNeed(name, argument) | 9c8813f0be657b51a787d9badd2f677aca84a002 | 8,090 |
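# Hedged usage sketch for _deposit_need_factory above: None-valued kwargs are
# dropped, so these two calls build equal needs (assuming ParameterizedActionNeed
# is the value-compared namedtuple from invenio-access / flask-principal).
need_a = _deposit_need_factory('deposit-admin', deposit=42, extra=None)
need_b = _deposit_need_factory('deposit-admin', deposit=42)
assert need_a == need_b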
def not_equal(version1, version2):
"""
Evaluates the expression: version1 != version2.
:type version1: str
:type version2: str
:rtype: bool
"""
return compare(version1, '!=', version2) | 5dab948ec2a3eb8d3cb68fcd9887aedb394757df | 8,091 |
def get_sp_list():
"""
Gets all tickers from S&P 500
"""
bs = get_soup('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
sp_companies = bs.find_all('a', class_="external text")
return sp_companies | be850de6fc787faaa05bbd3100dc82ce56cceb22 | 8,092 |
def get_params_nowcast(
to, tf,
i, j,
path, nconst,
depthrange='None',
depav=False, tidecorr=tidetools.CorrTides):
"""This function loads all the data between the start and the end date that
contains hourly velocities in the netCDF4 nowcast files in the specified
depth range. Then masks, rotates and unstaggers the time series. The
unstaggering causes the shapes of the returned arrays to be 1 less than
those of the input arrays in the y and x dimensions. Finally it calculates
tidal ellipse parameters from the u and v time series. Maintains the shape
of the velocities enters only loosing the time dimensions.
:arg to: The beginning of the date range of interest
:type to: datetime object
:arg tf: The end of the date range of interest
:type tf: datetime object
:arg i: x index, must have at least 2 values for unstaggering, will loose
the first i during the unstaggering in prepare_vel.
:type i: float or list
:arg j: y index, must have at least 2 values for unstaggering, will loose
the first j during the unstaggering in prepare_vel.
:type j: float or list
:arg path: Defines the path used(eg. nowcast)
:type path: string
:arg depthrange: Depth values of interest in meters as a float for a single
depth or a list for a range. A float will find the closest depth that
is <= the value given. Default is 'None' for the whole water column
(0-441m).
:type depav: float, string or list.
:arg depav: True will depth average over the whole depth profile given.
Default is False.
:type depav: boolean
:arg depth: depth vector corresponding to the depth of the velocities, only
requiered if depav=True.
:type depth: :py:class:'np.ndarray' or string
:returns: params, dep
params is dictionary object of the ellipse parameters for each constituent
    dep is the depths of the ellipse parameters
"""
u, v, time, dep = ellipse_files_nowcast(
to, tf,
i, j,
path,
depthrange=depthrange)
u_u, v_v = prepare_vel(u, v, depav=depav, depth=dep)
params = get_params(u_u, v_v, time, nconst, tidecorr=tidecorr)
return params, dep | 4cf44961da3109593176476d8e4092a2c05b7a18 | 8,093 |
def convert_size(size):
""" Helper function to convert ISPMan sizes to readable units. """
return number_to_human_size(int(size)*1024) | a28c8332d8f44071409436f4ec7e844a58837f49 | 8,094 |
def get_suppressed_output(
detections,
filter_id: int,
iou: float,
confidence: float,
) -> tuple:
"""Filters detections based on the intersection of union theory.
:param detections: The tensorflow prediction output.
:param filter_id: The specific class to be filtered.
:param iou: The intersection of union threshold.
:param confidence: The confidence threshold.
:returns: tuple of suppressed bbox, suppressed scores and suppressed classes.
"""
detection_masks = (
detections["detection_masks"]
if "detection_masks" in detections
else None
)
detection_boxes = detections["detection_boxes"]
detection_scores = detections["detection_scores"]
detection_classes = detections["detection_classes"]
return (
_non_max_suppress_bbox(
bbox=detection_boxes,
scores=detection_scores,
classes=detection_classes,
filter_class=filter_id,
iou=iou,
confidence=confidence,
)
if detection_masks is None
else _non_max_suppress_mask(
bbox=detection_boxes,
scores=detection_scores,
classes=detection_classes,
masks=detection_masks,
filter_class=filter_id,
iou=iou,
confidence=confidence,
)
) | b6a294611ec22fd48a7a72e51e66e43732c1d3f7 | 8,095 |
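# Hedged usage sketch for get_suppressed_output above: filter raw detector
# output down to a single class. The dict layout follows the TF Object
# Detection API convention; the values and the _non_max_suppress_bbox helper
# it relies on are assumptions, not shown here.
import numpy as np
detections = {
    "detection_boxes": np.array([[0.10, 0.10, 0.50, 0.50],
                                 [0.12, 0.10, 0.52, 0.50]]),
    "detection_scores": np.array([0.9, 0.4]),
    "detection_classes": np.array([1.0, 1.0]),
}
boxes, scores, classes = get_suppressed_output(
    detections, filter_id=1, iou=0.5, confidence=0.5)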
import numpy as np
import tensorflow as tf
from tensorflow import math as tfm
def tf_nan_func(func, **kwargs):
"""
takes function with X as input parameter and applies function only on
finite values,
helpful for tf value calculation which can not deal with nan values
:param func: function call with argument X
:param kwargs: other arguments for func
:return: executed func output with nan values
"""
mask = tfm.is_finite(kwargs["X"])
empty_t = tf.cast(tf.fill(mask.shape, np.nan), dtype=kwargs["X"].dtype)
for i in kwargs:
# workaround of tf.rank(kwargs[i]) > 0, avoid scalar value in mask
if kwargs[i].shape != ():
# keep only finite
kwargs[i] = tf.boolean_mask(kwargs[i], tfm.is_finite(kwargs[i]))
res_func = func(**kwargs)
full_t = tf.tensor_scatter_nd_update(empty_t, tf.where(mask), res_func)
return full_t | d43e509a142bf78025d32984c9ecb0c0856e9a90 | 8,096 |
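# Hedged usage sketch for tf_nan_func above: apply an op to the finite entries
# of a tensor while NaNs stay in place (the lambda gives the op an `X` keyword).
x = tf.constant([1.0, 4.0, np.nan, 9.0], dtype=tf.float64)
print(tf_nan_func(lambda X: tf.sqrt(X), X=x).numpy())   # [ 1.  2. nan  3.]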
def deep_update(original,
new_dict,
new_keys_allowed=False,
allow_new_subkey_list=None,
override_all_if_type_changes=None):
"""Updates original dict with values from new_dict recursively.
If new key is introduced in new_dict, then if new_keys_allowed is not
True, an error will be thrown. Further, for sub-dicts, if the key is
in the allow_new_subkey_list, then new subkeys can be introduced.
Args:
original (dict): Dictionary with default values.
new_dict (dict): Dictionary with values to be updated
new_keys_allowed (bool): Whether new keys are allowed.
allow_new_subkey_list (Optional[List[str]]): List of keys that
correspond to dict values where new subkeys can be introduced.
This is only at the top level.
override_all_if_type_changes(Optional[List[str]]): List of top level
keys with value=dict, for which we always simply override the
entire value (dict), iff the "type" key in that value dict changes.
"""
allow_new_subkey_list = allow_new_subkey_list or []
override_all_if_type_changes = override_all_if_type_changes or []
for k, value in new_dict.items():
if k not in original and not new_keys_allowed:
raise Exception("Unknown config parameter `{}` ".format(k))
        # Both the original value and the new one are dicts.
if isinstance(original.get(k), dict) and isinstance(value, dict):
# Check old type vs old one. If different, override entire value.
if k in override_all_if_type_changes and \
"type" in value and "type" in original[k] and \
value["type"] != original[k]["type"]:
original[k] = value
# Allowed key -> ok to add new subkeys.
elif k in allow_new_subkey_list:
deep_update(original[k], value, True)
# Non-allowed key.
else:
deep_update(original[k], value, new_keys_allowed)
# Original value not a dict OR new value not a dict:
# Override entire value.
else:
original[k] = value
return original | 12573fd3efef4fc9d6c222ccc3ea525c131a2088 | 8,097 |
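# Hedged usage sketch for deep_update above, with illustrative config keys:
defaults = {"lr": 0.01,
            "env_config": {"map": "small"},
            "model": {"type": "cnn", "depth": 3}}
overrides = {"lr": 0.001,
             "env_config": {"seed": 7},
             "model": {"type": "mlp"}}
merged = deep_update(defaults, overrides,
                     new_keys_allowed=False,
                     allow_new_subkey_list=["env_config"],
                     override_all_if_type_changes=["model"])
# merged["env_config"] -> {"map": "small", "seed": 7}
# merged["model"]      -> {"type": "mlp"}   (the "type" changed, so the dict is replaced)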
def queued_archive_jobs():
"""Fetch the info about jobs waiting in the archive queue.
Returns
-------
jobs: dict
"""
jobs = pbs_jobs()
return [
job
for job in jobs
if (job["job_state"] == "Q" and job["queue"] == "archivelong")
] | af4495f6484cf2e819655a1807a38556f62119a5 | 8,098 |
def getCustomKernelSolutionObj(kernelName, directory=globalParameters["CustomKernelDirectory"]):
"""Creates the Solution object for a custom kernel"""
kernelConfig = getCustomKernelConfig(kernelName, directory)
for k, v in kernelConfig.items():
if k != "ProblemType":
checkParametersAreValid((k, [v]), validParameters)
kernelConfig["KernelLanguage"] = "Assembly"
kernelConfig["CustomKernelName"] = kernelName
return Solution(kernelConfig) | 31cec952f3dc5afefa5a50bc8a54fe00eb3d3fe9 | 8,099 |