content (stringlengths 35–762k) | sha1 (stringlengths 40–40) | id (int64 0–3.66M)
---|---|---|
def numpy_ndarray(pa_arr):
"""Return numpy.ndarray view of a pyarrow.Array
"""
if pa_arr.null_count == 0:
# TODO: would memoryview.cast approach be more efficient? see xnd_xnd.
return pa_arr.to_numpy()
pa_nul, pa_buf = pa_arr.buffers()
raise NotImplementedError('numpy.ndarray view of pyarrow.Array with nulls')
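# Hedged usage sketch (not from the original source); assumes pyarrow is installed
# and that the array has no nulls, so the zero-copy fast path above applies.
import pyarrow as pa

arr = pa.array([1, 2, 3])
print(numpy_ndarray(arr))  # -> array([1, 2, 3])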
|
b018bc26983ff638441e1bfca3cb5633022de399
| 19,200 |
from sqlalchemy import select
import json
async def _async_get_states_and_events_with_filter(
hass: HomeAssistant, sqlalchemy_filter: Filters, entity_ids: set[str]
) -> tuple[list[Row], list[Row]]:
"""Get states from the database based on a filter."""
for entity_id in entity_ids:
hass.states.async_set(entity_id, STATE_ON)
hass.bus.async_fire("any", {ATTR_ENTITY_ID: entity_id})
await async_wait_recording_done(hass)
def _get_states_with_session():
with session_scope(hass=hass) as session:
return session.execute(
select(States.entity_id).filter(
sqlalchemy_filter.states_entity_filter()
)
).all()
filtered_states_entity_ids = {
row[0]
for row in await get_instance(hass).async_add_executor_job(
_get_states_with_session
)
}
def _get_events_with_session():
with session_scope(hass=hass) as session:
return session.execute(
select(EventData.shared_data).filter(
sqlalchemy_filter.events_entity_filter()
)
).all()
filtered_events_entity_ids = set()
for row in await get_instance(hass).async_add_executor_job(
_get_events_with_session
):
event_data = json.loads(row[0])
if ATTR_ENTITY_ID not in event_data:
continue
filtered_events_entity_ids.add(event_data[ATTR_ENTITY_ID])
return filtered_states_entity_ids, filtered_events_entity_ids
|
fa0131a87ac9ac517ffd63bb563600e12bed68de
| 19,201 |
def decrypt_password(private_key: PrivateKey, encrypted: str) -> str:
"""Return decrypt the given encrypted password using private_key and the RSA cryptosystem.
Your implementation should be very similar to the one from class, except now
the public key is a data class rather than a tuple.
"""
n = private_key.p * private_key.q
return ''.join([chr(pow(ord(c), private_key.d, n)) for c in encrypted])
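# Hedged round-trip sketch (illustrative only): PrivateKey is assumed to be a small
# data class with fields p, q, d, matching how decrypt_password uses it above.
from dataclasses import dataclass

@dataclass
class PrivateKey:
    p: int
    q: int
    d: int

key = PrivateKey(p=61, q=53, d=2753)          # textbook RSA pair with e = 17, n = 3233
n, e = key.p * key.q, 17
encrypted = ''.join(chr(pow(ord(c), e, n)) for c in 'hunter2')
assert decrypt_password(key, encrypted) == 'hunter2'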
|
607b5e33cff940aa999f56b2e39f56673d94ff7f
| 19,202 |
from collections import OrderedDict
def load_jed(fn):
"""
JEDEC file generated by 1410/84 from PALCE20V8H-15 06/28/20 22:42:11*
DM AMD*
DD PALCE20V8H-15*
QF2706*
G0*
F0*
L00000 0000000000000000000000000100000000000000*
"""
ret = {}
d = OrderedDict()
with open(fn) as f:
li = 0
for l in f:
li += 1
# remove *, newline
l = l.strip()[0:-1]
if not l:
continue
if li == 2:
ret["description"] = l
continue
parts = l.split(" ")
main_line = " ".join(parts[1:])
if parts[0] == "DM":
ret["vendor"] = main_line
elif parts[0] == "DD":
ret["part"] = main_line
elif l[0:2] == "QF":
ret["len"] = int(l[2:])
elif l[0] == "L":
# L00000 0000000000000000000000000100000000000000*
addr, bits = l.split(" ")
addr = int(addr[1:], 10)
d[addr] = bits
else:
continue
ret["data"] = d
return ret
|
6570bcdaabb495c13e9419a532c85b15efdf957a
| 19,203 |
def plaintext(text, keeplinebreaks=True):
"""Extract the text elements from (X)HTML content
>>> plaintext('<b>1 &lt; 2</b>')
u'1 < 2'
>>> plaintext(tag('1 ', tag.b('&lt;'), ' 2'))
u'1 < 2'
>>> plaintext('''<b>1
... &lt;
... 2</b>''', keeplinebreaks=False)
u'1 < 2'
:param text: `unicode` or `Fragment`
:param keeplinebreaks: optionally keep linebreaks
"""
if isinstance(text, Fragment):
text = text.as_text()
else:
text = stripentities(striptags(text))
if not keeplinebreaks:
text = text.replace(u'\n', u' ')
return text
|
c4e5e9a9b41fc7e0dc6b50995d7ec9a9bae1296f
| 19,204 |
import os
from os.path import join as pjoin
def check_out_dir(out_dir, base_dir):
"""Creates the output folder."""
if out_dir is None:
out_dir = pjoin(base_dir, default_out_dir_name)
try:
os.makedirs(out_dir, exist_ok=True)
except OSError as exc:
raise IOError('Unable to create the output directory as requested.') from exc
return out_dir
|
5de68a0e8931a6ae3183eb3a3f5d4173ff697296
| 19,205 |
def fetch_rows(product):
"""
Returns the product and a list of timestamp and price for the given product in the current DATE,
ordered by timestamp.
"""
# We query the data lake by passing a SQL query to maystreet_data.query
# Note that when we filter by month/day, they need to be 0-padded strings,
# e.g. January is '01' and not 1.
query = f"""
SELECT
ExchangeTimestamp AS ts,
price
FROM
"prod_lake"."p_mst_data_lake".mt_trade
WHERE
y = '{DATE.year}'
AND m = '{str(DATE.month).rjust(2, '0')}'
AND d = '{str(DATE.day).rjust(2, '0')}'
AND product = '{product}'
ORDER BY
ExchangeTimestamp
"""
return product, list(md.query(md.DataSource.DATA_LAKE, query))
|
8b6f3df658ca38054bd49255b0842f40f6d4bffa
| 19,206 |
def create_learner(sm_writer, model_helper):
"""Create the learner as specified by FLAGS.learner.
Args:
* sm_writer: TensorFlow's summary writer
* model_helper: model helper with definitions of model & dataset
Returns:
* learner: the specified learner
"""
learner = None
if FLAGS.learner == 'full-prec':
learner = FullPrecLearner(sm_writer, model_helper)
elif FLAGS.learner == 'weight-sparse':
learner = WeightSparseLearner(sm_writer, model_helper)
elif FLAGS.learner == 'channel':
learner = ChannelPrunedLearner(sm_writer, model_helper)
elif FLAGS.learner == 'chn-pruned-gpu':
learner = ChannelPrunedGpuLearner(sm_writer, model_helper)
elif FLAGS.learner == 'chn-pruned-rmt':
learner = ChannelPrunedRmtLearner(sm_writer, model_helper)
elif FLAGS.learner == 'dis-chn-pruned':
learner = DisChnPrunedLearner(sm_writer, model_helper)
elif FLAGS.learner == 'uniform':
learner = UniformQuantLearner(sm_writer, model_helper)
elif FLAGS.learner == 'uniform-tf':
learner = UniformQuantTFLearner(sm_writer, model_helper)
elif FLAGS.learner == 'non-uniform':
learner = NonUniformQuantLearner(sm_writer, model_helper)
else:
raise ValueError('unrecognized learner\'s name: ' + FLAGS.learner)
return learner
|
76231a6413560ccc1e1d90fb974f90a83b3bb4f4
| 19,207 |
def load_decoder(autoencoder):
"""
Gets the decoders associated with the inputted model
"""
dim = len(autoencoder.get_config()['input_layers'])
mag_phase_flag = False
decoders = []
if dim == 2:
mag_phase_flag = True
decoders.append(autoencoder.get_layer('mag_decoder'))
decoders.append(autoencoder.get_layer('phase_decoder'))
else:
decoders.append(autoencoder.get_layer('decoder'))
return decoders, mag_phase_flag
|
8e39470e48f5a6c147d93567c0bdb33a588c790d
| 19,208 |
def translate_boarding_cards(boarding_cards):
"""Translate list of BoardingCards to readable travel instructions.
This function sorts list of random BoardingCard objects connecting starts
with ends of every stage of the trip then returns readable instructions
that include seat numbers, location names and additional data.
:param boarding_cards: list of :class:`BoardingCard` objects.
:return: list of human readable string that describe the whole trip.
"""
# Creating helper maps, one is keyed based on start locations, second one
# is keyed on end locations
starts_map = {
boarding_card.start_key: boarding_card for boarding_card
in boarding_cards
}
ends_map = {
boarding_card.end_key: boarding_card for boarding_card
in boarding_cards
}
# Guessing start and end of the trip
trip_start_keys = [
start_key for start_key in starts_map
if start_key not in ends_map
]
trip_end_keys = [
end_key for end_key in ends_map
if end_key not in starts_map
]
# Validating our guess of start and end of the trip
if len(trip_start_keys) > 1:
raise ValueError(u'More than 1 starting point in the trip!')
if not trip_start_keys:
raise ValueError(u'No starting point in the trip!')
if len(trip_end_keys) > 1:
raise ValueError(u'More than 1 ending point in the trip!')
if not trip_end_keys:
raise ValueError(u'No ending point in the trip!')
trip_start_key = trip_start_keys[0]
trip_end_key = trip_end_keys[0]
# Connecting boarding cards into ordered trip list
trip = [starts_map[trip_start_key]]
current_stop_index = 0
trip_reached_end = False
while not trip_reached_end:
last_stop = trip[current_stop_index]
if last_stop.end_key == trip_end_key:
trip_reached_end = True
else:
trip.append(starts_map[last_stop.end_key])
current_stop_index += 1
# building human readable messages from every stop of the trip
directions = [
boarding_card.human_readable_message for boarding_card in trip
]
if TRIP_FINISH_MESSAGE:
directions.append(TRIP_FINISH_MESSAGE)
return directions
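# Hedged sketch of the BoardingCard interface this function assumes (illustrative only;
# the real class and the TRIP_FINISH_MESSAGE constant live elsewhere in the module).
from dataclasses import dataclass

@dataclass
class BoardingCard:
    start_key: str
    end_key: str
    human_readable_message: str

cards = [
    BoardingCard('b', 'c', 'Take the bus from B to C.'),
    BoardingCard('a', 'b', 'Take the train from A to B.'),
]
# translate_boarding_cards(cards) would return the two messages in A -> B -> C order,
# followed by TRIP_FINISH_MESSAGE if that constant is truthy.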
|
0986ab2669fa4376aebd28804586a1566544610e
| 19,209 |
def detect_side(start: dict, point: dict, degrees):
"""detect to which side robot should rotate"""
if start['lat'] < point['lat'] and start['lng'] < point['lng']:
return f'{degrees} degrees right'
elif start['lat'] < point['lat'] and start['lng'] > point['lng']:
return f'{degrees} degrees left'
elif start['lat'] > point['lat'] and start['lng'] < point['lng']:
return f'{degrees + 90} degrees right'
elif start['lat'] > point['lat'] and start['lng'] > point['lng']:
return f'{degrees + 90} degrees left'
elif degrees == 0:
return f'{0} degrees'
elif degrees == 180:
return f'{180} degrees right'
elif start['lat'] == point['lat'] and start['lng'] < point['lng']:
return f'{degrees} degrees right'
elif start['lat'] == point['lat'] and start['lng'] > point['lng']:
return f'{degrees} degrees left'
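# Hedged usage sketch with made-up coordinates: both latitude and longitude increase,
# so the first branch above fires.
start = {'lat': 10.0, 'lng': 20.0}
point = {'lat': 10.5, 'lng': 20.5}
print(detect_side(start, point, 45))  # '45 degrees right'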
|
124833bbdcdf36c280cdde8e829f15ae5301e323
| 19,210 |
import sys
def ScanSlnFile(filename):
"""Scan a Visual Studio .sln and extract the project dependencies."""
try:
sln = open(filename, "r")
except IOError:
sys.stderr.write("Unable to open " + filename + " for reading.\n")
return 1
projects = {}
project = None
while 1:
line = sln.readline().strip()
if not line:
break
if line.startswith('Project("{'):
# Project definition line looks like
# Project("$TypeGuid") = "$ProjectName", "$ProjectPath", "$ProjectGuid"$
items = line.split('"')
project = Project()
project.name = items[3]
project.path = items[5]
project.guid = items[7]
project.type = items[1]
projects[items[7]] = project
# Start of a dependency group.
if line == "ProjectSection(ProjectDependencies) = postProject":
line = sln.readline().strip()
# End of a dependency group.
while line and line != "EndProjectSection":
project.deps.append(line[:len(project.guid)])
line = sln.readline().strip()
# We are done parsing.
sln.close()
return projects
|
b3f1d4b54027d6eb1aa6b0a3358e9afdb3e8248b
| 19,211 |
import logging
import pathlib
import sh

log = logging.getLogger(__name__)

def iterate_log_lines(file_path: pathlib.Path, n: int = 0, **kwargs):
"""Reads the file in line by line.
dev note: One of the best features of this function is that we can use efficient
unix style operations. Because we know we are inside of a unix container
there should be no problem relying on GNU tail directly.
"""
abs_path = file_path.absolute()
def get_tail_iter(replay=0):
return sh.tail("-n", replay, "-f", str(abs_path), _iter=True)
tail_itr = get_tail_iter(replay=n)
while True:
try:
for line in tail_itr:
yield line.strip()
except KeyboardInterrupt as err:
raise err
except Exception as err:
log.error(err)
log.warning("continuing tail of file")
tail_itr = get_tail_iter(replay=0)
|
54684fcc7a41b623321534202ee250e7c46760d2
| 19,212 |
import torch
def moving_sum(x, start_idx: int, end_idx: int):
"""
From MONOTONIC CHUNKWISE ATTENTION
https://arxiv.org/pdf/1712.05382.pdf
Equation (18)
x = [x_1, x_2, ..., x_N]
MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m
for n in {1, 2, 3, ..., N}
x : src_len, batch_size
start_idx : start idx
end_idx : end idx
Example
src_len = 5
batch_size = 3
x =
[[ 0, 5, 10],
[ 1, 6, 11],
[ 2, 7, 12],
[ 3, 8, 13],
[ 4, 9, 14]]
MovingSum(x, 3, 1) =
[[ 0, 5, 10],
[ 1, 11, 21],
[ 3, 18, 33],
[ 6, 21, 36],
[ 9, 24, 39]]
MovingSum(x, 1, 3) =
[[ 3, 18, 33],
[ 6, 21, 36],
[ 9, 24, 39],
[ 7, 17, 27],
[ 4, 9, 14]]
"""
assert start_idx > 0 and end_idx > 0
assert len(x.size()) == 2
src_len, batch_size = x.size()
# batch_size, 1, src_len
x = x.t().unsqueeze(1)
# batch_size, 1, src_len
moving_sum_weight = x.new_ones([1, 1, end_idx + start_idx - 1])
moving_sum = (
torch.nn.functional.conv1d(
x, moving_sum_weight, padding=start_idx + end_idx - 1
)
.squeeze(1)
.t()
)
moving_sum = moving_sum[end_idx:-start_idx]
assert src_len == moving_sum.size(0)
assert batch_size == moving_sum.size(1)
return moving_sum
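# Quick check of the docstring example (torch is already imported above); this builds
# the 5 x 3 tensor shown in the docstring and prints the 3-before/1-wide moving sum.
x = torch.arange(15, dtype=torch.float32).reshape(3, 5).t()
print(moving_sum(x, 3, 1))  # matches MovingSum(x, 3, 1) in the docstring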
|
fa3cb672e23fccad75965da2ca10955134167c7e
| 19,213 |
import json
import time

import redis
def _wait_for_event(event_name, redis_address, extra_buffer=0):
"""Block until an event has been broadcast.
This is used to synchronize drivers for the multi-node tests.
Args:
event_name: The name of the event to wait for.
redis_address: The address of the Redis server to use for
synchronization.
extra_buffer: An amount of time in seconds to wait after the event.
Returns:
The data that was passed into the corresponding _broadcast_event call.
"""
redis_host, redis_port = redis_address.split(":")
redis_client = redis.StrictRedis(host=redis_host, port=int(redis_port))
while True:
event_infos = redis_client.lrange(EVENT_KEY, 0, -1)
events = {}
for event_info in event_infos:
name, data = json.loads(event_info)
if name in events:
raise Exception("The same event {} was broadcast twice."
.format(name))
events[name] = data
if event_name in events:
# Potentially sleep a little longer and then return the event data.
time.sleep(extra_buffer)
return events[event_name]
time.sleep(0.1)
|
0aa10f52e1682dd9d1cdc0949d245da26c26bcd4
| 19,214 |
import boto3
import botocore.exceptions

def _stack_exists(stack_name):
""" Checks if the stack exists.
Returns True if it exists and False if not.
"""
cf = boto3.client('cloudformation')
exists = False
try:
cf.describe_stacks(StackName=stack_name)
exists = True
except botocore.exceptions.ClientError as ex:
if ex.response['Error']['Code'] == 'ValidationError':
exists = False
else:
raise
return exists
|
5ddc6c17342e3c03317d5da0bf8b4d0a338a8f21
| 19,215 |
def make_sequence_output(detections, classes):
"""
Create the output object for an entire sequence
:param detections: A list of lists of detections. Must contain an entry for each image in the sequence
:param classes: The list of classes in the order they appear in the label probabilities
:return: dict with 'detections' and 'classes' keys
"""
return {
'detections': detections,
'classes': classes
}
|
019d3b74699af20a9f3cbc43b575e8bae5e15946
| 19,216 |
import pandas as pd

def json_to_dataframe(json, subset=0):
"""Convert a JSON object (dict or list of dicts) into a pandas DataFrame.
Returns:\n
Dataframe (optionally limited to the first `subset` rows)
"""
# This is to make sure it has the right format when passed to pandas
if not isinstance(json, list):
json = [json]
try:
df = pd.DataFrame(json, [i for i in range(0, len(json))])
except KeyError as identifier:
print("There was an error")
raise identifier
if subset == 0:
return df
return df.head(subset)
|
151914e3e11759ff74283c303912a0b6842cd213
| 19,217 |
def dms_to_angle(dms):
"""
Get the angle from a tuple of numbers or strings giving its sexagesimal
representation in degrees
@param dms: (degrees, minutes, seconds)
"""
sign = 1
angle_string = dms[0]
if angle_string.startswith('-'):
sign = -1
angle_string = angle_string[1:]
angle_deg = int(angle_string)
angle_min = int(dms[1])
angle_sec = float(dms[2])
if not 0 <= angle_min < 60:
raise VdtAngleError("not a valid value for minutes: " + str(angle_min))
if not 0 <= angle_sec < 60:
raise VdtAngleError("not a valid value for seconds: " + str(angle_sec))
return sign * VAngle((angle_deg, angle_min, angle_sec), unit=u.deg)
|
c56c66093a877aae6474d583da6d1db81ccbc7cd
| 19,218 |
def fix(text):
"""Repairs encoding problems."""
# NOTE(Jonas): This seems to be fixed on the PHP side for now.
# import ftfy
# return ftfy.fix_text(text)
return text
|
7fd97db345a604131f52b272a7dd13ab4f3f9153
| 19,219 |
def generate_labeled_regions(shape, n_regions, rand_gen=None, labels=None,
affine=np.eye(4), dtype=int):
"""Generate a 3D volume with labeled regions.
Parameters
----------
shape: tuple
shape of returned array
n_regions: int
number of regions to generate. By default (if "labels" is None),
add a background with value zero.
labels: iterable
labels to use for each zone. If provided, n_regions is unused.
rand_gen: numpy.random.RandomState
random generator to use for generation.
affine: numpy.ndarray
affine of returned image
Returns
-------
regions: nibabel.Nifti1Image
data has shape "shape", containing region labels.
"""
n_voxels = shape[0] * shape[1] * shape[2]
if labels is None:
labels = range(0, n_regions + 1)
n_regions += 1
else:
n_regions = len(labels)
regions = generate_regions_ts(n_voxels, n_regions, rand_gen=rand_gen)
# replace weights with labels
for n, row in zip(labels, regions):
row[row > 0] = n
data = np.zeros(shape, dtype=dtype)
data[np.ones(shape, dtype=bool)] = regions.sum(axis=0).T
return nibabel.Nifti1Image(data, affine)
|
501c9bab430558fdc0cf45491498c8e3bcc7d3c4
| 19,220 |
def get_source_item_ids(portal, q=None):
"""
Get ids of hosted feature services that have an associated scene service.
Can pass in portal search function query (q).
Returns ids only for valid source items.
"""
source_item_ids = []
scene_item_ids = get_scene_service_item_ids(portal)
items = portal.search(q=q)
for item in items:
if item['type'] == 'Feature Service':
if '/Hosted/' in item['url']:
if 'Hosted Service' in item['typeKeywords']:
# if the service has been published the item
# will have 'Hosted Service' in typeKeywords
# Check if the feature service has an associated
# scene service
feat_service_name = item['url'].split('/')[-2]
for scene_id in scene_item_ids:
scene_service_name = portal.item(scene_id)['url'].split('/')[-2]
if feat_service_name == scene_service_name:
if item['id'] not in source_item_ids:
source_item_ids.append(item['id'])
return source_item_ids
|
448ac2d94fda4dc3c69bd8fe9eb00587a0f0dcb2
| 19,221 |
import numpy as np
from PIL import Image, ImageDraw

def rasterize_poly(poly_xy, shape):
"""
Args:
poly_xy: [(x1, y1), (x2, y2), ...], assumed to be a closed ring (last point repeating the first)
shape: (height, width) of the output mask
Returns a bool array containing True for pixels inside the polygon
"""
_poly = poly_xy[:-1]
# PIL wants *EXACTLY* a list of tuple (NOT a numpy array)
_poly = [tuple(p) for p in _poly]
img = Image.new('L', (shape[1], shape[0]), 0)
ImageDraw.Draw(img).polygon(_poly, outline=0, fill=1)
return np.array(img) == 1
|
d1abf5cef5a1fb57286ff38d575a575a679a4002
| 19,222 |
def from_url_representation(url_rep: str) -> str:
"""Reconvert url representation of path to actual path"""
return url_rep.replace("__", "/").replace("-_-", "_")
|
5cf4e1e8cb284c66449807ea275e4fa6b5a3e3ad
| 19,223 |
from unittest.mock import patch
async def test_async_start_from_history_and_switch_to_watching_state_changes_multiple(
hass,
recorder_mock,
):
"""Test we startup from history and switch to watching state changes."""
hass.config.set_time_zone("UTC")
utcnow = dt_util.utcnow()
start_time = utcnow.replace(hour=0, minute=0, second=0, microsecond=0)
# Start t0 t1 t2 Startup End
# |--20min--|--20min--|--10min--|--10min--|---------30min---------|---15min--|---15min--|
# |---on----|---on----|---on----|---on----|----------on-----------|---off----|----on----|
def _fake_states(*args, **kwargs):
return {
"binary_sensor.state": [
ha.State(
"binary_sensor.state",
"on",
last_changed=start_time,
last_updated=start_time,
),
]
}
with patch(
"homeassistant.components.recorder.history.state_changes_during_period",
_fake_states,
):
with freeze_time(start_time):
await async_setup_component(
hass,
"sensor",
{
"sensor": [
{
"platform": "history_stats",
"entity_id": "binary_sensor.state",
"name": "sensor1",
"state": "on",
"start": "{{ utcnow().replace(hour=0, minute=0, second=0) }}",
"duration": {"hours": 2},
"type": "time",
},
{
"platform": "history_stats",
"entity_id": "binary_sensor.state",
"name": "sensor2",
"state": "on",
"start": "{{ utcnow().replace(hour=0, minute=0, second=0) }}",
"duration": {"hours": 2},
"type": "time",
},
{
"platform": "history_stats",
"entity_id": "binary_sensor.state",
"name": "sensor3",
"state": "on",
"start": "{{ utcnow().replace(hour=0, minute=0, second=0) }}",
"duration": {"hours": 2},
"type": "count",
},
{
"platform": "history_stats",
"entity_id": "binary_sensor.state",
"name": "sensor4",
"state": "on",
"start": "{{ utcnow().replace(hour=0, minute=0, second=0) }}",
"duration": {"hours": 2},
"type": "ratio",
},
]
},
)
await hass.async_block_till_done()
for i in range(1, 5):
await async_update_entity(hass, f"sensor.sensor{i}")
await hass.async_block_till_done()
assert hass.states.get("sensor.sensor1").state == "0.0"
assert hass.states.get("sensor.sensor2").state == "0.0"
assert hass.states.get("sensor.sensor3").state == "0"
assert hass.states.get("sensor.sensor4").state == "0.0"
one_hour_in = start_time + timedelta(minutes=60)
with freeze_time(one_hour_in):
async_fire_time_changed(hass, one_hour_in)
await hass.async_block_till_done()
assert hass.states.get("sensor.sensor1").state == "1.0"
assert hass.states.get("sensor.sensor2").state == "1.0"
assert hass.states.get("sensor.sensor3").state == "0"
assert hass.states.get("sensor.sensor4").state == "50.0"
turn_off_time = start_time + timedelta(minutes=90)
with freeze_time(turn_off_time):
hass.states.async_set("binary_sensor.state", "off")
await hass.async_block_till_done()
async_fire_time_changed(hass, turn_off_time)
await hass.async_block_till_done()
assert hass.states.get("sensor.sensor1").state == "1.5"
assert hass.states.get("sensor.sensor2").state == "1.5"
assert hass.states.get("sensor.sensor3").state == "0"
assert hass.states.get("sensor.sensor4").state == "75.0"
turn_back_on_time = start_time + timedelta(minutes=105)
with freeze_time(turn_back_on_time):
async_fire_time_changed(hass, turn_back_on_time)
await hass.async_block_till_done()
assert hass.states.get("sensor.sensor1").state == "1.5"
assert hass.states.get("sensor.sensor2").state == "1.5"
assert hass.states.get("sensor.sensor3").state == "0"
assert hass.states.get("sensor.sensor4").state == "75.0"
with freeze_time(turn_back_on_time):
hass.states.async_set("binary_sensor.state", "on")
await hass.async_block_till_done()
assert hass.states.get("sensor.sensor1").state == "1.5"
assert hass.states.get("sensor.sensor2").state == "1.5"
assert hass.states.get("sensor.sensor3").state == "1"
assert hass.states.get("sensor.sensor4").state == "75.0"
end_time = start_time + timedelta(minutes=120)
with freeze_time(end_time):
async_fire_time_changed(hass, end_time)
await hass.async_block_till_done()
assert hass.states.get("sensor.sensor1").state == "1.75"
assert hass.states.get("sensor.sensor2").state == "1.75"
assert hass.states.get("sensor.sensor3").state == "1"
assert hass.states.get("sensor.sensor4").state == "87.5"
|
6fb66dde3fad24fbccffb0f8ce74e666e3551e56
| 19,224 |
import numpy as np

def runningmean(data, nav):
"""
Compute the running mean of a 1-dimensional array.
Args:
data: Input data of shape (N, )
nav: Number of points over which the data will be averaged
Returns:
Array of shape (N-(nav-1), )
"""
return np.convolve(data, np.ones((nav,)) / nav, mode='valid')
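# Quick check (numpy is imported above): averaging over a 2-point window.
print(runningmean(np.array([1.0, 2.0, 3.0, 4.0]), 2))  # [1.5 2.5 3.5]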
|
8ba55de399d8789a43624582ac14f2f4804668ef
| 19,225 |
def test_space(gym_space, expected_size, expected_min, expected_max):
"""Test that an action or observation space is the correct size and bounds.
Parameters
----------
gym_space : gym.spaces.Box
gym space object to be tested
expected_size : int
expected size
expected_min : float or array_like
expected minimum value(s)
expected_max : float or array_like
expected maximum value(s)
Returns
-------
bool
True if the test passed, False otherwise
"""
return gym_space.shape[0] == expected_size \
and all(gym_space.high == expected_max) \
and all(gym_space.low == expected_min)
|
e43e2e4d064bec033e6cef6f9c1c905b13541cc7
| 19,226 |
from typing import Optional
from typing import List
def multiindex_strategy(
pandera_dtype: Optional[DataType] = None,
strategy: Optional[SearchStrategy] = None,
*,
indexes: Optional[List] = None,
size: Optional[int] = None,
):
"""Strategy to generate a pandas MultiIndex object.
:param pandera_dtype: :class:`pandera.dtypes.DataType` instance.
:param strategy: an optional hypothesis strategy. If specified, the
pandas dtype strategy will be chained onto this strategy.
:param indexes: a list of :class:`~pandera.schema_components.Index`
objects.
:param size: number of elements in the Series.
:returns: ``hypothesis`` strategy.
"""
# pylint: disable=unnecessary-lambda
if strategy:
raise BaseStrategyOnlyError(
"The dataframe strategy is a base strategy. You cannot specify "
"the strategy argument to chain it to a parent strategy."
)
indexes = [] if indexes is None else indexes
index_dtypes = {
index.name if index.name is not None else i: str(index.dtype)
for i, index in enumerate(indexes)
}
nullable_index = {
index.name if index.name is not None else i: index.nullable
for i, index in enumerate(indexes)
}
strategy = pdst.data_frames(
[index.strategy_component() for index in indexes],
index=pdst.range_indexes(
min_size=0 if size is None else size, max_size=size
),
).map(lambda x: x.astype(index_dtypes))
# this is a hack to convert np.str_ data values into native python str.
for name, dtype in index_dtypes.items():
if dtype in {"object", "str"} or dtype.startswith("string"):
# bind the loop variable as a default argument to avoid late-binding bugs
strategy = strategy.map(
lambda df, name=name: df.assign(**{name: df[name].map(str)})
)
if any(nullable_index.values()):
strategy = null_dataframe_masks(strategy, nullable_index)
return strategy.map(pd.MultiIndex.from_frame)
|
580a312790d7ff5d9c5f5309f3100e4ebd490f7e
| 19,227 |
def pitch_from_centers(X, Y):
"""Spot pitch in X and Y direction estimated from spot centers (X, Y).
"""
assert X.shape == Y.shape
assert X.size > 1
nspots_y, nspots_x = X.shape
if nspots_x > 1 and nspots_y == 1:
pitch_x = pitch_y = np.mean(np.diff(X, axis=1))
elif nspots_y > 1 and nspots_x == 1:
pitch_x = pitch_y = np.mean(np.diff(Y, axis=0))
else:
# both nspots_x and nspots_y are > 1
pitch_x = np.mean(np.diff(X, axis=1))
pitch_y = np.mean(np.diff(Y, axis=0))
return pitch_x, pitch_y
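# Hedged check on a small made-up grid of spot centers (assumes numpy as np, as the
# function above does): spots 10 units apart in x and 5 units apart in y.
import numpy as np

X, Y = np.meshgrid([0.0, 10.0, 20.0], [0.0, 5.0])
print(pitch_from_centers(X, Y))  # (10.0, 5.0)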
|
c9816d3bee4d658a3b00769f26f22b8c0cd0fd10
| 19,228 |
def _create_lists(config, results, current, stack, inside_cartesian=None):
"""
An ugly recursive method to transform config dict
into a tree of AbstractNestedList.
"""
# Have we done it already?
try:
return results[current]
except KeyError:
pass
# Check recursion depth and detect loops
if current in stack:
raise ConfigurationError('Rule {!r} is recursive: {!r}'.format(stack[0], stack))
if len(stack) > 99:
raise ConfigurationError('Rule {!r} is too deep'.format(stack[0]))
# Track recursion depth
stack.append(current)
try:
# Check what kind of list we have
listdef = config[current]
list_type = listdef[_CONF.FIELD.TYPE]
# 1. List of words
if list_type == _CONF.TYPE.WORDS:
results[current] = WordList(listdef['words'])
# List of phrases
elif list_type == _CONF.TYPE.PHRASES:
results[current] = PhraseList(listdef['phrases'])
# 2. Simple list of lists
elif list_type == _CONF.TYPE.NESTED:
results[current] = NestedList([_create_lists(config, results, x, stack,
inside_cartesian=inside_cartesian)
for x in listdef[_CONF.FIELD.LISTS]])
# 3. Cartesian list of lists
elif list_type == _CONF.TYPE.CARTESIAN:
if inside_cartesian is not None:
raise ConfigurationError("Cartesian list {!r} contains another Cartesian list "
"{!r}. Nested Cartesian lists are not allowed."
.format(inside_cartesian, current))
results[current] = CartesianList([_create_lists(config, results, x, stack,
inside_cartesian=current)
for x in listdef[_CONF.FIELD.LISTS]])
# 4. Scalar
elif list_type == _CONF.TYPE.CONST:
results[current] = Scalar(listdef[_CONF.FIELD.VALUE])
# Unknown type
else:
raise InitializationError("Unknown list type: {!r}".format(list_type))
# Return the result
return results[current]
finally:
stack.pop()
|
ef9a51023a44ae1cdbfbadbc762a0ffcd1959562
| 19,229 |
def encode(value):
"""
Encode strings in UTF-8.
:param value: value to be encoded in UTF-8
:return: encoded value
"""
# str() around bytes returns the repr (e.g. "b'...'") in Python 3, so return the bytes directly
return u''.join(value).encode('utf-8')
|
697f99f028d4b978b591d006273b9d5f688711f3
| 19,230 |
def get_season(months, str_='{}'):
"""
Creates a season string.
Parameters:
- months (list of int)
- str_ (str, optional): Formatter string, should contain exactly one {}
at the position where the season substring is included.
Returns:
str
"""
if months is None:
return ''
elif len(set(months).difference([1, 2, 12])) == 0:
return str_.format('DJF')
elif len(set(months).difference([3, 4, 5])) == 0:
return str_.format('MAM')
elif len(set(months).difference([6, 7, 8])) == 0:
return str_.format('JJA')
elif len(set(months).difference([9, 10, 11])) == 0:
return str_.format('SON')
elif len(set(months).difference([11, 12, 1, 2, 3])) == 0:
return str_.format('NDJFM')
elif len(set(months).difference([5, 6, 7, 8, 9])) == 0:
return str_.format('MJJAS')
else:
return str_.format('-'.join(map(str, months)))
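# Illustrative calls (not from the original source):
print(get_season([6, 7, 8], 'season: {}'))  # 'season: JJA'
print(get_season([1, 2]))                   # 'DJF' (a subset of the DJF months)
print(get_season([4, 5, 6]))                # '4-5-6' (no named season matches)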
|
73b4e8169f08ef286a0b57779d22c3436538fc30
| 19,231 |
def data_availability(tags):
"""
get availability based on the validation tags
Args:
tags (pandas.DataFrame): errors tagged as true (see function data_validation)
Returns:
pandas.Series: availability
"""
return ~tags.any(axis=1)
|
240bed8f169d23610f11c214d3644f02e5435412
| 19,232 |
async def fetch_image_by_id(
image_uid: str
):
"""
API request to return a single image by uid
"""
image_uid = int(image_uid)
image = utils_com.get_com_image_by_uid(image_uid)
return image
|
153d24fd35ce18ae9c94d1c7ecf797154bc32c0f
| 19,233 |
from datetime import datetime, timedelta
def get_spring_break(soup_lst, year):
"""
Purpose:
* returns a list of the weekdays during spring break
* only relevant for spring semesters
"""
spring_break_week = set()
# search for the "Spring Break begins after last class." text
for i in range(len(soup_lst)):
if soup_lst[i] == "Spring Break begins after last class.":
pre_friday = datetime.strptime(
soup_lst[i - 1] + " " + year, "%B %d %Y")
break
next_day = pre_friday + timedelta(1)
while next_day.weekday() != 4:
if next_day.weekday() != 5 and next_day.weekday() != 6:
spring_break_week.add(next_day)
next_day += timedelta(1)
spring_break_week.add(next_day)
return spring_break_week
|
cfd80d12da8a22a26d66f4f64f6f8511ed7238a4
| 19,234 |
def GetProQ3Option(query_para):#{{{
"""Return the proq3opt in list
"""
yes_or_no_opt = {}
for item in ['isDeepLearning', 'isRepack', 'isKeepFiles']:
if query_para[item]:
yes_or_no_opt[item] = "yes"
else:
yes_or_no_opt[item] = "no"
proq3opt = [
"-r", yes_or_no_opt['isRepack'],
"-deep", yes_or_no_opt['isDeepLearning'],
"-k", yes_or_no_opt['isKeepFiles'],
"-quality", query_para['method_quality'],
"-output_pdbs", "yes" #always output PDB file (with proq3 written at the B-factor column)
]
if 'targetlength' in query_para:
proq3opt += ["-t", str(query_para['targetlength'])]
return proq3opt
|
e2fe6ba97aa96d01a19a191aabcc3e793a63c490
| 19,235 |
def is_empty_config(host):
"""
Check if any services should to be configured to run on the given host.
"""
return host.AS is None
|
c4ec3861c497ac49ed69ecd1d6da31ab8fe2829c
| 19,236 |
def total_value(metric):
"""Given a time series of values, sum the values"""
total = 0
for i in metric:
total += i
return total
|
4454bfaeb0797bc03b14819bde48dc8f5accc4d3
| 19,237 |
import os
import json

import pandas as pd
from tqdm import tqdm
def unpackJSON(target_naming_scheme, chemdf_dict):
"""
most granular data for each row of the final CSV is the well information.
Each well will need all associated information of chemicals, run, etc.
Unpack those values first and then copy the generated array to each of the invidual wells
developed enough now that it should be broken up into smaller pieces!
Parameters
----------
target_naming_scheme : target folder for storing the run and associated data.
chemdf_dict : dict of pandas.DataFrames assembled from all lab inventories
reads in all of the chemical inventories which describe the chemical
content from each lab used across the dataset construction
Return
------
concat_df_raw : pd.DataFrame, all of the raw values from the processed JSON files
Notes: unlike previous version, no additional calculations are performed,
just parsing the files
"""
concat_df = pd.DataFrame()
concat_df_raw = pd.DataFrame()
json_list = []
for my_exp_json in sorted(os.listdir(target_naming_scheme)):
if my_exp_json.endswith(".json"):
json_list.append(my_exp_json)
for my_exp_json in tqdm(json_list):
modlog.info('(3/4) Unpacking %s' %my_exp_json)
concat_df = pd.DataFrame()
#appends each run to the original dataframe
json_fname = (os.path.join(target_naming_scheme, my_exp_json))
experiment_dict = json.load(open(json_fname, 'r'))
modlog.info('Parsing %s to 2d dataframe' %json_fname)
tray_df = parser.tray_parser(experiment_dict) #generates the tray level dataframe
concat_df = pd.concat([concat_df,tray_df], ignore_index=True, sort=True)
#generates a well level unique ID and aligns
runID_df=pd.DataFrame(data=[concat_df['_raw_jobserial'] + '_' + concat_df['_raw_vialsite']]).transpose()
runID_df.columns=['runid_vial']
#combines all operations into a final dataframe for the entire tray level view with all information
concat_df = pd.concat([concat_df, runID_df], sort=True, axis=1)
#Combines the most recent dataframe with the final dataframe which is targeted for export
concat_df_raw = pd.concat([concat_df_raw,concat_df], sort=True)
return(concat_df_raw)
|
096f1e6619d44b9d4d19c7b96664561070ca264b
| 19,238 |
import re
def validate_email_add(email_str):
"""Validates the email string"""
email = extract_email_id(email_str)
return re.match(r"[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?", email.lower())
|
0f77f223b208471a960e2829efb12a85f82b1381
| 19,239 |
from typing import Union

def get_seed_nodes_json(json_node: dict, seed_nodes_control: Union[dict, list]) -> dict:
""" We need to seed some json sections for extract_fields.
This seeds those nodes as needed. """
seed_json_output = {}
if isinstance(seed_nodes_control, dict) or isinstance(seed_nodes_control, list):
for node in seed_nodes_control:
for key, value in node.items():
if value in json_node:
seed_json_output[key] = json_node[value]
return seed_json_output
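# Illustrative call (made-up node): copy selected keys out of a JSON node under new names.
node = {"id": 7, "attrs": {"name": "alpha"}}
print(get_seed_nodes_json(node, [{"node_id": "id"}]))  # {'node_id': 7}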
|
f3672ee019ff4bb72f25582daf5c83fa7c8f72d0
| 19,240 |
from importlib import import_module

def load_object(import_path):
"""
Loads an object from an 'import_path', like in MIDDLEWARE_CLASSES and the
likes.
Import paths should be: "mypackage.mymodule.MyObject". It then imports the
module up until the last dot and tries to get the attribute after that dot
from the imported module.
If the import path does not contain any dots, a TypeError is raised.
If the module cannot be imported, an ImportError is raised.
If the attribute does not exist in the module, an AttributeError is raised.
"""
if not isinstance(import_path, str):
return import_path
if '.' not in import_path:
raise TypeError(
"'import_path' argument to 'django_load.core.load_object' " +\
"must contain at least one dot.")
module_name, object_name = import_path.rsplit('.', 1)
module = import_module(module_name)
return getattr(module, object_name)
|
5fd45ee31a440cbdd4c90e875e04f4f8f1856b3a
| 19,241 |
def _inufft(kspace,
trajectory,
sensitivities=None,
image_shape=None,
tol=1e-5,
max_iter=10,
return_cg_state=False,
multicoil=None,
combine_coils=True):
"""MR image reconstruction using iterative inverse NUFFT.
For the parameters, see `tfmr.reconstruct`.
"""
kspace = tf.convert_to_tensor(kspace)
trajectory = tf.convert_to_tensor(trajectory)
if sensitivities is not None:
sensitivities = tf.convert_to_tensor(sensitivities)
# Infer rank from number of dimensions in trajectory.
rank = trajectory.shape[-1]
if rank > 3:
raise ValueError(
f"Can only reconstruct images up to rank 3, but `trajectory` implies "
f"rank {rank}.")
# Check inputs and set defaults.
if image_shape is None:
# `image_shape` is required.
raise ValueError("Argument `image_shape` must be provided for NUFFT.")
image_shape = tf.TensorShape(image_shape)
image_shape.assert_has_rank(rank)
if multicoil is None:
# `multicoil` defaults to True if sensitivities were passed; False
# otherwise.
multicoil = sensitivities is not None
batch_shape = tf.shape(kspace)[:-1]
# Set up system operator and right hand side.
linop_nufft = linalg_ops.LinearOperatorNUFFT(image_shape, trajectory)
operator = tf.linalg.LinearOperatorComposition(
[linop_nufft.H, linop_nufft],
is_self_adjoint=True, is_positive_definite=True)
# Compute right hand side.
rhs = tf.linalg.matvec(linop_nufft.H, kspace)
# Solve linear system using conjugate gradient iteration.
result = linalg_ops.conjugate_gradient(operator, rhs, x=None,
tol=tol, max_iter=max_iter)
# Restore image shape.
image = tf.reshape(result.x, tf.concat([batch_shape, image_shape], 0))
# Do coil combination.
if multicoil and combine_coils:
image = coil_ops.combine_coils(image, maps=sensitivities, coil_axis=-rank-1)
return (image, result) if return_cg_state else image
|
892350c74ca0b7163b4aec9278af30dd770b5e1e
| 19,242 |
def adjust_cart(request, item_id):
"""Adjust the quantity of the specified product to the specified amount"""
album = get_object_or_404(Album, pk=item_id)
# Returns 404 if an invalid quantity is entered
try:
quantity = int(request.POST.get("quantity"))
except Exception as e:
return HttpResponse(status=404)
cart = request.session.get("cart", {})
# Updates quantity of existing cart item or removes from cart if quantity < 0
if quantity > 0:
cart[item_id] = quantity
messages.success(request, f"Updated {album.name} quantity to {cart[item_id]}")
else:
cart.pop(item_id)
messages.success(request, f"Removed {album.name} from your cart.")
request.session["cart"] = cart
return redirect(reverse("view_cart"))
|
ed455747341cf581725d2fae326292155a3b77a8
| 19,243 |
def calculate_delta_v(scouseobject, momone, momnine):
"""
Calculate the difference between the moment one and the velocity of the
channel containing the peak flux
Parameters
----------
scouseobject : instance of the scousepy class
momone : ndarray
moment one (intensity-weighted average velocity) map
momnine : ndarray
map containing the velocities of channels containing the peak flux at
each location
"""
delta_v = np.abs(momone.value - momnine.value)
return delta_v
|
a894eac64f5b88fd6230eb060583fd15552bc8d8
| 19,244 |
def _validate_image(values):
"""
Validates the incoming data and raises a Invalid exception
if anything is out of order.
:param values: Mapping of image metadata to check
"""
status = values.get('status', None)
if not status:
msg = "Image status is required."
raise exception.Invalid(msg)
if status not in STATUSES:
msg = "Invalid image status '%s' for image." % status
raise exception.Invalid(msg)
return values
|
d0ebb8ecbde452c3128e93e917482cff13e47947
| 19,245 |
def revcmp(x, y):
"""Does the reverse of cmp():
Return negative if y<x, zero if y==x, positive if y>x"""
return cmp(y, x)
|
52e5382211379d09703996b0da89821a9521de73
| 19,246 |
def linear_regression(data: pd.DataFrame):
"""
https://www.statsmodels.org/
:param data: dataset that must contain the closing price column Close
:return: fitted y values, k, b, and the angle converted from k
"""
y_arr = data.Close.values
x_arr = np.arange(0, len(y_arr))
b_arr = sm.add_constant(x_arr)
model = regression.linear_model.OLS(y_arr, b_arr).fit()
b, k = model.params # y = kx + b : params[1] = k
y_fit = x_arr * k + b
return y_fit, k, b, np.rad2deg(k)
|
9b30a6d90ed1e0131e12b2f7944eb58a90676ad3
| 19,247 |
import datetime
from operator import and_
from dateutil.relativedelta import relativedelta
def get_expiry():
"""
Returns the membership IDs of memberships expiring within 'time_frame' amount of MONTHS
"""
time_frame = request.args.get('time_frame')
try:
time_frame = int(time_frame)
except ValueError as e:
print(e)
return jsonify({
'code': 400,
'error': 'Not valid monthly time frame, should only be int'
})
expiring_members = []
session = Session()
now = datetime.date.today()
relativeMonths = now - relativedelta(months=time_frame)
memberShooterTable = session.query(Member, Shooter) \
.join(Shooter) \
.filter(and_(Member.endDate > relativeMonths, Member.status != Status.EXPIRED))
print("Memberships expiring with " + str(time_frame) + " months")
for row in memberShooterTable:
print(row)
print(row.Member.email)
print(row.Shooter.name)
returnMember = {'name': row.Shooter.name,
'mid': row.Member.mid,
'email': row.Member.email,
'endDate': row.Member.endDate}
expiring_members.append(returnMember)
return jsonify({
'code': 200,
'table': 'Expiring Members',
'entries': expiring_members
})
|
4fd13a5e2de1feb4b797c225c349031a903f2673
| 19,248 |
def get_functions(input_file):
"""Alias for load_data bellow."""
return load_data(input_file)
|
7f286809a3c27db32e0aeb3f08d41989a7b3fad2
| 19,249 |
def is_elem_ref(elem_ref):
"""
Returns true if the elem_ref is an element reference
:param elem_ref:
:return:
"""
return (
elem_ref
and isinstance(elem_ref, tuple)
and len(elem_ref) == 3
and (elem_ref[0] == ElemRefObj or elem_ref[0] == ElemRefArr)
)
|
282a5ba04b2cafedd5a043bf83b4ccbd6196ae44
| 19,250 |
from typing import Tuple
from typing import List
def analyse_subcommand(
analyser: Analyser,
param: Subcommand
) -> Tuple[str, SubcommandResult]:
"""
Analyse the Subcommand part.
Args:
analyser: the analyser in use
param: the target Subcommand
"""
if param.requires:
if analyser.sentences != param.requires:
raise ParamsUnmatched(f"{param.name}'s required is not '{' '.join(analyser.sentences)}'")
analyser.sentences = []
if param.is_compact:
name, _ = analyser.next_data()
if name.startswith(param.name):
analyser.reduce_data(name[len(param.name):], replace=True)  # strip the matched prefix; lstrip() removes characters, not a prefix
else:
raise ParamsUnmatched(f"{name} dose not matched with {param.name}")
else:
name, _ = analyser.next_data(param.separators)
if name != param.name: # match the option name first
raise ParamsUnmatched(f"{name} does not match {param.name}")
name = param.dest
res: SubcommandResult = {"value": None, "args": {}, 'options': {}}
if param.sub_part_len.stop == 0:
res['value'] = Ellipsis
return name, res
args = False
subcommand = res['options']
need_args = param.nargs > 0
for _ in param.sub_part_len:
sub_param = analyse_params(analyser, param.sub_params) # type: ignore
if sub_param and isinstance(sub_param, List):
for p in sub_param:
_current_index = analyser.current_index
_content_index = analyser.content_index
try:
subcommand.setdefault(*analyse_option(analyser, p))
break
except Exception as e:
exc = e
analyser.current_index = _current_index
analyser.content_index = _content_index
continue
else:
raise exc # type: ignore # noqa
elif not args:
res['args'] = analyse_args(analyser, param.args, param.nargs)
args = True
if need_args and not args:
raise ArgumentMissing(config.lang.subcommand_args_missing.format(name=name))
return name, res
|
d3be0a7709ae2ebfab414d30494fe7baeba5de8d
| 19,251 |
def fetch_pg_types(columns_info, trans_obj):
"""
This method is used to fetch the pg types, which is required
to map the data type comes as a result of the query.
Args:
columns_info: dict of column metadata (must include 'type_code' per column)
trans_obj: transaction object holding the server (sid) and database (did) ids
"""
# get the default connection as current connection attached to trans id
# holds the cursor which has query result so we cannot use that connection
# to execute another query otherwise we'll lose query result.
manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(trans_obj.sid)
default_conn = manager.connection(did=trans_obj.did)
# Connect to the Server if not connected.
res = []
if not default_conn.connected():
status, msg = default_conn.connect()
if not status:
return status, msg
oids = [columns_info[col]['type_code'] for col in columns_info]
if oids:
status, res = default_conn.execute_dict(
u"""SELECT oid, format_type(oid,null) as typname FROM pg_type WHERE oid IN %s ORDER BY oid;
""", [tuple(oids)])
if not status:
return False, res
return status, res['rows']
else:
return True, []
|
87bdc81134ee4d83ffbce05a77abec555b55a661
| 19,252 |
def open_popup(text) -> bool:
""" Opens popup when it's text is updated """
if text is not None:
return True
return False
|
8ced6b6e73531f97df8ac7fe38723438077ca6d1
| 19,253 |
def se_beta_formatter(value: str) -> str:
"""
SE Beta formatter.
This formats SE beta values. A valid SE beta value
is a positive float.
@param value:
@return:
"""
try:
se_beta = float(value)
if se_beta >= 0:
result = str(se_beta)
else:
raise ValueError(f'SE beta expected positive float "{value}"')
except ValueError as value_error:
raise ValueError(
f'SE beta could not be parsed as float "{value}" details : {value_error}',
) from value_error
return result
|
30dde489e1a8a70c0f1093caa1ce289c759b26d6
| 19,254 |
from typing import Optional
def replace_missing_data(
data: pd.DataFrame,
target_col: str,
source_col: str,
dropna: Optional[bool] = False,
inplace: Optional[bool] = False,
) -> Optional[pd.DataFrame]:
"""Replace missing data in one column by data from another column.
Parameters
----------
data : :class:`~pandas.DataFrame`
input data with values to replace
target_col : str
target column, i.e., column in which missing values should be replaced
source_col : str
source column, i.e., column values used to replace missing values in ``target_col``
dropna : bool, optional
whether to drop rows with missing values in ``target_col`` or not. Default: ``False``
inplace : bool, optional
whether to perform the operation inplace or not. Default: ``False``
Returns
-------
:class:`~pandas.DataFrame` or ``None``
dataframe with replaced missing values or ``None`` if ``inplace`` is ``True``
"""
_assert_is_dtype(data, pd.DataFrame)
if not inplace:
data = data.copy()
data[target_col].fillna(data[source_col], inplace=True)
if dropna:
data.dropna(subset=[target_col], inplace=True)
if inplace:
return None
return data
|
a94e41cb88bcf502192855276ed1f11f73b1c3a1
| 19,255 |
def jsonpath_parse(data, jsonpath, match_all=False):
"""Parse value in the data for the given ``jsonpath``.
Retrieve the nested entry corresponding to ``data[jsonpath]``. For
example, a ``jsonpath`` of ".foo.bar.baz" means that the data section
should conform to:
.. code-block:: yaml
---
foo:
bar:
baz: <data_to_be_extracted_here>
:param data: The `data` section of a document.
:param jsonpath: A multi-part key that references a nested path in
``data``.
:param match_all: Whether to return all matches or just the first one.
:returns: Entry that corresponds to ``data[jsonpath]`` if present,
else None.
Example::
src_name = sub['src']['name']
src_path = sub['src']['path']
src_doc = db_api.document_get(schema=src_schema, name=src_name)
src_secret = utils.jsonpath_parse(src_doc['data'], src_path)
# Do something with the extracted secret from the source document.
"""
jsonpath = _normalize_jsonpath(jsonpath)
p = _jsonpath_parse(jsonpath)
matches = p.find(data)
if matches:
result = [m.value for m in matches]
return result if match_all else result[0]
|
3b5ab89d8315e36f8412e874f393c414c76b8587
| 19,256 |
def extract_urlparam(name, urlparam):
"""
Attempts to extract a url parameter embedded in another URL
parameter.
"""
if urlparam is None:
return None
query = name+'='
if query in urlparam:
split_args = urlparam[urlparam.index(query):].replace(query, '').split('&')
return split_args[0] if split_args else None
else:
return None
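# Illustrative call: pull a parameter that is embedded inside another parameter's value.
print(extract_urlparam('theme', 'state=page%2F1&theme=dark&lang=en'))  # 'dark'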
|
198771d40eeddc3b7dbf2924d9d49fe7a7f0a51d
| 19,257 |
def get_nlb_data(elb_data, region, load_balancer_name, ssl_hc_path):
"""
Render a dictionary which contains Network Load Balancer attributes
"""
if debug:
logger.debug("Building the Network Load Balancer data structure")
# this is used for building the load balancer spec
nlb_data = {'VpcId': elb_data['LoadBalancerDescriptions'][0]['VPCId'], 'Region': region,
'Nlb_name': elb_data['LoadBalancerDescriptions'][0]['LoadBalancerName'],
'Subnets': elb_data['LoadBalancerDescriptions'][0]['Subnets'],
'Security_groups': elb_data['LoadBalancerDescriptions'][0]['SecurityGroups'],
'Scheme': elb_data['LoadBalancerDescriptions'][0]['Scheme'],
'Tags': elb_data['TagDescriptions'][0]['Tags'],
'listeners': [],
'Type': 'network',
'target_group_attributes': [],
'target_group_arns': []}
# this is used for building the listeners specs
for elb_listener in elb_data['LoadBalancerDescriptions'][0]['ListenerDescriptions']:
listener = {'Protocol': elb_listener['Listener']['Protocol'],
'Port': elb_listener['Listener']['LoadBalancerPort'],
'TargetGroup_Port': elb_listener['Listener']['InstancePort'],
'TargetGroup_Protocol': elb_listener['Listener']['InstanceProtocol']}
targetgroup_attribute = {
'dereg_timeout_seconds_delay': str(elb_data['LoadBalancerAttributes']['ConnectionDraining']['Timeout']),
'TargetGroup_Port': elb_listener['Listener']['InstancePort']
}
nlb_data['listeners'].append(listener)
nlb_data['target_group_attributes'].append(targetgroup_attribute)
# this is used for building the target groups
nlb_data['target_groups'] = []
target_group = {}
# Get health check target
hc_target = elb_data['LoadBalancerDescriptions'][
0]['HealthCheck']['Target']
# Set health check interval
if elb_data['LoadBalancerDescriptions'][0]['HealthCheck']['Interval'] < 15:
print("The minimal supported health check interval is 10. Setting it to 10 seconds")
target_group['HealthCheckIntervalSeconds'] = 10
else:
print("The health check internal is set to 30 seconds")
target_group['HealthCheckIntervalSeconds'] = 30
# Set healthy and unhealthy threshold to the same value which is the
# healthy threshold of Classic Load Balancer
target_group['HealthyThresholdCount'] = elb_data['LoadBalancerDescriptions'][0]['HealthCheck'][
'HealthyThreshold']
target_group['UnhealthyThresholdCount'] = elb_data['LoadBalancerDescriptions'][0]['HealthCheck'][
'HealthyThreshold']
# Set VPC ID
target_group['VpcId'] = elb_data[
'LoadBalancerDescriptions'][0]['VPCId']
# Set health check protocol
target_group['HealthCheckProtocol'] = hc_target.split(':')[0]
# If health check protocol is TCP
if hc_target.split(':')[0] == "TCP":
target_group['HealthCheckPort'] = hc_target.split(':')[1]
# If health check protocol is HTTP or HTTPs
elif hc_target.split(':')[0] == "SSL":
target_group['HealthCheckProtocol'] = "HTTPS"
target_group['HealthCheckPort'] = hc_target.split(':')[1]
target_group['HealthCheckPath'] = ssl_hc_path
else:
target_group['HealthCheckPort'] = hc_target.split(':')[1].split('/')[0]
target_group['HealthCheckPath'] = '/' + hc_target.split('/', 1)[1]
for listener in nlb_data['listeners']:
target_group['Protocol'] = listener['TargetGroup_Protocol']
target_group['Port'] = listener['TargetGroup_Port']
# target group name comes from the first 18 character of the Classic Load Balancer name, \
# "-nlb-tg-" and target group port.
target_group['Name'] = load_balancer_name[: 18] + "-nlb-tg-" + \
str(listener['TargetGroup_Port'])
# Only append unique Target Group
if target_group not in nlb_data['target_groups']:
nlb_data['target_groups'].append(target_group.copy())
# Get registered backend instances
nlb_data['instanceIds'] = []
for instance in elb_data['LoadBalancerDescriptions'][0]['Instances']:
nlb_data['instanceIds'].append(instance['InstanceId'])
if debug:
logger.debug("nlb_data:")
logger.debug(nlb_data)
return nlb_data
|
3cd162176f7e3580f749bfb8af7e8436c4c2dd18
| 19,258 |
from typing import Dict
from typing import Any
def _get_required_var(key: str, data: Dict[str, Any]) -> str:
"""Get a value from a dict coerced to str.
raise RequiredVariableNotPresentException if it does not exist"""
value = data.get(key)
if value is None:
raise RequiredVariableNotPresentException(f"Missing required var {key}")
return str(value)
|
b94db42048779df532a55c2604c7c1b5d02a4f7f
| 19,259 |
def phones():
"""Return a list of phones used in the main dict."""
cmu_phones = []
for line in phones_stream():
parts = line.decode("utf-8").strip().split()
cmu_phones.append((parts[0], parts[1:]))
return cmu_phones
|
972c4c0739cd3c823f98eb6314d25f07b9f720f6
| 19,260 |
def load_specific_forecast(city, provider, date, forecasts):
"""reads in the city, provider, date and forecast_path and returns the data queried from the forecast path
:param city: city for which the weather forecast is for
:type string
:param provider: provider for which the weather forecast is for
:type string
:param date: date for which the weather forecast is for, e.g. '2015-06-29'
:type datetime
:param forecasts: dataframe containing all forecasts
:type pandas dataframe
:return: dataFrame containing relevant dwd data
"""
# get rows with the correct city, provider and date
data_city = forecasts[forecasts['city']==city]
data_provider = data_city[data_city['Provider']==provider]
if provider != 'openweathermap':
# cut the time
data_provider.loc[:, 'Date'] = data_provider.loc[:, 'Date'].map(cut_time, na_action='ignore')
data_provider.loc[:, 'ref_date'] = data_provider.loc[:,'ref_date'].map(cut_time, na_action='ignore')
else:
data_provider.loc[:, 'ref_date'] = data_provider.loc[:,'ref_date'].map(cut_time,na_action='ignore')
data_provider.loc[:, 'Date'] = data_provider.loc[:,'pred_offset'].map(cut_time, na_action='ignore')
data_provider.loc[:, 'pred_offset'] = (data_provider.loc[:,'Date'] - data_provider['ref_date']).\
map(lambda delta: delta/np.timedelta64(1, 'D'), na_action='ignore')
return data_provider[data_provider['Date'] == date]
|
95f00fd07d218f1e19eb6d771898453d2495cb1d
| 19,261 |
from numpy import array, isnan
from mne.channels import Montage
def eeg_to_montage(eeg):
"""Returns an instance of montage from an eeg file"""
pos = array([eeg.info['chs'][i]['loc'][:3]
for i in range(eeg.info['nchan'])])
if not isnan(pos).all():
selection = [i for i in range(eeg.info['nchan'])]
montage = Montage(pos, eeg.info['ch_names'],
selection=selection, kind='custom')
return montage
else:
return None
|
9d0823bc9633ead4081b4b068717c8f9385c3e69
| 19,262 |
def mul_inv2(x:int, k:int) -> int:
""" Computes x*2^{-1} in (Z/3^kZ)*."""
return (inv2(k)*x)%(3**k)
|
5789b4b9837f5b3bf6093aa586fca8f133ff8c51
| 19,263 |
def Be(Subject = P.CA(), Contract=FALSE):
"""Synonym for Agree("be")."""
return Agree("be", Subject, Contract)
|
e6b1f07d17c34157b9b1ca216f4c0e99c5b25c00
| 19,264 |
def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=0.99):
"""
Armijo linesearch function that works with matrices.
Find an approximate minimum of f(xk + alpha*pk) that satisfies the
Armijo conditions.
Parameters
----------
f : function
loss function
xk : np.ndarray
initial position
pk : np.ndarray
descent direction
gfk : np.ndarray
gradient of f at xk
old_fval : float
loss value at xk
args : tuple, optional
arguments given to f
c1 : float, optional
c1 const in armijo rule (>0)
alpha0 : float, optional
initial step (>0)
Returns
-------
alpha : float
step that satisfy armijo conditions
fc : int
nb of function call
fa : float
loss value at step alpha
"""
xk = np.atleast_1d(xk)
fc = [0]
def phi(alpha1):
fc[0] += 1
return f(xk + alpha1 * pk, *args)
if old_fval is None:
phi0 = phi(0.)
else:
phi0 = old_fval
derphi0 = np.sum(gfk.T * pk)
alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1, alpha0=alpha0)
return alpha, fc[0], phi1
|
aefbe34ad1b28317e4fc21b1d80beda430183660
| 19,265 |
from math import exp, log, pi, sqrt
from scipy.stats import norm

def lprob2sigma(lprob):
""" translates a log_e(probability) to units of Gaussian sigmas """
if (lprob>-36.):
sigma = norm.ppf(1.-0.5*exp(1.*lprob))
else:
sigma = sqrt( log(2./pi) - 2.*log(8.2) - 2.*lprob )
return float(sigma)
|
b224e9b50fc2a171cbb849965946ccae804648d7
| 19,266 |
def convert_from_fortran_bool(stringbool):
"""
Converts a string in this case ('T', 'F', or 't', 'f') to True or False
:param stringbool: a string ('t', 'f', 'F', 'T')
:return: boolean (either True or False)
"""
true_items = ['True', 't', 'T']
false_items = ['False', 'f', 'F']
if isinstance(stringbool, str):
if stringbool in false_items:
return False
elif stringbool in true_items:
return True
else:
raise ValueError(f"Could not convert: '{stringbool}' to boolean, "
"which is not 'True', 'False', 't', 'T', 'F' or 'f'")
elif isinstance(stringbool, bool):
return stringbool # no conversion needed...
raise TypeError(f"Could not convert: '{stringbool}' to boolean, " 'only accepts str or boolean')
|
b9840c41a978003e8dcc5191bd7f859fc5b0ecb7
| 19,267 |
def gaussian_device(n_subsystems):
"""Number of qubits or modes."""
return DummyDevice(wires=n_subsystems)
|
c2779958009ebe2dd7907a0a5f418535d782f4a0
| 19,268 |
def create_playlist(current_user, user_id):
"""
Creates a playlist.
:param user_id: the ID of the user.
:return: 200, playlist created successfully.
"""
x = user_id
user = session.query(User).filter_by(id=user_id).one()
data = request.get_json()
new_playlist = Playlist(name=data['name'],
description=data['description'],
user_id=x)
db.session.add(new_playlist)
db.session.commit()
return jsonify({'message': 'playlist %s created successfully' % data['name']})
|
6116949956bbc077205adb66bf51b160b7a0d812
| 19,269 |
def gram_matrix(y):
"""
Input shape: b,c,h,w
Output shape: b,c,c
"""
(b, ch, h, w) = y.size()
features = y.view(b, ch, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (ch * h * w)
return gram
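# Shape check (illustrative): a batch of 2 feature maps with 3 channels gives 2 Gram
# matrices of shape (3, 3).
import torch

y = torch.randn(2, 3, 4, 4)
print(gram_matrix(y).shape)  # torch.Size([2, 3, 3])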
|
9ea7595870dccc1375626c374fb9db1436523e40
| 19,270 |
import dgl
import numpy as np
import torch
def process_pair_v2(data, global_labels):
"""
:param path: graph pair data.
:return data: Dictionary with data, also containing processed DGL graphs.
"""
# print('Using v2 process_pair')
edges_1 = data["graph_1"] #diff from v1
edges_2 = data["graph_2"] #diff from v1
edges_1 = np.array(edges_1, dtype=np.int64);
edges_2 = np.array(edges_2, dtype=np.int64);
G_1 = dgl.DGLGraph((edges_1[:,0], edges_1[:,1]));
G_2 = dgl.DGLGraph((edges_2[:,0], edges_2[:,1]));
G_1.add_edges(G_1.nodes(), G_1.nodes()) #diff from v1
G_2.add_edges(G_2.nodes(), G_2.nodes()) #diff from v1
edges_1 = torch.from_numpy(edges_1.T).type(torch.long)
edges_2 = torch.from_numpy(edges_2.T).type(torch.long)
data["edge_index_1"] = edges_1
data["edge_index_2"] = edges_2
features_1, features_2 = [], []
for n in data["labels_1"]:
features_1.append([1.0 if global_labels[n] == i else 0.0 for i in global_labels.values()])
for n in data["labels_2"]:
features_2.append([1.0 if global_labels[n] == i else 0.0 for i in global_labels.values()])
    G_1.ndata['features'] = torch.FloatTensor(np.array(features_1))
    G_2.ndata['features'] = torch.FloatTensor(np.array(features_2))
    G_1.ndata['type'] = np.array(data["labels_1"])
    G_2.ndata['type'] = np.array(data["labels_2"])
    data['G_1'] = G_1
    data['G_2'] = G_2
norm_ged = data["ged"]/(0.5*(len(data["labels_1"])+len(data["labels_2"])))
data["target"] = torch.from_numpy(np.exp(-norm_ged).reshape(1, 1)).view(-1).float()
return data
|
61e0194f521132cfa4e96db925f566abf6b3b427
| 19,271 |
from typing import Tuple
def calculate_line_changes(diff: Diff) -> Tuple[int, int]:
"""Return a two-tuple (additions, deletions) of a diff."""
additions = 0
deletions = 0
raw_diff = "\n".join(diff.raw_unified_diff())
    for line in raw_diff.splitlines():
        # Added/removed lines in a unified diff start with a single '+'/'-';
        # skip the '+++'/'---' file-header lines.
        if line.startswith("+") and not line.startswith("+++"):
            additions += 1
        elif line.startswith("-") and not line.startswith("---"):
            deletions += 1
return additions, deletions
|
437859735c904a3c7754091c6cb97ba528dc7e72
| 19,272 |
from nltk.corpus import wordnet
def get_synonyms(token):
""" get synonyms of word using wordnet
args:
token: string
returns:
synonyms: list containing synonyms as strings
"""
synonyms = []
if len(wordnet.synsets(token)) == 0:
return None
for synset in wordnet.synsets(token):
for lemma in synset.lemmas():
synonyms.append(lemma.name())
synonyms = _remove_repeated_elements(synonyms)
    # The token itself is not always among the lemma names (e.g. inflected forms),
    # so only remove it when present to avoid a ValueError.
    if token in synonyms:
        synonyms.remove(token)
return synonyms
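# A minimal usage sketch, assuming the NLTK WordNet corpus is available (e.g. after
# nltk.download('wordnet')) and that _remove_repeated_elements is the module-level
# de-duplication helper used above:
#
#     get_synonyms('small')          # e.g. ['little', 'minor', 'modest', ...]
#     get_synonyms('qwertyuiop')     # None -- the token has no synsets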
|
ec26875e694f860c38b709979dfc8328eff17f0f
| 19,273 |
import pandas as pd
def concat_experiments_on_channel(experiments, channel_name):
"""Combines channel values from experiments into one dataframe.
This function helps to compare channel values from a list of experiments
by combining them in a dataframe. E.g: Say we want to extract the `log_loss`
channel values for a list of experiments. The resulting dataframe will have
['id','x_log_loss','y_log_loss'] columns.
Args:
experiments(list): list of `neptune.experiments.Experiment` objects.
channel_name(str): name of the channel for which we want to extract values.
Returns:
`pandas.DataFrame`: Dataframe of ['id','x_CHANNEL_NAME','y_CHANNEL_NAME']
values concatenated from a list of experiments.
Examples:
Instantiate a session::
from neptune.sessions import Session
session = Session()
Fetch a project and a list of experiments::
project = session.get_projects('neptune-ai')['neptune-ai/Salt-Detection']
experiments = project.get_experiments(state=['aborted'], owner=['neyo'], min_running_time=100000)
Construct a channel value dataframe::
from neptunecontrib.api.utils import concat_experiments_on_channel
compare_df = concat_experiments_on_channel(experiments,'unet_0 epoch_val iout loss')
Note:
If an experiment in the list of experiments does not contain the channel with a specified channel_name
it will be omitted.
"""
combined_df = []
for experiment in experiments:
if channel_name in experiment.get_channels().keys():
channel_df = experiment.get_numeric_channels_values(channel_name)
channel_df['id'] = experiment.id
combined_df.append(channel_df)
combined_df = pd.concat(combined_df, axis=0)
return combined_df
|
04c8004ccb1a2b5ec2906bb1183e685b8c8ff763
| 19,274 |
import numpy as np
def sghmc_naive_mh_noresample_uni(u_hat_func, du_hat_func, epsilon, nt, m, M, V, theta_init, r_init, formula):
"""
This is a function to realize Naive Stochastic Gradient Hamiltonian Monte Carlo
with Metropolis-Hastings correction in unidimensional cases without resampling
procedure.
"""
B = 1/2*epsilon*V
theta = [theta_init]
r = [r_init]
for t in range(nt-1):
epsilon0 = max(epsilon, formula(t))
theta0, r0 = theta[-1], r[-1]
for i in range(m):
theta0 = theta0 + epsilon0*1/M*r0
r0 = r0 - epsilon0*du_hat_func(theta0) + np.random.normal(0, np.sqrt(2*B*epsilon0))
# Metropolis-Hastings correction
u = np.random.uniform()
H1 = u_hat_func(theta0) + 1/2*r0**2*1/M
H2 = u_hat_func(theta[-1]) + 1/2*r[-1]**2*1/M
p = np.exp(H2 - H1)
if u < min(1,p):
theta.append(theta0)
r.append(r0)
return [theta, r]
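# A toy run, using numpy as imported above: sample from a standard normal, i.e.
# U(theta) = theta^2 / 2 and dU(theta) = theta, with a constant stepsize schedule.
#
#     u_hat = lambda th: 0.5 * th ** 2
#     du_hat = lambda th: th
#     thetas, rs = sghmc_naive_mh_noresample_uni(
#         u_hat, du_hat, epsilon=0.1, nt=2000, m=10, M=1.0, V=1.0,
#         theta_init=0.0, r_init=1.0, formula=lambda t: 0.1)
#     # for a long chain, np.mean(thetas) should be close to 0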
|
4f330bf3025506bc2bafca0891025ac8b9a4f280
| 19,275 |
import numpy as np
def detect_voices(aud, sr=44100):
    """
    Detect the presence and absence of voices in an array of audio.
    Args:
        aud: 1-D float array of audio samples scaled to [-1, 1].
        sr: sample rate in Hz (default 44100); note the VAD call itself uses
            the module-level SAMPLING_RATE constant.
    Returns:
        List of booleans, one per SMOOTHING_WSIZE-sample window, True where
        the VAD classifies the window as speech.
    """
pcm_16 = np.round(
(np.iinfo(np.int16).max * aud)).astype(np.int16).tobytes()
voices = [
VAD.is_speech(pcm_16[2 * ix:2 * (ix + SMOOTHING_WSIZE)],
sample_rate=SAMPLING_RATE)
for ix in range(0, len(aud), SMOOTHING_WSIZE)
]
return voices
|
ec987cf5e3384cb20d52d07684f7afb5f38f0e98
| 19,276 |
import cv2
def process_to_binary_otsu_image(img_path, inverse=False, max_threshold=255):
"""
Purpose:
Process an image to binary colours using binary otsu thresholding.
Args:
img_path - path to the image to process
inverse - if true an inverted binary thresholding will be applied (optional).
max_threshold - the max value to be given if a pixels value is more than the threshold value (optional).
Returns:
binary_image_tuple[0] - optimal threshold value found by otsu threshold.
binary_image_tuple[1] - binary image.
"""
img = cv2.imread(img_path)
gray_img = convert_bgr_to_gray(img)
if inverse:
binary_image_tuple = threshold_binary_inv_otsu(gray_img, max_threshold)
else:
binary_image_tuple = threshold_binary_otsu(gray_img, max_threshold)
return binary_image_tuple
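# The helpers convert_bgr_to_gray and threshold_binary_otsu are defined elsewhere in
# the module; a minimal OpenCV sketch of the equivalent Otsu step would be:
#
#     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#     thresh_val, binary = cv2.threshold(gray, 0, max_threshold,
#                                        cv2.THRESH_BINARY + cv2.THRESH_OTSU)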
|
f450d29540679f2fa7736e7cd0257a56b58c8a8d
| 19,277 |
import hashlib
from jax import random
def _fold_in_str(rng, data):
"""Folds a string into a jax.random.PRNGKey using its SHA-1 hash."""
m = hashlib.sha1()
m.update(data.encode('utf-8'))
d = m.digest()
hash_int = int.from_bytes(d[:4], byteorder='big', signed=True)
return random.fold_in(rng, hash_int)
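# A minimal usage sketch, using jax.random as imported above: derive a reproducible
# per-module key from a name.
#
#     rng = random.PRNGKey(0)
#     layer_rng = _fold_in_str(rng, 'encoder/layer_3')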
|
e0b3d135a9573892cf7f4cfdcea1bc29bbc3e8c0
| 19,278 |
def create_task_dialog(request):
"""called when creating tasks
"""
return data_dialog(request, mode='create', entity_type='Task')
|
6712048914a8417792b0dc8a1ab60c081886d4fa
| 19,279 |
def raw_rearrange(da, pattern, **kwargs):
"""Crudely wrap `einops.rearrange <https://einops.rocks/api/rearrange/>`_.
Wrapper around einops.rearrange with a very similar syntax.
Spaces, parenthesis ``()`` and `->` are not allowed in dimension names.
Parameters
----------
da : xarray.DataArray
Input array
pattern : string
Pattern string. Same syntax as patterns in einops with two
caveats:
* Unless splitting or stacking, you must use the actual dimension names.
* When splitting or stacking you can use `(dim1 dim2)=dim`. This is `necessary`
for the left hand side as it identifies the dimension to split, and
optional on the right hand side, if omitted the stacked dimension will be given
a default name.
kwargs : dict, optional
Passed to :func:`xarray_einstats.einops.rearrange`
Returns
-------
xarray.DataArray
See Also
--------
xarray_einstats.einops.rearrange:
More flexible and powerful wrapper over einops.rearrange. It is also more verbose.
"""
if "->" in pattern:
in_pattern, out_pattern = pattern.split("->")
in_dims = translate_pattern(in_pattern)
else:
out_pattern = pattern
in_dims = None
out_dims = translate_pattern(out_pattern)
return rearrange(da, out_dims=out_dims, in_dims=in_dims, **kwargs)
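# A minimal usage sketch following the pattern syntax described in the docstring,
# assuming `da` is an xarray.DataArray with "chain" and "draw" dimensions:
#
#     # stack the two dimensions into a single named "sample" dimension
#     raw_rearrange(da, "chain draw -> (chain draw)=sample")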
|
a16c8e439882acba930fa143e9d2428d38d1ca70
| 19,280 |
from typing import Tuple
def get_users() -> Tuple[int, ...]:
"""Count user ids in db."""
db = get_database_connection()
user_searches = db.keys(pattern=f'{DB_SEARCH_PREFIX}*')
    user_ids = [
        # Slice off the key prefix by length; str.lstrip strips a character *set*,
        # not a literal prefix, and could remove leading characters of the id itself.
        int(user_search.decode('utf-8')[len(DB_SEARCH_PREFIX):])
        for user_search in user_searches
    ]
return tuple(user_ids)
|
2eaded42444fe4ad5395387ddd45022a9e8736ce
| 19,281 |
from typing import Optional
async def login(
email: str,
password: str,
session: Optional[ClientSession] = None,
*,
conf_update_interval: Optional[timedelta] = None,
device_set_debounce: Optional[timedelta] = None,
):
"""Login using email and password."""
if session:
response = await _do_login(session, email, password, headers=_headers(""))
else:
async with ClientSession() as _session:
response = await _do_login(_session, email, password)
return Client(
response.get("userunits", '0'),
session,
conf_update_interval=conf_update_interval,
device_set_debounce=device_set_debounce,
)
|
274f0785eb0e2fb3b73cfc4c2810df03df52d7b1
| 19,282 |
def rcomp_prediction(system, rcomp, predargs, init_cond):
""" Make a prediction with the given system
Parameters:
system (str): Name of the system to predict
rcomp (ResComp): Trained reservoir computer
predargs (variable length arguments): Passed directly into rcomp.predict
init_cond (dict): Keyword args passed rcomp.predict
Returns:
pre (ndarray): Reservoir computer prediction
"""
if system == "softrobot":
pre = rcomp.predict(*predargs, **init_cond)
else:
pre = rcomp.predict(predargs, **init_cond)
return pre
|
fb4eb3e710335788333a12abcd494015f4784a78
| 19,283 |
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.model_selection import cross_val_score
def get_best_model(X, y):
"""Select best model from RandomForestClassifier and AdaBoostClassifier"""
ensembles = [
(RandomForestClassifier, SelectParam({
'estimator': RandomForestClassifier(warm_start=True, random_state=7),
'param_grid': {
'n_estimators': [10, 15, 20],
'criterion': ['gini', 'entropy'],
'max_features': [FEATURE_NUM+n for n in [-4, -2, 0]],
'max_depth': [10, 15],
'bootstrap': [True],
'warm_start': [True],
},
'n_jobs':1
})),
(AdaBoostClassifier, SelectParam({
'estimator': AdaBoostClassifier(random_state=7),
'param_grid': {
'algorithm': ['SAMME', 'SAMME.R'],
'n_estimators': [10, 15, 20],
'learning_rate': [1e-3, 1e-2, 1e-1]
},
'n_jobs': 1
}))
]
best_score = 0
best_model = None
for ensemble, select in ensembles:
param = select.get_param(X, y)
model = ensemble(**param)
score = cross_val_score(model, X, y).mean()
if best_score < score:
best_score = score
best_model = model
return best_model
|
c1ac787d89a0086c263490b12081e0bfe98c6c57
| 19,284 |
def compareDates(dateA: list, dateB: list) -> int:
"""
    Compares dateA and dateB\n
    returns: 1 if dateA > dateB,\n
    -1 if dateA < dateB,\n
    0 if dateA == dateB\n
    raises invalidDateException if either date is invalid
"""
if not checkDateValidity(dateA, dateB):
raise invalidDateException('Invalid Dates')
i = 2
while i >= 0:
if dateA[i] < dateB[i]:
return -1
elif dateA[i] > dateB[i]:
return 1
else:
if i == 0:
return 0
i -= 1
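# Examples, treating index 2 as the most significant field (e.g. the year in a
# [day, month, year] date); checkDateValidity and invalidDateException are defined
# elsewhere in the module:
#
#     compareDates([1, 6, 2021], [15, 3, 2020])   # 1  -- first date is later
#     compareDates([1, 6, 2021], [1, 6, 2021])    # 0  -- equal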
|
927af2e0164706e8013badd90638b2561ab74241
| 19,285 |
def renderPage(res, topLevelContext=context.WebContext,
reqFactory=FakeRequest):
"""
Render the given resource. Return a Deferred which fires when it has
rendered.
"""
req = reqFactory()
ctx = topLevelContext(tag=res)
ctx.remember(req, inevow.IRequest)
render = appserver.NevowRequest(None, True).gotPageContext
result = render(ctx)
result.addCallback(lambda x: req.accumulator)
return result
|
136a06274c9cbb34951a7d2b8544328b4a1f4b60
| 19,286 |
def get_header_value(headers, name, default=None):
""" Return header value, doing case-insensitive match """
if not headers:
return default
if isinstance(headers, dict):
headers = headers.items()
name = to_bytes(name.lower())
for k, v in headers:
if name == to_bytes(k.lower()):
return v
return default
|
9ddb9754061554bd59b429b78e472bd514e4c14d
| 19,287 |
def parse_gt_from_anno(img_anno, classes):
"""parse_gt_from_anno"""
print('parse ground truth files...')
ground_truth = {}
for img_name, annos in img_anno.items():
objs = []
for anno in annos:
if anno[1] == 0. and anno[2] == 0. and anno[3] == 0. and anno[4] == 0.:
continue
if int(anno[0]) == -1:
continue
xmin = anno[1]
ymin = anno[2]
xmax = xmin + anno[3] - 1
ymax = ymin + anno[4] - 1
xmin = int(xmin)
ymin = int(ymin)
xmax = int(xmax)
ymax = int(ymax)
cls = classes[int(anno[0])]
gt_box = {'class': cls, 'box': [xmin, ymin, xmax, ymax]}
objs.append(gt_box)
ground_truth[img_name] = objs
return ground_truth
|
63ba02bb0511cdc02245528041257639e764605f
| 19,288 |
def pt_to_tup(pt):
"""
Convenience method to generate a pair of two ints from a tuple or list.
Parameters
----------
pt : list OR tuple
Can be a list or a tuple of >=2 elements as floats or ints.
Returns
-------
pt : tuple of int
A pair of two ints.
"""
    return (int(pt[0]), int(pt[1]))
|
7013b2477959f528b98d364e4cc44ac8700fb366
| 19,289 |
def _operation(m1, m2, op, k):
    """Generalized helper for basic element-wise matrix operations
    ("+", "-", and scalar multiplication "*s" by k) on square n x n matrices."""
n = len(m1)
res = [n*[0] for i in range(n)]
if n == len(m2):
for i in range(n):
for j in range(n):
tab = {
"+" : m1[i][j]+m2[i][j],
"-" : m1[i][j]-m2[i][j],
"*s": m1[i][j]*k,
}
res[i][j] = tab[op]
return res
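# Examples on 2x2 matrices (the scalar k is only used by the "*s" operation, but a
# value must always be passed):
#
#     a = [[1, 2], [3, 4]]
#     b = [[5, 6], [7, 8]]
#     _operation(a, b, "+", 0)     # [[6, 8], [10, 12]]
#     _operation(a, b, "*s", 3)    # [[3, 6], [9, 12]]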
|
5e00ad1a9fbadb9712631450b106b81e5a3413ed
| 19,290 |
def jacobi_d1(x, n, alpha, beta):
"""Evaluate the first derivative of Jacobi polynomial at x using eq. A.1.8
Args:
x: the location where the value will be evaluated
n: the order of Jacobi polynomial
alpha: the alpha parameter of Jacobi polynomial
beta: the beta parameter of Jacobi polynomial
Returns:
the first derivative of Jacobi polynomial at x
Raises:
None
"""
jacobi_check(n, alpha, beta)
if n == 0:
return 0.
else:
return 0.5 * (alpha + beta + n + 1) * \
jacobi_r(x, n - 1, alpha + 1, beta + 1)
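# A quick sanity check, assuming jacobi_r(x, 0, a, b) == 1 (the degree-0 Jacobi
# polynomial): for Legendre polynomials (alpha = beta = 0), P_1(x) = x, so its
# derivative is 1, and eq. A.1.8 gives 0.5 * (0 + 0 + 1 + 1) * 1 = 1.
#
#     jacobi_d1(0.3, 1, 0.0, 0.0)   # 1.0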
|
4a982827916466fad0ed812d2ec3792ca1605f0a
| 19,291 |
def gate_expand_1toN(U, N, target):
"""
    Create a Qobj representing a one-qubit gate that acts on a system with N
qubits.
Parameters
----------
U : Qobj
The one-qubit gate
N : integer
The number of qubits in the target space.
target : integer
The index of the target qubit.
Returns
-------
gate : qobj
Quantum object representation of N-qubit gate.
"""
if N < 1:
raise ValueError("integer N must be larger or equal to 1")
if target >= N:
raise ValueError("target must be integer < integer N")
return tensor([identity(2)] * (target) + [U] +
[identity(2)] * (N - target - 1))
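# A minimal usage sketch, assuming QuTiP provides the module-level `identity` and
# `tensor` used above:
#
#     from qutip import sigmax
#     U3 = gate_expand_1toN(sigmax(), N=3, target=1)
#     U3.dims   # [[2, 2, 2], [2, 2, 2]] -- X on qubit 1, identity elsewhere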
|
efb3d4da51e2f6dc90ba7bcf5e085cb651a4fed0
| 19,292 |
from typing import Dict
from typing import Any
from typing import List
def build_component_dependency_graph(
pipeline_definition: Dict[str, Any], component_definitions: Dict[str, Any]
) -> DiGraph:
"""
Builds a dependency graph between components. Dependencies are:
- referenced components during component build time (e.g. init params)
- predecessor components in the pipeline that produce the needed input
This enables sorting the components in a working and meaningful order for instantiation using topological sorting.
:param pipeline_definition: the definition of the pipeline (e.g. use get_pipeline_definition() to obtain it)
:param component_definitions: the definition of the pipeline components (e.g. use get_component_definitions() to obtain it)
"""
graph = DiGraph()
for component_name, component_definition in component_definitions.items():
params = component_definition.get("params", {})
referenced_components: List[str] = list()
for param_value in params.values():
# Currently we don't do any additional type validation here.
# See https://github.com/deepset-ai/haystack/pull/2253#discussion_r815951591.
if param_value in component_definitions:
referenced_components.append(param_value)
for referenced_component in referenced_components:
graph.add_edge(referenced_component, component_name)
for node in pipeline_definition["nodes"]:
node_name = node["name"]
graph.add_node(node_name)
for input in node["inputs"]:
if input in component_definitions:
# Special case for (actually permitted) cyclic dependencies between two components:
# e.g. DensePassageRetriever depends on ElasticsearchDocumentStore.
# In indexing pipelines ElasticsearchDocumentStore depends on DensePassageRetriever's output.
# But this second dependency is looser, so we neglect it.
if not graph.has_edge(node_name, input):
graph.add_edge(input, node_name)
return graph
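# A minimal sketch of the expected inputs (one component referenced via an init param,
# one pipeline node wired by "inputs"); DiGraph here is networkx's directed graph:
#
#     component_definitions = {
#         "DocumentStore": {"params": {}},
#         "Retriever": {"params": {"document_store": "DocumentStore"}},
#     }
#     pipeline_definition = {"nodes": [{"name": "Retriever", "inputs": ["Query"]}]}
#     graph = build_component_dependency_graph(pipeline_definition, component_definitions)
#     import networkx as nx
#     list(nx.topological_sort(graph))   # ['DocumentStore', 'Retriever']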
|
c70655e4d2b2405d991af43d4a1ad67eb1d8c9d3
| 19,293 |
def count_distribution_artefacts(distribution_artefacts):
"""
Count distribution artefacts in nested list.
:param distribution_artefacts: Nested list containing distribution artefacts mapped to media packages and tenants
:type distribution_artefacts: dict
:return: Amount of distribution artefacts
:rtype: int
"""
return sum([sum([len(distribution_artefacts[tenant][media_package]) for media_package in
distribution_artefacts[tenant].keys()]) for tenant in distribution_artefacts.keys()])
|
b9bc159523e8cbb4745d8b7e8897360f6f9c1960
| 19,294 |
import numpy as np
def nelson_siegel_yield(tau, theta):
    """Evaluate the Nelson-Siegel yield curve at the maturities tau.
    Parameters
    ----------
    tau : array, shape (n_,)
        Times to maturity.
    theta : array, shape (4,)
        Model parameters: theta[0] is the level, theta[1] the slope loading
        (entering with a minus sign here), theta[2] the curvature loading and
        theta[3] the exponential decay rate.
    Returns
    -------
    y : array, shape (n_,)
        Yields at the requested maturities.
    """
y = theta[0] - theta[1] * \
((1 - np.exp(-theta[3] * tau)) /
(theta[3] * tau)) + theta[2] * \
((1 - np.exp(-theta[3] * tau)) /
(theta[3] * tau) - np.exp(-theta[3] * tau))
return np.squeeze(y)
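# A minimal usage sketch, using numpy as imported above:
#
#     tau = np.array([0.25, 1.0, 5.0, 10.0])       # times to maturity in years
#     theta = np.array([0.04, -0.02, 0.01, 0.5])   # level, slope, curvature, decay
#     nelson_siegel_yield(tau, theta)               # yield for each maturity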
|
ba328c7698f088c3e371462b6a92c62517054af5
| 19,295 |
import argparse
def parsing(lst=None):
"""
Function for parsing command line
>>> parsing(["2020", "80", "90", "dataset"])
(2020, 80.0, 90.0, 'dataset')
"""
parser = argparse.ArgumentParser(description="""Module, which reads data from a file\
with a films list, determines films, \
made in the given year, and geolocation of their production places.
Then finds 10 or fewer such nearest to the given point places, makes markers \
for them, and creates a map with a layer of that markers.
Also, there is another layer, which contains markers\
of film shooting places in Ukraine.
You should enter the year of the films' production, the coordinates of the needed point,\
in comparison to which\
nearest films will be displayed (lat, lon), and the path to the dataset with your films.""")
parser.add_argument("year", metavar="Year", type=int, help="Year of films, which\
will be displayed.")
parser.add_argument("latitude", metavar="Latitude", type=float, \
help="Latitude of your point.")
parser.add_argument("longitude", metavar="Longitude", type=float,\
help="Longitude of your point.")
parser.add_argument("path", metavar="Path", help="Path to your dataset.")
if lst:
results = parser.parse_args(lst)
else:
results = parser.parse_args()
universal_message = ", please check your coordinates"
if not -90 <= results.latitude <= 90:
message = "%r not in range [-90, 90]" % (results.latitude,)
raise argparse.ArgumentTypeError(message + universal_message)
    if not -180 <= results.longitude <= 180:
        message = "%r not in range [-180, 180]" % (results.longitude,)
        raise argparse.ArgumentTypeError(message + universal_message)
return results.year, results.latitude, results.longitude, results.path
|
f625f09b31b60b80d91474560ca01b5df92d567c
| 19,296 |
def fix_filename(filename):
"""Replace illegal or problematic characters from a filename."""
return filename.translate(_filename_trans)
|
dc8c8e1f85a7372db97273ae595ff69520824574
| 19,297 |
def QDenseModel(weights_f, load_weights=False):
"""Construct QDenseModel."""
x = x_in = Input((RESHAPED,), name="input")
x = QActivation("quantized_relu(4)", name="act_i")(x)
x = QDense(N_HIDDEN, kernel_quantizer=ternary(),
bias_quantizer=quantized_bits(4, 0, 1), name="dense0")(x)
x = QActivation("quantized_relu(2)", name="act0")(x)
x = QDense(
NB_CLASSES,
kernel_quantizer=quantized_bits(4, 0, 1),
bias_quantizer=quantized_bits(4, 0, 1),
name="dense2")(
x)
x = Activation("softmax", name="softmax")(x)
model = Model(inputs=[x_in], outputs=[x])
model.summary()
model.compile(loss="categorical_crossentropy",
optimizer=OPTIMIZER, metrics=["accuracy"])
if load_weights and weights_f:
model.load_weights(weights_f)
print_qstats(model)
return model
|
c84f591866708ea086e8c22ff333d60381e1f865
| 19,298 |
import os
import json
def _load_config():
"""Load the StreamAlert Athena configuration files
Returns:
dict: Configuration settings by file, includes two keys:
lambda, All lambda function settings
global, StreamAlert global settings
Raises:
ConfigError: For invalid or missing configuration files.
"""
config_files = ('lambda', 'global')
config = {}
for config_file in config_files:
config_file_path = 'conf/{}.json'.format(config_file)
if not os.path.exists(config_file_path):
raise ConfigError('The \'{}\' config file was not found'.format(
config_file_path))
with open(config_file_path) as config_fh:
try:
config[config_file] = json.load(config_fh)
except ValueError:
raise ConfigError('The \'{}\' config file is not valid JSON'.format(
config_file))
return config
|
beef540e97e3322071561009a726c4c7c529f6f6
| 19,299 |