content (stringlengths 35-762k) | sha1 (stringlengths 40-40) | id (int64 0-3.66M) |
---|---|---|
import json
import os
import numpy as np
def fontwidth(string, font='sans-serif'):
    """Return the px width of a string assuming a base font size of 16px."""
_fontwidth = json.load(open(os.path.join(abs_path(), 'fonts.json'), encoding='utf-8'))
codes_len = 127
default_width = 32
default_width_idx = 120
for _fontrow in _fontwidth:
        _fontrow['widths'] = np.array(_fontrow['widths'], dtype=float)
        # Prepend zero widths for the 32 non-printable ASCII control characters
        _fontrow['widths'] = np.insert(_fontrow['widths'], 0, np.zeros(default_width))
# Add the first font stack at the end, making it the default
_fontwidth.append(_fontwidth[0])
# Convert all characters to ASCII codes. Treat Unicode as single char
    codes = np.frombuffer(string.encode('ascii', 'replace'), dtype=np.uint8)
# Drop everything that's out of bounds. We'll adjust for them later
valid = codes[codes < codes_len]
# Get the font
for row in _fontwidth:
if font in row['family']:
break
# Compute and return the width, defaulting unknowns to 'x' (char 120)
widths = row['widths']
return widths[valid].sum() + widths[default_width_idx] * (len(codes) - len(valid)) | 1fa6b71881c8711908935bc97a06b7cbbca0e809 | 5,600 |
from torch import nn
def create_model():
    """ResNet34-inspired analog model.
    Returns:
        nn.Module: the created model
    """
block_per_layers = (3, 4, 6, 3)
base_channel = 16
channel = (base_channel, 2*base_channel, 4*base_channel)
l0 = nn.Sequential(
nn.Conv2d(3, channel[0], kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(channel[0]),
nn.ReLU()
)
l1 = nn.Sequential(*concatenate_layer_blocks(channel[0], channel[0], block_per_layers[0],
first_layer=True))
l2 = nn.Sequential(*concatenate_layer_blocks(channel[0], channel[1], block_per_layers[1]))
l3 = nn.Sequential(*concatenate_layer_blocks(channel[1], channel[2], block_per_layers[2]))
l4 = nn.Sequential(
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(),
nn.Linear(channel[2], N_CLASSES)
)
return nn.Sequential(l0, l1, l2, l3, l4) | fa9157c457b31b8bd0c3ca0b5dac8f68734486ec | 5,601 |
from typing import Union
from typing import Optional
def kv_get(key: Union[str, bytes],
*,
namespace: Optional[str] = None) -> bytes:
"""Fetch the value of a binary key."""
if isinstance(key, str):
key = key.encode()
assert isinstance(key, bytes)
return global_state_client.kv_get(key, namespace) | 03be836a3d42f39f2b28b2f3ea557cdf39918bcd | 5,602 |
def construct_model(data, local_settings, covariate_multipliers, covariate_data_spec):
"""Makes a Cascade model from EpiViz-AT settings and data.
Args:
data: An object with both ``age_specific_death_rate`` and ``locations``.
local_settings: A settings object from ``cascade_plan``.
covariate_multipliers (List[EpiVizCovariateMultiplier]): descriptions of
covariate multipliers.
covariate_data_spec (List[EpiVizCovariate]): the covariates themselves.
Some covariates aren't used by covariate multipliers but are
included to calculate hold outs.
Returns:
cascade.model.Model: The model to fit.
"""
ev_settings = local_settings.settings
parent_location_id = local_settings.parent_location_id
default_age_time = dict()
default_age_time["age"] = np.linspace(0, 100, 21)
default_age_time["time"] = np.linspace(1990, 2015, 6)
for kind in ["age", "time"]:
default_grid = getattr(ev_settings.model, f"default_{kind}_grid")
if default_grid is not None:
            default_age_time[kind] = np.sort(np.array(default_grid, dtype=float))
# Use this age and time when a smooth grid doesn't depend on age and time.
single_age = default_age_time["age"][:1]
single_time = [default_age_time["time"][len(default_age_time["time"]) // 2]]
single_age_time = (single_age, single_time)
nonzero_rates = [smooth.rate for smooth in ev_settings.rate]
children = list(data.locations.successors(parent_location_id))
model = Model(
nonzero_rates=nonzero_rates,
parent_location=parent_location_id,
child_location=children,
covariates=covariates_list(covariate_data_spec),
weights=None,
)
construct_model_rates(default_age_time, single_age_time, ev_settings, model)
# No random effects if there is only one child.
if children and len(children) > 1:
construct_model_random_effects(default_age_time, single_age_time, ev_settings, model)
construct_model_covariates(default_age_time, single_age_time, covariate_multipliers, model)
asdr = data.age_specific_death_rate
if ev_settings.model.constrain_omega:
constrain_omega(
default_age_time, asdr, ev_settings, model, parent_location_id, children
)
return model | eb4287dcaceb1cf320bf8761143c96ffadc148b2 | 5,603 |
def set_off():
"""
Turns OFF the lamp.
"""
unicorn.set_status(False)
return OK | e82ab948a1656c343237a11e048c1ed38487353b | 5,604 |
def get_all_active_bets():
"""
Gets all the active bets for all
active discord ritoman users
"""
return session.query(LoLBets).filter(LoLBets.completed == false()).all() | 844b36b695bf67db3cff82711b9e17da3db20c8e | 5,605 |
def get_quantize_pos_min_diffs(inputs, f_min, f_max, q_min, q_max, bit_width):
"""Get quantize pos which makes min difference between float and quantzed. """
with tf.name_scope("GetQuantizePosMinDiffs"):
min_scale_inv = tf.math.divide(f_min, q_min)
max_scale_inv = tf.math.divide(f_max, q_max)
float_scale_inv = tf.math.maximum(min_scale_inv, max_scale_inv)
non_overflow_pos = get_quantize_pos_non_overflow(inputs, f_min, f_max,
q_min, q_max)
def calc_pos():
diffs = []
for i in range(5):
with tf.name_scope("FakeQuantizeWithScale_{}".format(i)):
# fake quantize
scale = tf.math.pow(2.0, non_overflow_pos + i, name="scale")
quantized = dpu_symmetry_quantize(inputs, scale, q_min, q_max)
dequantized = dpu_symmetry_dequantize(quantized, scale, q_min, q_max)
diff = tf.pow(inputs - dequantized, 2)
diff = tf.reduce_sum(diff)
diffs.append(diff)
pos_offset = tf.argmin(diffs)
quantize_pos = non_overflow_pos + tf.cast(pos_offset, tf.float32)
return quantize_pos
return tf.cond(float_scale_inv < 1e-9, lambda: 127.0, calc_pos) | 63cc0b8ac370513ecfbe3068d02299a3d3016638 | 5,606 |
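The search in `calc_pos` above is easier to see outside the TF graph. Below is a minimal NumPy sketch of the same idea, assuming the DPU symmetric quantize/dequantize pair is the usual round-clip / divide-back convention and taking the non-overflow position as given; the function and parameter names are illustrative, not part of the snippet above.

```python
import numpy as np

def min_diff_quantize_pos(x, q_min=-128.0, q_max=127.0, non_overflow_pos=0, search_range=5):
    """Pick the power-of-two position that minimizes the squared quantization error."""
    diffs = []
    for i in range(search_range):
        scale = 2.0 ** (non_overflow_pos + i)
        quantized = np.clip(np.round(x * scale), q_min, q_max)  # fake-quantize
        dequantized = quantized / scale
        diffs.append(np.sum((x - dequantized) ** 2))
    # The position whose fake-quantized values are closest to the float inputs wins
    return non_overflow_pos + int(np.argmin(diffs))

print(min_diff_quantize_pos(np.random.randn(1000).astype(np.float32), non_overflow_pos=4))
```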
def _non_string_elements(x):
"""
Simple helper to check that all values of x are string. Returns all non string elements as (position, element).
:param x: Iterable
:return: [(int, !String), ...]
"""
problems = []
for i in range(0, len(x)):
if not isinstance(x[i], str):
problems.append((i, x[i]))
return problems | 974715622949157693084823a52a88973b51d100 | 5,607 |
from pathlib import Path
def configure_dirs(base_path: str, config_name: str, dataset_name: str) -> str:
"""
Performs configuration of directories for storing vectors
    :param base_path: root directory under which vector data is stored
    :param config_name: name of the configuration subdirectory
    :param dataset_name: name of the dataset subdirectory
:return: Full configuration path
"""
base_path = Path(base_path)
base_path.mkdir(exist_ok=True)
full_path = base_path / config_name
full_path.mkdir(exist_ok=True)
full_path = full_path / dataset_name
full_path.mkdir(exist_ok=True)
return str(full_path) | b4791f836d0414ed582c3093b3dac1727f82a39c | 5,608 |
def config_entry_version_fixture():
"""Define a config entry version fixture."""
return 2 | cac78c1f02668c95ce918d6219cadd5f08ab21c9 | 5,609 |
def edge_dfs(G, source=None, orientation=None):
"""A directed, depth-first-search of edges in `G`, beginning at `source`.
Yield the edges of G in a depth-first-search order continuing until
all edges are generated.
Parameters
----------
G : graph
A directed/undirected graph/multigraph.
source : node, list of nodes
The node from which the traversal begins. If None, then a source
is chosen arbitrarily and repeatedly until all edges from each node in
the graph are searched.
orientation : None | 'original' | 'reverse' | 'ignore' (default: None)
For directed graphs and directed multigraphs, edge traversals need not
respect the original orientation of the edges.
When set to 'reverse' every edge is traversed in the reverse direction.
When set to 'ignore', every edge is treated as undirected.
When set to 'original', every edge is treated as directed.
In all three cases, the yielded edge tuples add a last entry to
indicate the direction in which that edge was traversed.
If orientation is None, the yielded edge has no direction indicated.
The direction is respected, but not reported.
Yields
------
edge : directed edge
A directed edge indicating the path taken by the depth-first traversal.
For graphs, `edge` is of the form `(u, v)` where `u` and `v`
are the tail and head of the edge as determined by the traversal.
For multigraphs, `edge` is of the form `(u, v, key)`, where `key` is
the key of the edge. When the graph is directed, then `u` and `v`
are always in the order of the actual directed edge.
If orientation is not None then the edge tuple is extended to include
the direction of traversal ('forward' or 'reverse') on that edge.
Examples
--------
>>> nodes = [0, 1, 2, 3]
>>> edges = [(0, 1), (1, 0), (1, 0), (2, 1), (3, 1)]
>>> list(nx.edge_dfs(nx.Graph(edges), nodes))
[(0, 1), (1, 2), (1, 3)]
>>> list(nx.edge_dfs(nx.DiGraph(edges), nodes))
[(0, 1), (1, 0), (2, 1), (3, 1)]
>>> list(nx.edge_dfs(nx.MultiGraph(edges), nodes))
[(0, 1, 0), (1, 0, 1), (0, 1, 2), (1, 2, 0), (1, 3, 0)]
>>> list(nx.edge_dfs(nx.MultiDiGraph(edges), nodes))
[(0, 1, 0), (1, 0, 0), (1, 0, 1), (2, 1, 0), (3, 1, 0)]
>>> list(nx.edge_dfs(nx.DiGraph(edges), nodes, orientation="ignore"))
[(0, 1, 'forward'), (1, 0, 'forward'), (2, 1, 'reverse'), (3, 1, 'reverse')]
>>> list(nx.edge_dfs(nx.MultiDiGraph(edges), nodes, orientation="ignore"))
[(0, 1, 0, 'forward'), (1, 0, 0, 'forward'), (1, 0, 1, 'reverse'), (2, 1, 0, 'reverse'), (3, 1, 0, 'reverse')]
Notes
-----
The goal of this function is to visit edges. It differs from the more
familiar depth-first traversal of nodes, as provided by
:func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges`, in
that it does not stop once every node has been visited. In a directed graph
with edges [(0, 1), (1, 2), (2, 1)], the edge (2, 1) would not be visited
if not for the functionality provided by this function.
See Also
--------
:func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges`
"""
nodes = list(G.nbunch_iter(source))
if not nodes:
return
directed = G.is_directed()
kwds = {"data": False}
if G.is_multigraph() is True:
kwds["keys"] = True
# set up edge lookup
if orientation is None:
def edges_from(node):
return iter(G.edges(node, **kwds))
elif not directed or orientation == "original":
def edges_from(node):
for e in G.edges(node, **kwds):
yield e + (FORWARD,)
elif orientation == "reverse":
def edges_from(node):
for e in G.in_edges(node, **kwds):
yield e + (REVERSE,)
elif orientation == "ignore":
def edges_from(node):
for e in G.edges(node, **kwds):
yield e + (FORWARD,)
for e in G.in_edges(node, **kwds):
yield e + (REVERSE,)
else:
raise nx.NetworkXError("invalid orientation argument.")
# set up formation of edge_id to easily look up if edge already returned
if directed:
def edge_id(edge):
# remove direction indicator
return edge[:-1] if orientation is not None else edge
else:
def edge_id(edge):
# single id for undirected requires frozenset on nodes
return (frozenset(edge[:2]),) + edge[2:]
# Basic setup
check_reverse = directed and orientation in ("reverse", "ignore")
visited_edges = set()
visited_nodes = set()
edges = {}
# start DFS
for start_node in nodes:
stack = [start_node]
while stack:
current_node = stack[-1]
if current_node not in visited_nodes:
edges[current_node] = edges_from(current_node)
visited_nodes.add(current_node)
try:
edge = next(edges[current_node])
except StopIteration:
# No more edges from the current node.
stack.pop()
else:
edgeid = edge_id(edge)
if edgeid not in visited_edges:
visited_edges.add(edgeid)
# Mark the traversed "to" node as to-be-explored.
if check_reverse and edge[-1] == REVERSE:
stack.append(edge[0])
else:
stack.append(edge[1])
yield edge | 8e7f1ba137f0392768e5b814a8898842bf2e5c2f | 5,610 |
import hashlib
async def _md5_by_reading(filepath: str, chunk_size: int = DEFAULT_BUFFER_SIZE) -> str:
"""
Compute md5 of a filepath.
"""
file_hash = hashlib.md5()
async with async_open(filepath, "rb") as reader:
async for chunk in reader.iter_chunked(chunk_size):
file_hash.update(chunk)
return file_hash.hexdigest() | d42e3f6ba994bc35c32cab48cdc4b78e44f678d1 | 5,611 |
def client_authenticator_factory(mechanism,password_manager):
"""Create a client authenticator object for given SASL mechanism and
password manager.
:Parameters:
- `mechanism`: name of the SASL mechanism ("PLAIN", "DIGEST-MD5" or "GSSAPI").
- `password_manager`: name of the password manager object providing
authentication credentials.
:Types:
- `mechanism`: `str`
- `password_manager`: `PasswordManager`
:return: new authenticator.
:returntype: `sasl.core.ClientAuthenticator`"""
authenticator=all_mechanisms_dict[mechanism][0]
return authenticator(password_manager) | 93fccb21f71a31fed953f6260f5906b240669033 | 5,612 |
def wait_for_cell_data_connection(
log,
ad,
state,
timeout_value=EventDispatcher.DEFAULT_TIMEOUT):
"""Wait for data connection status to be expected value for default
data subscription.
Wait for the data connection status to be DATA_STATE_CONNECTED
or DATA_STATE_DISCONNECTED.
Args:
log: Log object.
ad: Android Device Object.
state: Expected status: True or False.
If True, it will wait for status to be DATA_STATE_CONNECTED.
            If False, it will wait for status to be DATA_STATE_DISCONNECTED.
timeout_value: wait for cell data timeout value.
This is optional, default value is EventDispatcher.DEFAULT_TIMEOUT
Returns:
True if success.
False if failed.
"""
sub_id = get_default_data_sub_id(ad)
return wait_for_cell_data_connection_for_subscription(log, ad, sub_id,
state, timeout_value) | f2e2474af757c5a36cb054afe1f429638641f2e6 | 5,613 |
import configparser
def _parse_lists(config_parser: configparser.ConfigParser, section: str = '') -> t.Dict:
"""Parses multiline blocks in *.cfg files as lists."""
config = dict(config_parser.items(section))
for key, val in config.items():
if '/' in val and 'parameters' not in section:
config[key] = parse_mars_syntax(val)
elif '\n' in val:
config[key] = _splitlines(val)
return config | d591a9eeb656dff9c4fbbce9964575cd7ce15352 | 5,614 |
def get_filename_pair(filename):
"""
Given the name of a VASF data file (*.rsd) or parameter file (*.rsp) return
a tuple of (parameters_filename, data_filename). It doesn't matter if the
filename is a fully qualified path or not.
- assumes extensions are all caps or all lower
"""
param_filename = data_filename = filename[:-1]
if filename[-1:].isupper():
data_filename += 'D'
param_filename += 'P'
else:
data_filename += 'd'
param_filename += 'p'
return (param_filename, data_filename) | f6eb5a64cf472f230c5806447d9c2ee8ae43a71d | 5,615 |
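Following the logic above, the extension's last letter is swapped between 'd'/'D' (data) and 'p'/'P' (parameters), preserving case; the file names below are made up purely for illustration.

```python
print(get_filename_pair('/scans/brain01.RSD'))  # ('/scans/brain01.RSP', '/scans/brain01.RSD')
print(get_filename_pair('/scans/brain01.rsp'))  # ('/scans/brain01.rsp', '/scans/brain01.rsd')
```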
async def get_reposet(client, headers, reposet_id):
"""Get the reposet by id."""
url = f"https://api.athenian.co/v1/reposet/{reposet_id}"
return await do_request(client, url, headers) | b97262bf1f246bc563abe352f1122a9ad61705c3 | 5,616 |
import sys
import re
import os
import google
def conditions(converted: str) -> bool:
"""Conditions function is used to check the message processed.
Uses the keywords to do a regex match and trigger the appropriate function which has dedicated task.
Args:
converted: Takes the voice recognized statement as argument.
Returns:
bool:
Boolean True only when asked to sleep for conditioned sleep message.
"""
sys.stdout.write(f'\r{converted}')
converted_lower = converted.lower()
todo_checks = ['to do', 'to-do', 'todo']
if any(word in converted_lower for word in keywords.date()) and \
not any(word in converted_lower for word in keywords.avoid()):
current_date()
elif any(word in converted_lower for word in keywords.time()) and \
not any(word in converted_lower for word in keywords.avoid()):
place = ''
for word in converted.split():
if word[0].isupper():
place += word + ' '
elif '.' in word:
place += word + ' '
if place:
current_time(place)
else:
current_time()
elif any(word in converted_lower for word in keywords.weather()) and \
not any(word in converted_lower for word in keywords.avoid()):
place = ''
for word in converted.split():
if word[0].isupper():
place += word + ' '
elif '.' in word:
place += word + ' '
weather_cond = ['tomorrow', 'day after', 'next week', 'tonight', 'afternoon', 'evening']
if any(match in converted_lower for match in weather_cond):
if place:
weather_condition(msg=converted, place=place)
else:
weather_condition(msg=converted)
elif place:
weather(place)
else:
weather()
elif any(word in converted_lower for word in keywords.system_info()):
system_info()
elif any(word in converted for word in keywords.ip_info()) or 'IP' in converted.split():
if 'public' in converted_lower:
if not internet_checker():
speaker.say("You are not connected to the internet sir!")
return False
if ssid := get_ssid():
ssid = f'for the connection {ssid} '
else:
ssid = ''
if public_ip := json_load(urlopen('http://ipinfo.io/json')).get('ip'):
output = f"My public IP {ssid}is {public_ip}"
elif public_ip := json_loads(urlopen('http://ip.jsontest.com').read()).get('ip'):
output = f"My public IP {ssid}is {public_ip}"
else:
output = 'I was unable to fetch the public IP sir!'
else:
ip_address = vpn_checker().split(':')[-1]
output = f"My local IP address for {gethostname()} is {ip_address}"
sys.stdout.write(f'\r{output}')
speaker.say(output)
elif any(word in converted_lower for word in keywords.wikipedia()):
wikipedia_()
elif any(word in converted_lower for word in keywords.news()):
news()
elif any(word in converted_lower for word in keywords.report()):
report()
elif any(word in converted_lower for word in keywords.robinhood()):
robinhood()
elif any(word in converted_lower for word in keywords.repeat()):
repeater()
elif any(word in converted_lower for word in keywords.location()):
location()
elif any(word in converted_lower for word in keywords.locate()):
locate(converted)
elif any(word in converted_lower for word in keywords.gmail()):
gmail()
elif any(word in converted_lower for word in keywords.meaning()):
meaning(converted.split()[-1])
elif any(word in converted_lower for word in keywords.delete_todo()) and \
any(word in converted_lower for word in todo_checks):
delete_todo()
elif any(word in converted_lower for word in keywords.list_todo()):
todo()
elif any(word in converted_lower for word in keywords.add_todo()) and \
any(word in converted_lower for word in todo_checks):
add_todo()
elif any(word in converted_lower for word in keywords.delete_db()):
delete_db()
elif any(word in converted_lower for word in keywords.create_db()):
create_db()
elif any(word in converted_lower for word in keywords.distance()) and \
not any(word in converted_lower for word in keywords.avoid()):
"""the loop below differentiates between two places and one place with two words
eg: New York will be considered as one word and New York and Las Vegas will be considered as two words"""
check = converted.split() # str to list
places = []
for word in check:
if word[0].isupper() or '.' in word: # looks for words that start with uppercase
try:
next_word = check[check.index(word) + 1] # looks if words after an uppercase word is also one
if next_word[0].isupper():
places.append(f"{word + ' ' + check[check.index(word) + 1]}")
else:
if word not in ' '.join(places):
places.append(word)
except IndexError: # catches exception on lowercase word after an upper case word
if word not in ' '.join(places):
places.append(word)
"""the condition below assumes two different words as two places but not including two words starting upper case
right next to each other"""
if len(places) >= 2:
start = places[0]
end = places[1]
elif len(places) == 1:
start = None
end = places[0]
else:
start, end = None, None
distance(start, end)
elif any(word in converted_lower for word in conversation.form()):
speaker.say("I am a program, I'm without form.")
elif any(word in converted_lower for word in keywords.geopy()):
# tries to look for words starting with an upper case letter
place = ''
for word in converted.split():
if word[0].isupper():
place += word + ' '
elif '.' in word:
place += word + ' '
# if no words found starting with an upper case letter, fetches word after the keyword 'is' eg: where is Chicago
if not place:
keyword = 'is'
before_keyword, keyword, after_keyword = converted.partition(keyword)
place = after_keyword.replace(' in', '').strip()
locate_places(place.strip())
elif any(word in converted_lower for word in keywords.directions()):
place = ''
for word in converted.split():
if word[0].isupper():
place += word + ' '
elif '.' in word:
place += word + ' '
place = place.replace('I ', '').strip()
if place:
directions(place)
else:
speaker.say("I can't take you to anywhere without a location sir!")
directions(place=None)
elif any(word in converted_lower for word in keywords.webpage()) and \
not any(word in converted_lower for word in keywords.avoid()):
converted = converted.replace(' In', 'in').replace(' Co. Uk', 'co.uk')
host = (word for word in converted.split() if '.' in word)
webpage(host)
elif any(word in converted_lower for word in keywords.kill_alarm()):
kill_alarm()
elif any(word in converted_lower for word in keywords.alarm()):
alarm(converted_lower)
elif any(word in converted_lower for word in keywords.google_home()):
google_home()
elif any(word in converted_lower for word in keywords.jokes()):
jokes()
elif any(word in converted_lower for word in keywords.reminder()):
reminder(converted_lower)
elif any(word in converted_lower for word in keywords.notes()):
notes()
elif any(word in converted_lower for word in keywords.github()):
auth = HTTPBasicAuth(git_user, git_pass)
response = get('https://api.github.com/user/repos?type=all&per_page=100', auth=auth).json()
result, repos, total, forked, private, archived, licensed = [], [], 0, 0, 0, 0, 0
for i in range(len(response)):
total += 1
forked += 1 if response[i]['fork'] else 0
private += 1 if response[i]['private'] else 0
archived += 1 if response[i]['archived'] else 0
licensed += 1 if response[i]['license'] else 0
repos.append({response[i]['name'].replace('_', ' ').replace('-', ' '): response[i]['clone_url']})
if 'how many' in converted:
speaker.say(f'You have {total} repositories sir, out of which {forked} are forked, {private} are private, '
f'{licensed} are licensed, and {archived} archived.')
else:
[result.append(clone_url) if clone_url not in result and re.search(rf'\b{word}\b', repo.lower()) else None
for word in converted_lower.split() for item in repos for repo, clone_url in item.items()]
if result:
github(target=result)
else:
speaker.say("Sorry sir! I did not find that repo.")
elif any(word in converted_lower for word in keywords.txt_message()):
number = '-'.join([str(s) for s in re.findall(r'\b\d+\b', converted)])
send_sms(number)
elif any(word in converted_lower for word in keywords.google_search()):
phrase = converted.split('for')[-1] if 'for' in converted else None
google_search(phrase)
elif any(word in converted_lower for word in keywords.tv()):
television(converted)
elif any(word in converted_lower for word in keywords.apps()):
apps(converted.split()[-1])
elif any(word in converted_lower for word in keywords.music()):
if 'speaker' in converted_lower:
music(converted)
else:
music()
elif any(word in converted_lower for word in keywords.volume()):
if 'mute' in converted_lower:
level = 0
elif 'max' in converted_lower or 'full' in converted_lower:
level = 100
else:
level = re.findall(r'\b\d+\b', converted) # gets integers from string as a list
level = int(level[0]) if level else 50 # converted to int for volume
volume_controller(level)
speaker.say(f"{choice(ack)}!")
elif any(word in converted_lower for word in keywords.face_detection()):
face_recognition_detection()
elif any(word in converted_lower for word in keywords.speed_test()):
speed_test()
elif any(word in converted_lower for word in keywords.bluetooth()):
bluetooth(phrase=converted_lower)
elif any(word in converted_lower for word in keywords.brightness()) and 'lights' not in converted_lower:
speaker.say(choice(ack))
if 'set' in converted_lower or re.findall(r'\b\d+\b', converted_lower):
level = re.findall(r'\b\d+\b', converted_lower) # gets integers from string as a list
if not level:
level = ['50'] # pass as list for brightness, as args must be iterable
Thread(target=set_brightness, args=level).start()
elif 'decrease' in converted_lower or 'reduce' in converted_lower or 'lower' in converted_lower or \
'dark' in converted_lower or 'dim' in converted_lower:
Thread(target=decrease_brightness).start()
elif 'increase' in converted_lower or 'bright' in converted_lower or 'max' in converted_lower or \
'brighten' in converted_lower or 'light up' in converted_lower:
Thread(target=increase_brightness).start()
elif any(word in converted_lower for word in keywords.lights()):
if not vpn_checker().startswith('VPN'):
lights(converted=converted_lower)
elif any(word in converted_lower for word in keywords.guard_enable() or keywords.guard_disable()):
if any(word in converted_lower for word in keywords.guard_enable()):
logger.info('Enabled Security Mode')
speaker.say(f"Enabled security mode sir! I will look out for potential threats and keep you posted. "
f"Have a nice {part_of_day()}, and enjoy yourself sir!")
speaker.runAndWait()
guard()
elif any(word in converted_lower for word in keywords.flip_a_coin()):
playsound('indicators/coin.mp3')
sleep(0.5)
speaker.say(f"""{choice(['You got', 'It landed on', "It's"])} {choice(['heads', 'tails'])} sir""")
elif any(word in converted_lower for word in keywords.facts()):
speaker.say(getFact(False))
elif any(word in converted_lower for word in keywords.meetings()):
if os.path.isfile('meetings'):
meeting_reader()
else:
if os.environ.get('called_by_offline'):
speaker.say("Meetings file is not ready yet. Please try again in a minute or two.")
return False
meeting = ThreadPool(processes=1).apply_async(func=meetings)
speaker.say("Please give me a moment sir! Let me check your calendar.")
speaker.runAndWait()
try:
speaker.say(meeting.get(timeout=60))
except ThreadTimeoutError:
logger.error('Unable to read the calendar within 60 seconds.')
speaker.say("I wasn't able to read your calendar within the set time limit sir!")
speaker.runAndWait()
elif any(word in converted_lower for word in keywords.voice_changer()):
voice_changer(converted)
elif any(word in converted_lower for word in keywords.system_vitals()):
system_vitals()
elif any(word in converted_lower for word in keywords.vpn_server()):
if vpn_server_check():
speaker.say('An operation for VPN Server is already in progress sir! Please wait and retry.')
elif 'start' in converted_lower or 'trigger' in converted_lower or 'initiate' in converted_lower or \
'enable' in converted_lower or 'spin up' in converted_lower:
Thread(target=vpn_server, args=['START']).start()
speaker.say('VPN Server has been initiated sir! Login details will be sent to you shortly.')
elif 'stop' in converted_lower or 'shut' in converted_lower or 'close' in converted_lower or \
'disable' in converted_lower:
Thread(target=vpn_server, args=['STOP']).start()
speaker.say('VPN Server will be shutdown sir!')
else:
speaker.say("I don't understand the request sir! You can ask me to enable or disable the VPN server.")
elif any(word in converted_lower for word in keywords.personal_cloud()):
if 'enable' in converted_lower or 'initiate' in converted_lower or 'kick off' in converted_lower or \
'start' in converted_lower:
Thread(target=personal_cloud.enable).start()
speaker.say("Personal Cloud has been triggered sir! I will send the login details to your phone number "
"once the server is up and running.")
elif 'disable' in converted_lower or 'stop' in converted_lower:
Thread(target=personal_cloud.disable).start()
speaker.say(choice(ack))
else:
speaker.say("I didn't quite get that sir! Please tell me if I should enable or disable your server.")
elif any(word in converted_lower for word in conversation.greeting()):
speaker.say('I am spectacular. I hope you are doing fine too.')
elif any(word in converted_lower for word in conversation.capabilities()):
speaker.say('There is a lot I can do. For example: I can get you the weather at any location, news around '
'you, meanings of words, launch applications, create a to-do list, check your emails, get your '
'system configuration, tell your investment details, locate your phone, find distance between '
'places, set an alarm, play music on smart devices around you, control your TV, tell a joke, send'
' a message, set reminders, scan and clone your GitHub repositories, and much more. Time to ask,.')
elif any(word in converted_lower for word in conversation.languages()):
speaker.say("Tricky question!. I'm configured in python, and I can speak English.")
elif any(word in converted_lower for word in conversation.whats_up()):
speaker.say("My listeners are up. There is nothing I cannot process. So ask me anything..")
elif any(word in converted_lower for word in conversation.what()):
speaker.say("I'm just a pre-programmed virtual assistant, trying to become a natural language UI.")
elif any(word in converted_lower for word in conversation.who()):
speaker.say("I am Jarvis. A virtual assistant designed by Mr.Raauv.")
elif any(word in converted_lower for word in conversation.about_me()):
speaker.say("I am Jarvis. A virtual assistant designed by Mr.Raauv.")
speaker.say("I'm just a pre-programmed virtual assistant, trying to become a natural language UI.")
speaker.say("I can seamlessly take care of your daily tasks, and also help with most of your work!")
elif any(word in converted_lower for word in keywords.sleep()):
if 'pc' in converted_lower or 'computer' in converted_lower or 'imac' in converted_lower or \
'screen' in converted_lower:
pc_sleep()
else:
speaker.say("Activating sentry mode, enjoy yourself sir!")
if greet_check:
greet_check.pop('status')
return True
elif any(word in converted_lower for word in keywords.restart()):
if 'pc' in converted_lower or 'computer' in converted_lower or 'imac' in converted_lower:
logger.info(f'JARVIS::Restart for {host_info("model")} has been requested.')
restart(target='PC')
else:
logger.info('JARVIS::Self reboot has been requested.')
if 'quick' in converted_lower or 'fast' in converted_lower:
restart(quick=True)
else:
restart()
elif any(word in converted_lower for word in keywords.kill()) and \
not any(word in converted_lower for word in keywords.avoid()):
raise KeyboardInterrupt
elif any(word in converted_lower for word in keywords.shutdown()):
shutdown()
elif any(word in converted_lower for word in keywords.chatbot()):
chatter_bot()
else:
logger.info(f'Received the unrecognized lookup parameter: {converted}')
Thread(target=unrecognized_dumper, args=[converted]).start() # writes to training_data.yaml in a thread
if alpha(converted):
if google_maps(converted):
if google(converted):
# if none of the conditions above are met, opens a google search on default browser
sys.stdout.write(f"\r{converted}")
if google_maps.has_been_called:
google_maps.has_been_called = False
speaker.say("I have also opened a google search for your request.")
else:
speaker.say(f"I heard {converted}. Let me look that up.")
speaker.runAndWait()
speaker.say("I have opened a google search for your request.")
search = str(converted).replace(' ', '+')
unknown_url = f"https://www.google.com/search?q={search}"
web_open(unknown_url) | 3113a35b0a77fff3531238c0d0223ca806a5dba0 | 5,617 |
import platform
def detect_os_flavour(os_type):
"""Detect Linux flavours and return the current version"""
if os_type:
# linux
try:
return platform.linux_distribution()[0]
        except Exception:
return None
else:
# windows
return platform.platform() | 4ab3ebec3683fc99a99e70540ea29d049b54347d | 5,618 |
def straightenImage(im, imextent, mvx=1, mvy=None, verbose=0, interpolation=cv2_interpolation):
""" Scale image to make square pixels
Arguments
---------
im: array
input image
    imextent: list of 4 floats
coordinates of image region (x0, x1, y0, y1)
mvx, mvy : float
number of mV per pixel requested
Returns
-------
ims: numpy array
transformed image
(fw, fh, mvx, mvy, H) : data
H is the homogeneous transform from original to straightened image
"""
if cv2 is None:
raise Exception('opencv is not installed, method straightenImage is not available')
dxmv = imextent[1] - imextent[0]
dymv = imextent[3] - imextent[2]
dx = im.shape[1]
dy = im.shape[0]
mvx0 = dxmv / float(dx - 1) # mv/pixel
mvy0 = dymv / float(dy - 1)
if mvy is None:
mvy = mvx
fw = np.abs((float(mvx0) / mvx))
fh = np.abs((float(mvy0) / mvy))
if fw < .5:
fwx = fw
fac = 1
ims = im
while (fwx < .5):
ims = cv2.resize(
ims, None, fx=.5, fy=1, interpolation=cv2.INTER_LINEAR)
fwx *= 2
fac *= 2
ims = cv2.resize(
ims, None, fx=fac * fw, fy=fh, interpolation=interpolation)
else:
ims = cv2.resize(im, None, fx=fw, fy=fh, interpolation=interpolation)
if verbose:
print('straightenImage: size %s fx %.4f fy %.4f' % (im.shape, fw, fh))
print('straightenImage: result size %s mvx %.4f mvy %.4f' % (ims.shape, mvx, mvy))
H = pgeometry.pg_transl2H([-.5, -.5]) .dot(np.diag([fw, fh, 1]).dot(pgeometry.pg_transl2H([.5, .5])))
return ims, (fw, fh, mvx, mvy, H) | ab46e394011a8a2d9ed8974504e4e28b725ead78 | 5,619 |
import math
def H(r2, H_s, H_d, a_s, a_d, gamma_s, gamma_d, G, v):
"""
"""
pi = math.pi
sqrt = math.sqrt
r = sqrt(r2)
H2_s = H_s**2
H2_d = H_d**2
R2_s = r2 + H2_s
R2_d = r2 + H2_d
alpha_s = 1.0 if gamma_s == 1.0 else 4 * H2_s / (pi*R2_s)
alpha_d = 1.0 if gamma_d == 1.0 else 4 * H2_d / (pi*R2_d)
f_s = a_s**3 * alpha_s * (1-v) / (G * (H2_s+r2)**1.5)
f_d = a_d**3 * alpha_d * (1-v) / (G * (H2_d+r2)**1.5)
H = [
[ r*f_s, r*f_d ], # the radial H
[ H_s*f_s, H_d*f_d ] # the vertical H
]
return H | 0fa1606212278def22075692a56468d41a8c7a3c | 5,620 |
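The docstring above is empty; reading the quantities straight off the code, and assuming `G` and `v` are the shear modulus and Poisson's ratio, each source (subscript s shown, d analogous) contributes

```latex
R_s^2 = r^2 + H_s^2, \qquad
\alpha_s =
\begin{cases}
1 & \gamma_s = 1 \\[4pt]
\dfrac{4 H_s^2}{\pi R_s^2} & \text{otherwise}
\end{cases}, \qquad
f_s = \frac{a_s^3 \, \alpha_s \, (1 - v)}{G \, (H_s^2 + r^2)^{3/2}},
```

and the returned matrix stacks the radial (first row) and vertical (second row) components:

```latex
H = \begin{bmatrix} r f_s & r f_d \\ H_s f_s & H_d f_d \end{bmatrix}.
```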
from datetime import datetime
def parse_time(event_time):
"""Take a string representation of time from the blockchain, and parse it into datetime object."""
return datetime.strptime(event_time, '%Y-%m-%dT%H:%M:%S') | df5af3a20acbeaa8e7424291d26055d3f38219ed | 5,621 |
import os
def do_pdftk_cat_first_page(pdf_file):
"""The cmd_args magick identify is very slow on page pages hence it
examines every page. We extract the first page to get some informations
about the dimensions of the PDF file."""
output_file = os.path.join(tmp_dir, 'identify.pdf')
cmd_args = ['pdftk', str(pdf_file), 'cat', '1', 'output', output_file]
run.run(cmd_args)
return output_file | f0c0b01411bcc6e66bc2450a5b81f2c1b7dd33ca | 5,622 |
def addBenchmark(df):
"""Add benchmark to df."""
# Compute the inverse of the distance
distance_inv = (1. / df.filter(regex='^distance*', axis=1)).values
    # Extract the values at the neighbouring stations
    values = df.filter(regex='value_*', axis=1)
    # Inverse-distance weighted sum, excluding zero values from the weights
numer = (distance_inv * values).sum(axis=1)
denom = (distance_inv * (values != 0)).sum(axis=1)
# Compute the benchmark
benchmark = numer / denom
df["Benchmark"] = benchmark
return df | 62c63215d622c46bed8200f97ad55b985e2beb20 | 5,623 |
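In formula form, the benchmark computed above is an inverse-distance-weighted average of the station values, where stations reporting a zero value are excluded from the normalisation (here $d_{ij}$ are the `distance*` columns and $v_{ij}$ the `value_*` columns):

```latex
\text{Benchmark}_i = \frac{\sum_j v_{ij} / d_{ij}}{\sum_j \mathbf{1}[v_{ij} \neq 0] / d_{ij}}
```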
def is_file_like(f):
"""Check to see if ```f``` has a ```read()``` method."""
return hasattr(f, 'read') and callable(f.read) | 9eee8c8f4a6966d1db67fb4aa9149e2fbd390fb9 | 5,624 |
def _string_to_days_since_date(dateStrings, referenceDate='0001-01-01'):
"""
Turn an array-like of date strings into the number of days since the
reference date
"""
dates = [_string_to_datetime(string) for string in dateStrings]
days = _datetime_to_days(dates, referenceDate=referenceDate)
days = np.array(days)
return days | 114883cf0f2e48812a580c4cd8ae64671a4fc126 | 5,625 |
def safe_as_int(val, atol=1e-3):
"""
Attempt to safely cast values to integer format.
Parameters
----------
val : scalar or iterable of scalars
Number or container of numbers which are intended to be interpreted as
integers, e.g., for indexing purposes, but which may not carry integer
type.
atol : float
Absolute tolerance away from nearest integer to consider values in
``val`` functionally integers.
Returns
-------
val_int : NumPy scalar or ndarray of dtype `cupy.int64`
Returns the input value(s) coerced to dtype `cupy.int64` assuming all
were within ``atol`` of the nearest integer.
Notes
-----
This operation calculates ``val`` modulo 1, which returns the mantissa of
all values. Then all mantissas greater than 0.5 are subtracted from one.
Finally, the absolute tolerance from zero is calculated. If it is less
than ``atol`` for all value(s) in ``val``, they are rounded and returned
in an integer array. Or, if ``val`` was a scalar, a NumPy scalar type is
returned.
If any value(s) are outside the specified tolerance, an informative error
is raised.
Examples
--------
>>> safe_as_int(7.0)
7
>>> safe_as_int([9, 4, 2.9999999999])
array([9, 4, 3])
>>> safe_as_int(53.1)
Traceback (most recent call last):
...
ValueError: Integer argument required but received 53.1, check inputs.
>>> safe_as_int(53.01, atol=0.01)
53
"""
mod = np.asarray(val) % 1 # Extract mantissa
# Check for and subtract any mod values > 0.5 from 1
if mod.ndim == 0: # Scalar input, cannot be indexed
if mod > 0.5:
mod = 1 - mod
else: # Iterable input, now ndarray
mod[mod > 0.5] = 1 - mod[mod > 0.5] # Test on each side of nearest int
try:
np.testing.assert_allclose(mod, 0, atol=atol)
except AssertionError:
raise ValueError(
"Integer argument required but received "
"{0}, check inputs.".format(val)
)
return np.around(val).astype(np.int64) | cbaff1fb1568fd43a4dd3a7a2054a805788b912c | 5,626 |
def check_protocol(protocol):
"""
Check if a given protocol works by computing the qubit excitation probabilities
"""
qubit_weight = {}
qubit_weight[protocol[0][0][0]] = 1.0
for pair_set in protocol:
for i, j, p in pair_set:
qubit_weight[j] = qubit_weight[i] * (1.0 - p)
qubit_weight[i] *= p
return qubit_weight | 8b9d0a8e329a340718d37bc79066be4a05cf2d20 | 5,627 |
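A small made-up example of how the weights propagate: each tuple (i, j, p) keeps a fraction p of qubit i's weight and moves the remaining (1 - p) onto qubit j, starting from all weight on the first qubit of the protocol.

```python
protocol = [[(0, 1, 0.5)], [(0, 2, 0.5), (1, 3, 0.5)]]
print(check_protocol(protocol))
# {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25} -- a balanced protocol excites all four qubits equally
```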
def detachVolume(**kargs):
""" detachVolume your Additional Volume
* Args:
- zone(String, Required) : [KR-CA, KR-CB, KR-M, KR-M2]
- id(String, Required) : Volume disk ID
* Examples : print(server.detachVolume(zone='KR-M', id='7f933f86-e8bf-4600-9423-09e8f1c84460'))
"""
my_apikey, my_secretkey = c.read_config()
if not 'zone' in kargs:
return c.printZoneHelp()
if not 'id' in kargs:
return '[ktcloud] Missing required argument \"id\" (disk volume id)'
kargs['zoneid'] = c.getzoneidbyhname(kargs['zone'])
M2Bool = c.IsM2(kargs['zone'])
del kargs['zone']
baseurl = c.geturl(ctype='server', m2=M2Bool)
kargs['command'] = 'detachVolume'
kargs['response'] = 'json'
kargs['apikey'] = my_apikey
return c.makerequest(kargs, baseurl, my_secretkey) | 9c837559052fb41f4e40d18c211a497c7de3ca63 | 5,628 |
from typing import List
from typing import Type
from typing import Optional
from typing import Dict
from typing import Any
from typing import Callable
import time
import traceback
def observe(metric: str,
accept_on: List[Type[Exception]] = [], # pylint: disable=E1136
decline_on: List[Type[Exception]] = [], # pylint: disable=E1136
static_tags: List[str] = [], # pylint: disable=E1136
tags_from: Optional[Dict[str, List[str]]] = None, # pylint: disable=E1136
trace_id_from: Optional[Dict[str, str]] = None) -> Any: # pylint: disable=E1136
"""This operator will, based on the provided setup generate logs, metrics, notifications on each call for that execution.
Args:
metric (str): The root-metric which will be updated during execution in e.g. DogStatsd.
accept_on (Optional[List[Exception]], optional): A list of exceptions on which the message will be acknowledged.
decline_on (Optional[List[Exception]], optional): A list of exceptions on which the message will be declined.
static_tags (Optional[List[str]], optional): A list of tags to be appended on each metric update.
tags_from (Optional[Dict[str, List[str]]], optional): A list of tags to be dynamically extracted from the key dictionary.
trace_id_from (Optional[Dict[str, str]], optional): A trace_id to be appended on each log from the key dictionary.
"""
def arrange(func: Callable[..., Any]):
@wraps(func)
def inner(*args: Any, **kwargs: Any) -> Any:
# setup tracing and tags
trace_id = Resolver.resolve_trace_id(trace_id_from=trace_id_from, **kwargs)
identity = Resolver.resolve_identity(*args, func=func, trace_id=trace_id)
additional_tags = Resolver.resolve_tags_from(tags_from=tags_from, **kwargs)
all_tags = additional_tags + static_tags
imetric = Provider.get_metric(*args)
# start timing
time_start: float = time.monotonic()
try:
# actual function execution
response: Any = func(*args, **kwargs)
# calculate process time
process_time = int(time.monotonic() - time_start) * 1000
# append extra tags
all_tags.append(Resolver.resolve_observed_sli_tag(process_time=process_time))
# send metrics, finished successfully
imetric.timing("%s.time.finished" % metric, process_time, all_tags)
imetric.gauge("%s.time_gauge.finished" % metric, process_time, all_tags)
imetric.increment("%s.finished" % metric, 1, all_tags)
except Exception as ex:
# calculate process time
process_time = int(time.monotonic() - time_start) * 1000
# append extra tags
all_tags.append('exception:%s' % type(ex).__name__)
all_tags.append(Resolver.resolve_observed_sli_tag(process_time=process_time))
# accept on, returns True
if type(ex) in accept_on:
# log warning
Provider.get_logger(*args).warning("%s: %s(%s) during '%s' accepted.\n%s" % (
identity, type(ex).__name__, ex, func.__name__, traceback.format_exc()))
# send metrics, raised but accepted
imetric.timing("%s.time.accepted" % metric, process_time, all_tags)
imetric.gauge("%s.time_gauge.accepted" % metric, process_time, all_tags)
imetric.increment('%s.exception.accepted' % metric, 1, all_tags)
# return truthy, to be acknowledged
return True
# decline on, returns False
if type(ex) in decline_on:
# log error
Provider.get_logger(*args).error("%s: %s(%s) during '%s' declined.\n%s" % (
identity, type(ex).__name__, ex, func.__name__, traceback.format_exc()))
# send metrics, raised but declined
imetric.timing("%s.time.declined" % metric, process_time, all_tags)
imetric.gauge("%s.time_gauge.declined" % metric, process_time, all_tags)
imetric.increment('%s.exception.declined' % metric, 1, all_tags)
# return falsy, not to be acknowledged
return False
# unhandled exception, log error
Provider.get_logger(*args).error("%s: %s(%s) during '%s' raised.\n%s" % (
identity, type(ex).__name__, ex, func.__name__, traceback.format_exc()))
# send metrics, raised and unhandled
imetric.timing("%s.time.raised" % metric, process_time, all_tags)
imetric.gauge("%s.time_gauge.raised" % metric, process_time, all_tags)
imetric.increment('%s.exception.raised' % metric, 1, all_tags)
# check if notification client available
slack = Provider.get_slack(*args)
if slack:
# notify
slack.error(header=identity, title=type(ex).__name__, text=f"{ex}\n{traceback.format_exc()}")
# re-raise
raise ex
finally:
# send metric, start
imetric.increment("%s.start" % metric, 1, all_tags)
# return actual response of the function
return response
return inner
return arrange | 501fbaf8b4b3f77e334d7579834162f0393d1b5d | 5,629 |
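A minimal usage sketch of the decorator above; the metric name, exception classes and tag/trace keys are invented for illustration, and the `Provider`/`Resolver` wiring referenced in the snippet is assumed to be configured elsewhere.

```python
@observe(metric='billing.charge',
         accept_on=[KeyError],           # acknowledge and drop malformed messages
         decline_on=[ConnectionError],   # requeue on transient infrastructure errors
         static_tags=['service:billing'],
         tags_from={'body': ['customer_id']},
         trace_id_from={'body': 'trace_id'})
def handle_charge(body: dict) -> bool:
    # actual message handling goes here
    return True
```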
import pathlib
import os
def append_subdirs_to_mypy_paths(root_directory: str) -> str:
"""
    Appends all immediate subdirs of the root_directory to the MYPYPATH, separated by a colon ':' TODO: Windows ?
    In order to be able to use that in a shellscript (because the ENV of the subshell gets lost)
    we also return it as a string. This is already in preparation to replace the testloop shellscript
with a python script.
>>> # Setup
>>> save_mypy_path = get_env_data(env_variable='MYPYPATH')
>>> # Test
>>> append_subdirs_to_mypy_paths(str(pathlib.Path(__file__).parent.parent.resolve()))
'...'
>>> assert str(pathlib.Path(__file__).parent.resolve()) in get_env_data(env_variable='MYPYPATH')
>>> append_subdirs_to_mypy_paths('non_existing')
''
>>> # Teardown
>>> set_env_data(env_variable='MYPYPATH', env_str=save_mypy_path)
"""
path_root_directory = pathlib.Path(root_directory).resolve()
if not path_root_directory.is_dir():
logger.warning(f'add mypy paths : the given root directory "{path_root_directory}" does not exist')
return ''
l_subdirs = [str(path_root_directory / _dir) for _dir in next(os.walk(path_root_directory))[1]]
str_current_mypy_paths = get_env_data(env_variable='MYPYPATH')
if str_current_mypy_paths:
l_subdirs.insert(0, str_current_mypy_paths)
str_new_mypy_paths = ':'.join(l_subdirs)
set_env_data(env_variable='MYPYPATH', env_str=str_new_mypy_paths)
return str_new_mypy_paths | d7b3e600c73eb6c9c968060f4162620d996144b3 | 5,630 |
import numpy
def spherical_to_cartesian(lons, lats, depths):
"""
Return the position vectors (in Cartesian coordinates) of list of spherical
coordinates.
For equations see: http://mathworld.wolfram.com/SphericalCoordinates.html.
Parameters are components of spherical coordinates in a form of scalars,
lists or numpy arrays. ``depths`` can be ``None`` in which case it's
considered zero for all points.
:returns:
``numpy.array`` of 3d vectors representing points' coordinates in
Cartesian space. The array has the same shape as parameter arrays.
In particular it means that if ``lons`` and ``lats`` are scalars,
the result is a single 3d vector. Vector of length ``1`` represents
distance of 1 km.
See also :func:`cartesian_to_spherical`.
"""
phi = numpy.radians(lons)
theta = numpy.radians(lats)
if depths is None:
rr = EARTH_RADIUS
else:
rr = EARTH_RADIUS - numpy.array(depths)
cos_theta_r = rr * numpy.cos(theta)
xx = cos_theta_r * numpy.cos(phi)
yy = cos_theta_r * numpy.sin(phi)
zz = rr * numpy.sin(theta)
vectors = numpy.array([xx.transpose(), yy.transpose(), zz.transpose()]) \
.transpose()
return vectors | 107899c23eeb7fb2cf79fbaa6650b4584543260a | 5,631 |
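Written out, the conversion above computes, with φ and θ the longitude and latitude in radians, d the depth in km and R the Earth radius in km:

```latex
x = (R - d)\cos\theta\cos\phi, \qquad
y = (R - d)\cos\theta\sin\phi, \qquad
z = (R - d)\sin\theta
```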
import time
def get_remote_webdriver(hub_url, browser, browser_ver, test_name):
"""
    This function returns a remote web-driver instance created on the
    Selenoid machine.
:param hub_url
:param browser: browser name
:param browser_ver: version for browser
:param test_name: test name
:return: remote web-driver instance for specified browser
"""
test_name = browser + browser_ver + "_" + test_name + "-" + time.strftime(
"%m_%d_%y_%H_%M_%S", time.localtime())
driver_local = None
desired_capabilities = {
"version": browser_ver,
"enableVNC": True,
"enableVideo": True,
"enableLog": True,
"videoName": test_name + ".mp4",
"logName": test_name + ".log",
"name": test_name,
"timeZone": "Asia/Kolkata",
"sessionTimeout": "180s"
}
if browser == 'firefox':
profile = webdriver.FirefoxProfile()
profile.set_preference("dom.disable_beforeunload", True)
desired_capabilities["browserName"] = "firefox"
desired_capabilities["requireWindowFocus"] = True
desired_capabilities["enablePersistentHover"] = False
driver_local = webdriver.Remote(
command_executor=hub_url,
desired_capabilities=desired_capabilities, browser_profile=profile)
elif browser == 'chrome':
options = Options()
options.add_argument("--window-size=1280,1024")
desired_capabilities["browserName"] = "chrome"
driver_local = webdriver.Remote(
command_executor=hub_url,
desired_capabilities=desired_capabilities, options=options)
else:
print("Specified browser does not exist.")
# maximize browser window
driver_local.maximize_window()
# driver_local.implicitly_wait(2)
return driver_local | 2f467f38f2fda6e7f95343e842ea42c3bb551181 | 5,632 |
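A hedged usage sketch of the helper above; the hub URL and test name are made up, and a running Selenoid hub with the requested browser image is assumed.

```python
driver = get_remote_webdriver(
    hub_url="http://selenoid.example.com:4444/wd/hub",  # hypothetical hub endpoint
    browser="chrome",
    browser_ver="90.0",
    test_name="login_smoke",
)
try:
    driver.get("https://example.com")
finally:
    driver.quit()
```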
def process_images(rel_root_path, item_type, item_ids, skip_test, split_attr,
gen_image_specs_func, trafo_image_func,
trafo_image_extra_kwargs=None, img_obj_type=None,
img_attr=None, dimensions=(256, 256),
max_valset_size=10000):
"""
This function downloads all photos which are part of the
dataset. This is a general function which can be used for lots of different
layers.
It returns a dictionary which contains the downloaded image paths.
Key: dataset split identifier, can be 'E', 'V', 'R'
Value: tuple of (item indexes in the item_ids array, corresponding image paths)
:param rel_root_path: The root path of the photos and generated training
files relative to the Caffe root path.
:param item_type: The type of the model class for the items which are
classified (e.g. FgPhoto). This class should have 'photo',
'matclass_dataset_split' attributes/properties. The photo attribute should
have most of the Photo model's fields. It is advised to use an actual Photo
instance here. The matclass_dataset_split attribute should indicate in
which dataset split this item is in. The possible dataset splits are 'E'
(test), 'V' (validation), 'R' (training).
:param item_ids: List (or numpy array) of ids into the :ref:`item_type`
table. It should contain the training, validation and test set.
:param skip_test: If true, skip generating file and downloading images for
the test split.
:param split_attr: The attribute name which represents the dataset split in
the database. It should be one character, 'E' meaning test, 'V' meaning
validation, 'R' meaning training.
:param gen_image_specs_func: Function which generates an id, photo id, image
path triplet for each item which we later use to download the images.
:param trafo_image_func: If None, we don't apply any transformation on the
images. Function which transforms an image given the image path and the
extra parameters, it should return the path of the transformed image, which
can be the original image path or a new path.
:ref:`trafo_image_extra_kwargs` will be passed as extra parameters to this function.
:param trafo_image_extra_kwargs: Extra keyword arguments which will be passed to
:ref:`trafo_image_func` function. All of them should be a list which has the
same order as :ref:`item_ids`.
:param img_obj_type: The type of the model class which holds an image.
:param img_attr: The attribute of `img_obj_type` which holds the image.
:param dimensions: The dimensions to resize the downloaded images to. If
None, keep the image as original size.
:param max_valset_size: The maximum size for the validation set.
"""
item_id_to_idx = {id: idx for idx, id in enumerate(item_ids)}
abbr, fnames = get_abbr_fname(skip_test)
# The return value
image_data = {}
for mc_ds_s, fname in zip(abbr, fnames):
data_path = os.path.join(rel_root_path, 'data')
ensuredir(os.path.join(settings.CAFFE_ROOT, data_path))
        print('Generating split file and downloading images for {} split...'.format(fname))
        print('Generating a list of images to download...')
image_specs = []
for item_ids_batch in progress_bar(iter_batch(item_ids, 10000)):
# Note that the order is not going to be the same as
# item_ids_batch, so we expect the data layer to shuffle the data!
items_split = (
item_type.objects.
filter(**{split_attr: mc_ds_s}).
filter(id__in=item_ids_batch).
order_by()
)
# A list of item_id, image_url, image_path tuples
image_specs += gen_image_specs_func(data_path, items_split)
if not image_specs:
image_data[mc_ds_s] = ([], [])
continue
# We want the validation step to finish in tractable time, so we have a
# maximum threshold on the validation set size
if mc_ds_s == 'V' and len(image_specs) > max_valset_size:
            print('Sampling {} images to reduce the size of the validation set...'.format(max_valset_size))
# For reproducibility
random.seed(125)
image_specs = random.sample(image_specs, max_valset_size)
item_ids_perm, img_obj_ids, image_paths_list = zip(*image_specs)
# A corresponding list of indices into the item_ids array
item_idxs = [item_id_to_idx[item_id] for item_id in item_ids_perm]
# Add caffe root to all paths for downloading
full_image_paths_list = [
[
os.path.join(settings.CAFFE_ROOT, ip)
for ip in ipl
]
for ipl in image_paths_list
]
# Downloading images
download_images(
item_type=img_obj_type,
item_ids=list(itertools.chain.from_iterable(img_obj_ids)),
img_attr=img_attr,
image_paths=list(itertools.chain.from_iterable(full_image_paths_list)),
format='JPEG',
dimensions=dimensions,
)
if trafo_image_func:
            print('Transforming images...')
new_image_paths_list = []
new_item_idxs = []
for item_idx, image_paths, full_image_paths in progress_bar(zip(item_idxs, image_paths_list, full_image_paths_list)):
new_image_paths = trafo_image_func(
image_paths,
full_image_paths,
**index_kwargs(trafo_image_extra_kwargs, item_idx)
)
if not new_image_paths:
                    print(':( {}'.format(full_image_paths))
continue
new_image_paths_list.append(new_image_paths)
new_item_idxs.append(item_idx)
image_paths_list = new_image_paths_list
item_idxs = new_item_idxs
image_data[mc_ds_s] = (item_idxs, image_paths_list)
return image_data | 9d9d6d53ed6a93c2c8b3f7c70d6cda54277b42b1 | 5,633 |
import typing
import os
def evaluate(config: Config) -> typing.Dict[str, float]:
"""
Load and evaluate model on a list generator
Return:
dict of metrics for the model run
"""
logger.info('Running evaluation process...')
net_name = config.net_name
pp_dir = config.paths['preprocess_dir']
pr_dir = config.paths['processed_dir']
model_path = os.path.join(pr_dir,
f"{config.net_name}.h5")
logger.info('Loading model...')
custom_object = {}
custom_object['rank_hinge_loss'] = losses.rank_hinge_loss
model = load_model(model_path,
custom_objects=custom_object)
logger.info('Loading preprocessed test data...')
processed_test = datapack.load_datapack(pp_dir,
name=net_name + "_test")
generator_test = generators.ListGenerator(processed_test,
stage='train')
res = {}
res['MAP'] = 0.0
    res['NDCG@3'] = 0.0
    res['NDCG@5'] = 0.0
num_valid = 0
logger.info('Evaluating model...')
for i in range(len(generator_test)):
input_data, y_true = generator_test[i]
y_pred = model.predict(input_data,
batch_size=len(y_true),
verbose=0)
res['MAP'] += mean_average_precision(y_true, y_pred)
        res['NDCG@3'] += ndcg(3)(y_true, y_pred)
        res['NDCG@5'] += ndcg(5)(y_true, y_pred)
num_valid += 1
logger.info('\t'.join(
[f"{k}={v / num_valid:.3f}" for k, v in res.items()]))
return res | 5773acd85bfacc4170fdf4e961a6a39141d32639 | 5,634 |
from typing import Callable
def get_transform_dict(args, strong_aug: Callable):
"""
Generates dictionary with transforms for all datasets
Parameters
----------
args: argparse.Namespace
Namespace object that contains all command line arguments with their corresponding values
strong_aug: Callable
Callable object implementing the applied strong augmentation strategy, i.e. RandAugment or CTAugment
(not implemented yet).
Returns
-------
transform_dict: Dict
Dictionary containing transforms for the labeled train set, unlabeled train set
and the validation / test set
"""
img_size = IMG_SIZE[args.dataset]
padding = int(0.125 * img_size)
return {
"train": FixMatchTransform.labeled(args.dataset, img_size, padding),
"train_unlabeled": FixMatchTransform.unlabeled(args.dataset, strong_aug, img_size, padding),
"test": get_normalizer(args.dataset),
} | 17ecc3ee611a9fa73176f9a9f354e293d7e4cc39 | 5,635 |
def choose_first_not_none(*args):
""" Choose first non None alternative in args.
:param args: alternative list
:return: the first non None alternative.
"""
for a in args:
if a is not None:
return a
return None | fe3efba85251161cd0a6ecb50583cc443cd04dc0 | 5,636 |
def _format_compact(value, short=True):
"""Compact number formatting using proper suffixes based on magnitude.
Compact number formatting has slightly idiosyncratic behavior mainly due to
two rules. First, if the value is below 1000, the formatting should just be a
2 digit decimal formatting. Second, the number is always truncated to leave at
least 2 digits. This means that a number with one digit more than the
magnitude, such as 1250, is still left with 1.2K, whereas one more digit would
leave it without the decimal, such as 12500 becoming 12K.
Args:
value: The value to format.
short: Whether to use the short form suffixes or long form suffixes.
Returns:
A formatted number as a string.
"""
if value < 1000:
return '{0:.2f}'.format(value).rstrip('0').rstrip('.')
suffixes = _SHORT_SUFFIXES if short else _LONG_SUFFIXES
for key, suffix in sorted(suffixes.items(), reverse=True):
if value >= key:
value = value / float(key)
if value >= 10:
pattern = '{0:,.0f}' + suffix
else:
pattern = '{0:.1f}' + suffix
return pattern.format(value) | 3f2a2b034cbe1e8a3f21ded743ec328a692cc039 | 5,637 |
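The `_SHORT_SUFFIXES`/`_LONG_SUFFIXES` tables are not part of the snippet above; a plausible shape (keyed by magnitude, as the lookup requires) and the behaviour described in the docstring would look like this:

```python
_SHORT_SUFFIXES = {1_000: 'K', 1_000_000: 'M', 1_000_000_000: 'B'}  # assumed, not from the snippet
_LONG_SUFFIXES = {1_000: ' thousand', 1_000_000: ' million', 1_000_000_000: ' billion'}

print(_format_compact(950))    # '950'
print(_format_compact(1250))   # '1.2K'  (one digit over the magnitude keeps a decimal)
print(_format_compact(12500))  # '12K'   (two digits over the magnitude drops it)
```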
def matrix(mat,nrow=1,ncol=1,byrow=False):
"""Given a two dimensional array, write the array in a matrix form"""
nr=len(mat)
rscript='m<-matrix(data=c('
try:
nc=len(mat[0])
for m in mat:
rscript+=str(m)[1:-1]+ ', '
rscript=rscript[:-2]+'), nrow=%d, ncol=%d, byrow=TRUE,' %(nr,nc)
except TypeError:
rscript+=str(mat)[1:-1]+','
rscript=rscript[:-1]+'), nrow=%d, ncol=%d,' %(nrow,ncol)
if byrow: rscript+='byrow=TRUE,'
rscript=rscript[:-1]+')\n'
return rscript | a28d91d797238857dd2ff58f24655504a936d4a7 | 5,638 |
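For reference, the R-script string the helper above produces for a small nested list (the variable name `m` is hard-coded in the snippet):

```python
print(matrix([[1, 2], [3, 4]]))
# m<-matrix(data=c(1, 2, 3, 4), nrow=2, ncol=2, byrow=TRUE)
```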
from theano import tensor as T
def all(x, axis=None, keepdims=False):
"""Bitwise reduction (logical AND).
"""
return T.all(x, axis=axis, keepdims=keepdims) | b01891385c2b41d42b976beaf0ee8922b632d705 | 5,639 |
def config_find_permitted(cookie, dn, in_class_id, in_filter, in_hierarchical=YesOrNo.FALSE):
""" Auto-generated UCS XML API Method. """
method = ExternalMethod("ConfigFindPermitted")
method.cookie = cookie
method.dn = dn
method.in_class_id = in_class_id
method.in_filter = in_filter
method.in_hierarchical = (("false", "true")[in_hierarchical in ucsgenutils.AFFIRMATIVE_LIST])
xml_request = method.to_xml(option=WriteXmlOption.DIRTY)
return xml_request | 0367fb6d208b4a03ea4c1cc79e14faabe038cba6 | 5,640 |
def reformat_medication_statuses(data: FilteredData) -> FilteredData:
"""
Reformats medication statuses to binary indicators.
Args:
data: The data containing medication statuses to reformat.
Returns:
Data with reformatted medication statuses.
"""
for j in data.medications.columns[data.medications.columns.str.contains(
'_status$')]:
data.medications[j] = (~(data.medications[j].isin(
['NONE', 'NEW']))).astype(int)
return data | 4428d7e65893dfea33490de28e77dffe66562c31 | 5,641 |
def segmentwidget(img, params=None, alg=None):
"""Generate GUI. Produce slider for each parameter for the current segmentor.
Show both options for the masked image.
Keyword arguments:
img -- original image
params -- list of parameter options
alg -- algorithm to search parameters over
"""
if params:
if alg:
            params['algorithm'] = alg
seg = segmentor.algoFromParams(params)
else:
if alg:
algorithm_gen = segmentor.algorithmspace[alg]
seg = algorithm_gen()
else:
seg = segmentor()
widg = dict()
widglist = []
for ppp, ind in zip(seg.paramindexes, range(len(seg.paramindexes))):
thislist = seg.params.ranges[ppp]
name = ppp
current_value = seg.params[ppp]
        if current_value not in thislist:
#TODO: We should find the min distance between current_value and this list and use that instead.
current_value = thislist[0]
thiswidg = widgets.SelectionSlider(options=tuple(thislist),
disabled=False,
description=name,
value=current_value,
continuous_update=False,
orientation='horizontal',
readout=True
)
widglist.append(thiswidg)
widg[ppp] = thiswidg
def func(**kwargs):
"""Find mask and fitness for current algorithm. Show masked image."""
print(seg.params["algorithm"])
for k in kwargs:
seg.params[k] = kwargs[k]
mask = seg.evaluate(img)
#fit = Segmentors.FitnessFunction(mask, gmask)
fig = showtwo(img, mask)
# I like the idea of printing the sharepython but it should be below the figures.
#print(seg.sharepython(img))
# plt.title('Fitness Value: ' + str(fit[0]))
layout = widgets.Layout(grid_template_columns='1fr 1fr 1fr')
u_i = widgets.GridBox(widglist, layout=layout)
out = widgets.interactive_output(func, widg)
display(u_i, out)
return seg.params | 721cb1ddb55a90364593b53c3fdf3b650397a91c | 5,642 |
import json
def list_daemons(dut):
"""Get daemon table from ovsdb-server."""
daemon_list = {}
c = ovs_vsctl + "--format json list daemon"
out = dut(c, shell="bash")
json_out = json.loads(out)['data']
# The output is in the following format
# [["uuid","19b943b0-096c-4d7c-bc0c-5b6ac2f83014"],0,true,"ops-pmd"]
for item in json_out:
daemon_list[item[3]] = {'is_hw_handler': item[2]}
return daemon_list | 35b28e8c38cd48a93f642b0e2820d0ff2ff87450 | 5,643 |
import math
import collections
def Cleanse(obj, encoding="utf-8"):
"""Makes Python object appropriate for JSON serialization.
- Replaces instances of Infinity/-Infinity/NaN with strings.
- Turns byte strings into unicode strings.
- Turns sets into sorted lists.
- Turns tuples into lists.
Args:
obj: Python data structure.
encoding: Charset used to decode byte strings.
Returns:
Unicode JSON data structure.
"""
if isinstance(obj, int):
return obj
elif isinstance(obj, float):
if obj == _INFINITY:
return "Infinity"
elif obj == _NEGATIVE_INFINITY:
return "-Infinity"
elif math.isnan(obj):
return "NaN"
else:
return obj
elif isinstance(obj, bytes):
return obj.decode(encoding)
elif isinstance(obj, (list, tuple)):
return [Cleanse(i, encoding) for i in obj]
elif isinstance(obj, set):
return [Cleanse(i, encoding) for i in sorted(obj)]
elif isinstance(obj, dict):
return collections.OrderedDict(
(Cleanse(k, encoding), Cleanse(v, encoding)) for k, v in obj.items()
)
else:
return obj | e50e44bd3aa685838ea2e68537e2df288e9a058f | 5,644 |
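A usage sketch; _INFINITY and _NEGATIVE_INFINITY are module constants not shown above, so plausible definitions are assumed here:
_INFINITY = float('inf')
_NEGATIVE_INFINITY = float('-inf')

doc = {b'name': 'sensor-1', 'readings': (float('inf'), float('nan'), 3.5), 'tags': {'b', 'a'}}
print(Cleanse(doc))
# OrderedDict([('name', 'sensor-1'), ('readings', ['Infinity', 'NaN', 3.5]), ('tags', ['a', 'b'])])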
import time
def wait_no_exception(lfunction, exc_class=None, exc_matcher=None):
"""Stops waiting on success."""
start_time = time.time()
if exc_matcher is not None:
exc_class = boto.exception.BotoServerError
if exc_class is None:
exc_class = BaseException
while True:
result = None
try:
result = lfunction()
LOG.info('No Exception in %d second',
time.time() - start_time)
return result
except exc_class as exc:
if exc_matcher is not None:
res = exc_matcher.match(exc)
if res is not None:
LOG.info(res)
raise exc
# Let the other exceptions propagate
dtime = time.time() - start_time
if dtime > default_timeout:
raise TestCase.failureException("Wait timeout exceeded! (%ds)" %
dtime)
time.sleep(default_check_interval) | be5e9798570dd3f6ca6f9b78d136179f68a4ad3c | 5,645 |
def testapp(app, init_database):
"""Create Webtest app."""
testapp = TestApp(app)
#testapp = TestApp(app, extra_environ=dict(REMOTE_USE='test'))
# testapp.set_authorization(('Basic', (app.config['USERNAME'],app.config['PASSWORD'])))
# testapp.get_authorization()
return testapp | 01a579ae22c0eedfaac7d6dd5aeffd1b63f34612 | 5,646 |
def dois(self, key, value):
"""Translates dois fields."""
_identifiers = self.get("identifiers", [])
for v in force_list(value):
material = mapping(
MATERIALS,
clean_val("q", v, str, transform="lower"),
raise_exception=True,
)
doi = {
"value": clean_val("a", v, str, req=True),
"material": material,
"source": clean_val("9", v, str),
"scheme": "DOI",
}
if doi not in _identifiers:
_identifiers.append(doi)
return _identifiers | b7635815c451856d249feebeb1084ad28f0357d9 | 5,647 |
def transition(src,
dest,
state=None,
permissions=None,
required=None,
commit_record=True,
**kwargs):
"""Decorator that marks the wrapped function as a state transition.
:params parameters for transition object, see documentation for details.
:returns: A wrapper around a wrapped function, with added `_fsm` field containing the `Transition` spec.
"""
if permissions is not None and not isinstance(permissions, (list, tuple)):
permissions = [permissions]
if required is not None and not isinstance(required, (list, tuple)):
required = [required]
if not isinstance(src, (list, tuple)):
src = [src]
t = Transition(
src=src,
dest=dest,
state=state,
permissions=permissions,
required=required,
commit_record=commit_record,
**kwargs
)
def inner(f):
@has_required_params(t)
def wrapper(self, *args, **kwargs):
record = self
t.check_valid_state(record)
t.check_permissions(record)
t.execute(record=record, **kwargs)
return f(self, *args, **kwargs)
wrapper._fsm = t
t.function = wrapper
t.original_function = f
return wrapper
return inner | 309ffca49c2dd2af4dabb084c5f642d40e9d34e8 | 5,648 |
def get_html(url):
"""Returns HTML object based on given Gumtree URL.
:param url: Offer URL.
:return: Offer HTML object.
"""
session = HTMLSession()
try:
r = session.get(url)
return r.html
except ParserError:
return None | 6fd3aa8e7f2ff81f912e0d7050872a6e2de14827 | 5,649 |
import numpy as np
def _get_lattice_parameters(lattice):
"""Return basis vector lengths
Parameters
----------
lattice : array_like
Basis vectors given as column vectors
shape=(3, 3), dtype='double'
Returns
-------
ndarray, shape=(3,), dtype='double'
"""
return np.array(np.sqrt(np.dot(lattice.T, lattice).diagonal()),
dtype='double') | 405111d5052307dd995e64c2ff7936481db4f34d | 5,650 |
import numpy as np
from copy import deepcopy
def save_batches(current_memory, id_tmp_dir, batch_num):
"""
batch_num : corresponds to the gradient update number
"""
target_csv = id_tmp_dir + "/batch" + str(batch_num) + ".csv"
obs_copy = deepcopy(current_memory['current_obs'])
reward_copy = deepcopy(current_memory['rewards'])
current_obs_batch = obs_copy.cpu().numpy()
obs_x = current_obs_batch[:,0]
obs_y = current_obs_batch[:,1]
reward_batch = reward_copy.cpu().numpy()
batch_list = np.column_stack((obs_x, obs_y, reward_batch))
fileheader = 'X-Position, Y-Position, Reward'
np.savetxt(target_csv, batch_list, delimiter=' ', header=fileheader)
return 0 | 311bf2c1083156dc26acb855f2cc61f142a1586b | 5,651 |
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern
def ikrvea_mm(
reference_point: np.ndarray,
individuals: np.ndarray,
objectives: np.ndarray,
uncertainity: np.ndarray,
problem: MOProblem,
u: int) -> float:
""" Selects the solutions that need to be reevaluated with the original functions.
    This model management is based on the following paper:
    'P. Aghaei Pour, T. Rodemann, J. Hakanen, and K. Miettinen, “Surrogate assisted interactive
    multiobjective optimization in energy system design of buildings,”
    Optimization and Engineering, 2021.'
    Args:
        reference_point (np.ndarray): The reference point that the current solutions are compared to.
            Should be a one-dimensional array.
        individuals (np.ndarray): Current individuals generated by using surrogate models
        objectives (np.ndarray): Current objectives generated by using surrogate models
        uncertainity (np.ndarray): Current uncertainty values generated by using surrogate models
        problem : the problem class
        u : number of solutions to re-evaluate with the original functions
    Returns:
        MOProblem: the problem object with an updated archive.
    """
nd = remove_duplicate(individuals, problem.archive.drop(
problem.objective_names, axis=1).to_numpy()) #removing duplicate solutions
if len(nd) == 0:
return problem
else:
non_duplicate_dv = individuals[nd]
non_duplicate_obj = objectives[nd]
non_duplicate_unc = uncertainity[nd]
# Selecting solutions with lowest ASF values
asf_solutions = SimpleASF([1]*problem.n_of_objectives).__call__(non_duplicate_obj, reference_point)
idx = np.argpartition(asf_solutions, 2*u)
        asf_unc = np.max(non_duplicate_unc[idx[0:2*u]], axis=1)
# index of solutions with lowest Uncertainty
lowest_unc_index = np.argpartition(asf_unc, u)[0:u]
# evaluating the solutions in asf_unc with lowest uncertainty. The archive will get update in problem.evaluate()
problem.evaluate(non_duplicate_dv[lowest_unc_index], use_surrogate=False)[0]
problem.train(models=GaussianProcessRegressor,\
model_parameters={'kernel': Matern(nu=1.5)})
return problem | f9959cf7ddfcc2aa3aa2fc8c3062cfb63082b242 | 5,652 |
def homepage(request):
"""Main view of app.
We will display page with few step CTA links?
:param request: WSGIRequest instance
"""
if logged_as_admin(request):
offers = Offer.objects.get_for_administrator()
else:
offers = Offer.objects.get_weightened()
return render(
request,
'homepage.html',
{
'offers': offers,
'MEDIA_URL': settings.MEDIA_URL,
}
) | 003e6f86ab09ede87e7f1c86910808c2da9c1e9d | 5,653 |
def add_dict(dct1, dct2):
"""Returns a new dictionaries where the content of the dictionaries `dct1`
and `dct2` are merged together."""
result = dct1.copy()
result.update(dct2)
return result | eba785e4d00534e94c1bdde413603d64e18aac05 | 5,654 |
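For illustration, merging two small dictionaries:
defaults = {'host': 'localhost', 'port': 8080}
overrides = {'port': 9090, 'debug': True}
merged = add_dict(defaults, overrides)
# {'host': 'localhost', 'port': 9090, 'debug': True}; neither input is modified.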
import tempfile
from pathlib import Path
def edit_temp(contents="", name=""):
"""
Create a temporary file and open it in the system's default editor for the
user to edit. The saved contents of the file will be returned when the
editor is closed.
:param contents: Pre-fill the file with the given text.
:param name: Ensure that the temp filename has the given name.
:return: Contents of the file when the editor is closed.
"""
# Create a temp file, ensure it has requested name and contents
td = tempfile.TemporaryDirectory()
tfpath = Path(td.name) / (name or DEFAULT_TEMPFILE)
write_file(tfpath, contents)
# Edit interactively
return edit(tfpath) | 174acda4961cc945b917be6c66d1218d3e46914d | 5,655 |
import numpy as np
def _new_primitive_control(
rabi_rotation=None,
azimuthal_angle=0.,
maximum_rabi_rate=2. * np.pi,
**kwargs):
"""
Primitive driven control.
Parameters
----------
rabi_rotation : float, optional
The total rabi rotation to be performed by the driven control.
maximum_rabi_rate : float, optional
Defaults to 2.*np.pi
The maximum rabi frequency for the driven control.
azimuthal_angle : float, optional
The azimuthal position of the driven control.
kwargs : dict
Other keywords required to make a qctrlopencontrols.DrivenControls.
Returns
-------
qctrlopencontrols.DrivenControl
The driven control.
"""
(maximum_rabi_rate, rabi_rotation, azimuthal_angle) = _predefined_common_attributes(
maximum_rabi_rate, rabi_rotation, azimuthal_angle)
return DrivenControl(
rabi_rates=[maximum_rabi_rate],
azimuthal_angles=[azimuthal_angle],
detunings=[0],
durations=[rabi_rotation/maximum_rabi_rate],
**kwargs) | dcaab9ace0269a0404435639d0e9e9e025f1013a | 5,656 |
from typing import Optional
from typing import Any
from typing import Tuple
def check_fit_params(
X: TwoDimArrayLikeType,
y: OneDimArrayLikeType,
sample_weight: Optional[OneDimArrayLikeType] = None,
estimator: Optional[BaseEstimator] = None,
**kwargs: Any
) -> Tuple[TwoDimArrayLikeType, OneDimArrayLikeType, OneDimArrayLikeType]:
"""Check `X`, `y` and `sample_weight`.
Parameters
----------
X
Data.
y
Target.
sample_weight
Weights of data.
estimator
Object to use to fit the data.
**kwargs
Other keywords passed to `sklearn.utils.check_array`.
Returns
-------
X
Converted and validated data.
y
Converted and validated target.
sample_weight
Converted and validated weights of data.
"""
X = check_X(X, estimator=estimator, **kwargs)
if not isinstance(y, pd.Series):
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if is_classifier(estimator):
check_classification_targets(y)
if sample_weight is None:
n_samples = _num_samples(X)
sample_weight = np.ones(n_samples)
sample_weight = np.asarray(sample_weight)
class_weight = getattr(estimator, "class_weight", None)
if class_weight is not None:
sample_weight *= compute_sample_weight(class_weight, y)
check_consistent_length(X, y, sample_weight)
return X, y, sample_weight | f385fb9db06d1bd0ee0a9ecef74fbca8f0754a4d | 5,657 |
async def example_quest(request: web.Request) -> web.Response:
"""
Example quest handler that handles a POST request with a computer science trivia question
:param request: The request object
"""
# Verify that it is a POST request, since that's what this quest is supposed to handle
if request.method == 'POST':
# We will always get JSON from the server, so convert it to a Python dict
data = await request.json()
# Let's see what the server is asking
print(f'Server sent POST to /my-simple-quest:', data)
# Ok so we know that the question is "Who invented C++?"
# The request always contains a "msg" field, and the response always expects an "answer" field
response = { 'answer': 'bjarne stroustrup' }
# The server always expects a JSON response
return web.json_response(response)
    else:
        Log.error('This quest is supposed to handle POST requests')
        # Return an explicit error response so non-POST callers don't get an implicit None
        return web.Response(status=405, text='Method Not Allowed') | fed5ad72d9343c1fd420d98e638bf3f0de995670 | 5,658
import matplotlib.pyplot as plt
def view_api_image(image_type, catalog_name, source_id):
"""Source spectrum image."""
catalog = source_catalogs[catalog_name]
source = catalog[source_id]
plt.style.use('fivethirtyeight')
if image_type == 'spectrum':
fig, ax = plt.subplots()
source.plot_spectrum(ax=ax)
    elif image_type == 'lightcurve':
fig, ax = plt.subplots()
source.plot_lightcurve(ax=ax)
elif image_type == 'test':
fig, ax = plt.subplots()
ax.plot([2, 4, 3])
else:
raise ValueError('Invalid image_type: {}'.format(image_type))
fig.tight_layout()
# fig.canvas.draw()
img = BytesIO()
fig.savefig(img)
img.seek(0)
del fig, ax
return send_file(img, mimetype='image/png') | 8137b0ca42b3e161fbd77d5f73beb2adeefa1fd3 | 5,659 |
def get_record_base_model(type_enum):
"""Return the dimension model class for a DimensionType."""
dim_model = _DIMENSION_TO_MODEL.get(type_enum)
if dim_model is None:
raise DSGInvalidDimension(f"no mapping for {type_enum}")
return dim_model | dc232a173ea92bcb6ee2bafcd9eeae46862da5ec | 5,660 |
import subprocess
def cmd(command):
"""
Run a command and return its stdout
"""
try:
completed = subprocess.run(
command.split(" "),
stdout=subprocess.PIPE,
)
except FileNotFoundError:
panic(f"Command `{command}` not found.")
if completed.returncode > 0:
panic(f"Command `{command}` returned a non 0 status code.")
return completed.stdout.decode('utf-8').rstrip() | cd378b564844988d1dd9fbd674b21cde0c071964 | 5,661 |
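A usage sketch; `panic` is an external helper not shown above, so a minimal stand-in is assumed here:
def panic(message):
    # Stand-in: the real helper presumably logs the error and exits.
    raise SystemExit(message)

kernel = cmd("uname -r")                         # stripped stdout of `uname -r`
branch = cmd("git rev-parse --abbrev-ref HEAD")
# Note: arguments are split on single spaces, so quoted arguments containing spaces are not supported.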
from io import StringIO
import pandas as pd
def sas_to_pandas(sas_code, wrds_id, fpath):
"""Function that runs SAS code on WRDS or local server
and returns a Pandas data frame."""
p = get_process(sas_code, wrds_id, fpath)
if wrds_id:
df = pd.read_csv(StringIO(p.read().decode('utf-8')))
else:
df = pd.read_csv(StringIO(p.read()))
df.columns = map(str.lower, df.columns)
p.close()
return(df) | 50806526e1f44e58c227472ced3b5884d8b9f2d5 | 5,662 |
import subprocess
def subprocess_run(*popenargs, input=None, timeout=None, check=False, **kwargs): # pylint: disable=redefined-builtin
"""Run command with arguments and return a CompletedProcess instance.
The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.
If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.
If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.
There is an optional argument "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.
The other arguments are the same as for the Popen constructor.
If universal_newlines=True is passed, the "input" argument must be a
string and stdout/stderr in the returned object will be strings rather than
bytes.
"""
if input is not None:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = subprocess.PIPE
with subprocess.Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except subprocess.TimeoutExpired:
process.kill()
stdout, stderr = process.communicate()
raise subprocess.TimeoutExpired(process.args, timeout, output=stdout,
stderr=stderr)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if check and retcode:
raise subprocess.CalledProcessError(retcode, process.args,
output=stdout)
return _CompletedProcess(process.args, retcode, stdout, stderr) | aef8e69ef7f09be414a941cc58a3d174242a1537 | 5,663 |
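A usage sketch mirroring subprocess.run; `_CompletedProcess` is a module-level helper not shown above, but per the docstring the result exposes args, returncode, stdout and stderr:
result = subprocess_run(['echo', 'hello'], stdout=subprocess.PIPE, check=True)
print(result.returncode, result.stdout)   # 0 b'hello\n'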
def _client_ip_address(request):
"""Return client ip address for flask `request`.
"""
if request.headers.getlist("X-PNG-Query-For"):
ip_addr = request.headers.getlist("X-PNG-Query-For")[0]
if ip_addr.startswith('::ffff:'):
ip_addr = ip_addr[7:]
elif request.headers.getlist("X-Forwarded-For"):
ip_addr = request.headers.getlist("X-Forwarded-For")[0]
if ip_addr.startswith('::ffff:'):
ip_addr = ip_addr[7:]
else:
ip_addr = request.remote_addr
return ip_addr | eb1b41ee707bac5aefaacdbb1958cd26a47fe288 | 5,664 |
import numpy as np
def create_bb_points(vehicle):
"""
Extract the eight vertices of the bounding box from the vehicle.
Parameters
----------
vehicle : opencda object
Opencda ObstacleVehicle that has attributes.
Returns
-------
bbx : np.ndarray
3d bounding box, shape:(8, 4).
"""
bbx = np.zeros((8, 4))
extent = vehicle.bounding_box.extent
bbx[0, :] = np.array([extent.x, extent.y, -extent.z, 1])
bbx[1, :] = np.array([-extent.x, extent.y, -extent.z, 1])
bbx[2, :] = np.array([-extent.x, -extent.y, -extent.z, 1])
bbx[3, :] = np.array([extent.x, -extent.y, -extent.z, 1])
bbx[4, :] = np.array([extent.x, extent.y, extent.z, 1])
bbx[5, :] = np.array([-extent.x, extent.y, extent.z, 1])
bbx[6, :] = np.array([-extent.x, -extent.y, extent.z, 1])
bbx[7, :] = np.array([extent.x, -extent.y, extent.z, 1])
return bbx | 2cf2e2b1e9d64a246369ff9b182199ed64fb71b9 | 5,665 |
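For illustration, a lightweight stand-in for the carla/opencda vehicle object, since only bounding_box.extent is read:
from types import SimpleNamespace

extent = SimpleNamespace(x=2.0, y=1.0, z=0.75)
vehicle = SimpleNamespace(bounding_box=SimpleNamespace(extent=extent))
corners = create_bb_points(vehicle)   # (8, 4) array of homogeneous corner coordinates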
import tempfile
import os
import io
def mkstemp(
open_kwargs=None, # type: Optional[Dict[Text, Any]]
text=True, # type: bool
name_only=False, # type: bool
*args,
**kwargs):
# type: (...) -> Union[(IO[AnyStr], Text), Text]
"""
WARNING: the returned file object is strict about its input type,
make sure to feed it binary/text input in correspondence to the ``text`` argument
:param open_kwargs: keyword arguments for ``io.open``
:param text: open in text mode
:param name_only: close the file and return its name
:param args: tempfile.mkstemp args
:param kwargs: tempfile.mkstemp kwargs
"""
fd, name = tempfile.mkstemp(text=text, *args, **kwargs)
mode = 'w+'
if not text:
mode += 'b'
if name_only:
os.close(fd)
return name
return io.open(fd, mode, **open_kwargs or {}), name | c13b2d41a74e1ee8bbfd0e090d083d7e1446fb34 | 5,666 |
def featuredrep_set_groups(sender, **kwargs):
"""Set permissions to groups."""
app_label = sender.label
if (isinstance(app_label, basestring) and app_label != 'featuredrep'):
return True
perms = {'can_edit_featured': ['Admin', 'Council', 'Peers'],
'can_delete_featured': ['Admin', 'Council', 'Peers']}
add_permissions_to_groups('featuredrep', perms) | 354731d88ed6633d6cd62b73c6d1ea4cae97ca73 | 5,667 |
def download(loc, rem):
"""download rem to loc"""
# does the remote file exist
if not rem.exists():
return ReturnCode.NO_SOURCE
# does the local file exist
# if it doesnt, copy rem to loc, isLogged = False
if not loc.is_file():
return do_copy(rem, loc, False)
# is the local file older than remote
if not is_older_than(loc, rem):
return ReturnCode.NOT_OLDER
if outs.question_override(rem, loc):
return do_copy(rem, loc, False)
else:
return ReturnCode.USER_CANCEL | ff7421674f97a6923bbee6fa9be27d132d0095e3 | 5,668 |
from utils import publish_event
def read_dict (conf_dict = {}, filename = "SWIM_config"):
"""
Open and read a dictionary of key-value pairs from the file given by
filename. Use the read-in values to augment or update the dictionary passed
in, then return the new dictionary.
"""
try:
config_file = open(filename, "r")
if config_file:
line = config_file.readline().strip()
else:
line = ""
except:
message = "Unable to open config file " + filename
publish_event(message, topic = FSP_log, action = "halt_run")
print message
raise IOError, "Unable to open config file in read_dict"
try:
while line:
name, val = line.split("=")
name = name.strip()
val = val.strip()
conf_dict[name] = val
if config_file:
line = config_file.readline().strip()
else:
line = ""
config_file.close()
return conf_dict
except Exception, ex:
print "Unable to augment conf_dict in read_dict: %s" % ex
raise IOError, "Unable to augment conf_dict in read_dict" | 655699cf8c0c007c8e66f15a75bf778686c7d8d9 | 5,669 |
def ordinals_to_ordinals(s):
""" Example:
'third' -> '3rd'
Up to 31st (intended for dates)
"""
for val in ordinals.keys():
s = s.replace(val, ordinals[val])
return s | 4d45a9cfa0171a42deaf99d2e34e41dd5be6c96c | 5,670 |
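A usage sketch; `ordinals` is a module-level mapping not shown above, so a hypothetical excerpt is used here:
ordinals = {'first': '1st', 'second': '2nd', 'third': '3rd', 'thirty-first': '31st'}

print(ordinals_to_ordinals('the third of May'))   # 'the 3rd of May'
# Because plain str.replace is used, compound entries such as 'thirty-first'
# need to be handled before their substrings ('first') in the real table.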
import pandas as pd
def create_dataframe_schema():
"""
Create dataframe schema
"""
return pd.DataFrame(columns=['Station_id', 'Name']) | 9f15aa5fb72716e0e398554caabafb972261e5ca | 5,671 |
import requests
def shutdown_check_handler():
"""This checks the AWS instance data URL to see if there's a pending
shutdown for the instance.
This is useful for AWS spot instances. If there is a pending shutdown posted
to the instance data URL, we'll use the result of this function break out of
the processing loop and shut everything down ASAP before the instance dies.
Returns
-------
bool
- True if the instance is going to die soon.
- False if the instance is still safe.
"""
url = 'http://169.254.169.254/latest/meta-data/spot/instance-action'
try:
resp = requests.get(url, timeout=1.0)
resp.raise_for_status()
stopinfo = resp.json()
if 'action' in stopinfo and stopinfo['action'] in ('stop',
'terminate',
'hibernate'):
stoptime = stopinfo['time']
LOGWARNING('instance is going to %s at %s' % (stopinfo['action'],
stoptime))
resp.close()
return True
else:
resp.close()
return False
    except requests.exceptions.HTTPError:
        resp.close()
        return False
    except Exception:
        # requests.get itself may have raised, in which case `resp` was never bound
        return False | dd5a7c3b3ab856d72afe01a19b2389071c4e70f3 | 5,672
def cal_d(date=cal_date.today(), zf=True):
"""
    Day of the month, optionally left-padded with zeroes (default: pad)
"""
day_num = "d" if zf else "-d" # optionally zero fill
return date.strftime(f"%{day_num}") | c0501449035c10f3c4b05a8f9088b30d5789f662 | 5,673 |
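A usage sketch, assuming `cal_date` is the standard-library date class imported under that alias:
from datetime import date as cal_date

print(cal_d(cal_date(2021, 3, 7)))             # '07' (zero-filled)
print(cal_d(cal_date(2021, 3, 7), zf=False))   # '7'  ('%-d' works with POSIX strftime, not on Windows)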
from typing import Mapping
from typing import Sequence
from typing import Tuple
def get_max_total(map_of_maps: Mapping[Sequence[str], Mapping[Tuple, float]]) -> float:
"""
>>> df = get_sample_df()
>>> get_max_total(calculate_kls_for_attackers(df, [1]))
1.3861419037664793
>>> get_max_total(calculate_kls_for_attackers(df))
3.0817041659455104
"""
return max(get_map_of_totals(map_of_maps).values()) | 6480676c3960e36e434dd8b229ddbd840fbcaa7a | 5,674 |
from urllib.parse import urljoin
import requests
def get_keeper_token(host: str, username: str, password: str) -> str:
"""Get a temporary auth token from LTD Keeper.
Parameters
----------
host : `str`
Hostname of the LTD Keeper API (e.g., ``'https://keeper.lsst.codes'``).
username : `str`
Username.
password : `str`
Password.
Returns
-------
token : `str`
LTD Keeper API token.
Raises
------
KeeperError
Raised if the LTD Keeper API cannot return a token.
"""
token_endpoint = urljoin(host, "/token")
r = requests.get(token_endpoint, auth=(username, password))
if r.status_code != 200:
raise KeeperError(
"Could not authenticate to {0}: error {1:d}\n{2}".format(
host, r.status_code, r.json()
)
)
return r.json()["token"] | 4c1feb095b3409786c5bac62aed41939f31a1431 | 5,675 |
import json
def edit_comment(post_id, comment_id):
"""Edit a comment from a specific post"""
post = posts.get(post_id)
if not post:
return json.dumps({"error": "Post Not Found"}), 404
comments = post["comments"]
comment = comments.get(comment_id)
if not comment:
return json.dumps({"error": "Comment Not Found"}), 404
body = json.loads(request.data)
text = body.get("text")
if not text:
return json.dumps({"error": "Missing fields in the body"}), 400
comment["text"] = text
return json.dumps(comment), 200 | dba79ed0bbdbc48b804a5a9f446c289f47606a75 | 5,676 |
def update_transition_dirichlet(
pB, B, actions, qs, qs_prev, lr=1.0, return_numpy=True, factors="all"
):
"""
Update Dirichlet parameters that parameterize the transition model of the generative model
(describing the probabilistic mapping between hidden states over time).
Parameters
-----------
- pB [numpy nd.array, array-of-arrays (with np.ndarray entries), or Dirichlet (either single-modality or AoA)]:
The prior Dirichlet parameters of the generative model, parameterizing the agent's beliefs about the transition likelihood.
- B [numpy nd.array, object-like array of arrays, or Categorical (either single-modality or AoA)]:
The transition likelihood of the generative model.
- actions [tuple]:
A tuple containing the action(s) performed at a given timestep.
    - qs [numpy 1D array, array-of-arrays (where each entry is a numpy 1D array), or Categorical (either single-factor or AoA)]:
        Current marginal posterior beliefs about hidden state factors
    - qs_prev [numpy 1D array, array-of-arrays (where each entry is a numpy 1D array), or Categorical (either single-factor or AoA)]:
        Past marginal posterior beliefs about hidden state factors
    - lr [float, optional]:
        Learning rate.
    - return_numpy [bool, optional]:
        Logical flag to determine whether output is a numpy array or a Dirichlet
    - factors [list, optional]:
Indices (in terms of range(Nf)) of the hidden state factors to include in learning.
Defaults to 'all', meaning that transition likelihood matrices for all hidden state factors
are updated as a function of transitions in the different control factors (i.e. actions)
"""
pB = utils.to_numpy(pB)
if utils.is_arr_of_arr(pB):
n_factors = len(pB)
else:
n_factors = 1
if return_numpy:
pB_updated = pB.copy()
else:
pB_updated = utils.to_dirichlet(pB.copy())
if not utils.is_distribution(qs):
qs = utils.to_categorical(qs)
if factors == "all":
if n_factors == 1:
db = qs.cross(qs_prev, return_numpy=True)
db = db * (B[:, :, actions[0]] > 0).astype("float")
pB_updated = pB_updated + (lr * db)
elif n_factors > 1:
for f in range(n_factors):
db = qs[f].cross(qs_prev[f], return_numpy=True)
db = db * (B[f][:, :, actions[f]] > 0).astype("float")
pB_updated[f] = pB_updated[f] + (lr * db)
else:
for f_idx in factors:
db = qs[f_idx].cross(qs_prev[f_idx], return_numpy=True)
db = db * (B[f_idx][:, :, actions[f_idx]] > 0).astype("float")
pB_updated[f_idx] = pB_updated[f_idx] + (lr * db)
return pB_updated | 3b41320f20abee2cec4cfa651d932a388c4595c2 | 5,677 |
def MRP2Euler231(q):
"""
MRP2Euler231(Q)
E = MRP2Euler231(Q) translates the MRP
vector Q into the (2-3-1) euler angle vector E.
"""
return EP2Euler231(MRP2EP(q)) | 09929b4858eb0f8a755b623fa86b6d77333e9f6b | 5,678 |
import numpy as np
def _convert_from_node_attribute(
G, attr_name, node_types, node_type_name=None, node_type_default=None, dtype="f"
):
"""
Transform the node attributes to feature vectors, for use with machine learning models.
Each node is assumed to have a numeric array stored in the attribute_name and
which is suitable for use in machine learning models.
Args:
G: NetworkX graph
attr_name: Name of node attribute to use for conversion
node_types: Node types in graph
node_type_name: (optional) The name of the node attribute specifying the type.
node_type_default: (optional) The node type of nodes without explicit type.
dtype: (optional) The numpy datatype to create the features array.
Returns:
index_map: a dictionary of node_type -> {node_id: node_index}
attribute_arrays: a dictionary of node_type -> numpy array storing the features
"""
attribute_arrays = {}
node_index_map = {}
# Enumerate all nodes in graph
nodes_by_type = {
# XXX: This lookup does not really make sense if node_type_name is not specified - why is it optional?
nt: [
n
for n, ndata in G.nodes(data=True)
if ndata.get(node_type_name, node_type_default) == nt
]
for nt in node_types
}
# Get the target values for each node type
for nt in node_types:
nt_node_list = nodes_by_type[nt]
# Add None to node list as ID of unknown nodes
nt_node_list.append(None)
# Create map between node id and index (including None)
node_index_map[nt] = {nid: ii for ii, nid in enumerate(nt_node_list)}
# The node data
attr_data = [
v if v is None else G.nodes[v].get(attr_name) for v in nt_node_list
]
# Get the size of the features
data_sizes = {
np.size(G.nodes[v].get(attr_name, []))
for v in nt_node_list
if v is not None
}
# Warn if nodes don't have the attribute
if 0 in data_sizes:
print(
"Warning: Some nodes have no value for attribute '{}', "
"using default value.".format(attr_name)
)
data_sizes.discard(0)
# Check all are the same for this node type
if len(data_sizes) > 1:
raise ValueError(
"Data sizes in nodes of type {} are inconsistent "
"for the attribute '{}' ".format(nt, attr_name)
)
# If some node_type have no nodes with the attribute, skip them
if len(data_sizes) == 0:
continue
# Create zero attribute array
data_size = data_sizes.pop()
# Dummy feature/target value for invalid nodes,
# this will be inserted into the array in two cases:
# 1. node ID of None (representing sampling for a missing neighbour)
# 2. node with no attribute
# TODO: Make these two cases more explicit, allow custom values.
default_value = np.zeros(data_size)
# Convert to numpy array
attribute_arrays[nt] = np.asarray(
[x if x is not None else default_value for x in attr_data]
)
return node_index_map, attribute_arrays | 1c1ae2ab8d1a3da31829ee48ee4017fb45ca73b0 | 5,679 |
import argparse
import socket
from pathlib import Path
import requests
def handle_args(parser: argparse.ArgumentParser, section: Text) -> argparse.Namespace:
""" Verify default arguments """
hostname = socket.gethostname()
hostname_short = socket.gethostname().split(".")[0]
host_config_name = f"{CONFIG_NAME}-{hostname}"
host_short_config_name = f"{CONFIG_NAME}-{hostname_short}"
if (Path(caep.get_config_dir(CONFIG_ID)) / host_config_name).is_file():
config_name = host_config_name
elif (Path(caep.get_config_dir(CONFIG_ID)) / host_short_config_name).is_file():
config_name = host_short_config_name
else:
config_name = CONFIG_NAME
args = caep.handle_args(parser, CONFIG_ID, config_name, section)
setup_logging(args.loglevel, args.logfile)
info(f"args: {args}")
info(f"config: {CONFIG_ID}/{config_name}")
args.chat_prefix = args.chat_prefix.strip()
if not args.server:
fatal("--server not specified")
if not args.user:
fatal("--user not specified")
if args.no_verify:
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
if args.password_pass_entry:
args.password = gettpassentry(args.password_pass_entry)
if not args.password:
fatal("Must specify either --password or --password-pass-entry")
return args | a150360799879f1fae56a94936d87473378d6f37 | 5,680 |
def get_entry_or_none(base: dict, target, var_type=None):
"""Helper function that returns an entry or None if key is missing.
:param base: dictionary to query.
:param target: target key.
:param var_type: Type of variable this is supposed to be (for casting).
:return: entry or None.
"""
if target not in base:
return None
if var_type is not None:
return var_type(base[target])
return base[target] | b3855be0c7d2c3bdd42e57ae959bb97409abe828 | 5,681 |
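For illustration:
config = {'retries': '3', 'host': 'example.org'}
get_entry_or_none(config, 'retries', int)   # 3 (cast applied)
get_entry_or_none(config, 'host')           # 'example.org'
get_entry_or_none(config, 'timeout')        # None (missing key)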
def group_list(request):
"""
    List all groups, or create a new group.
"""
if request.method == 'GET':
tasks = Group.objects.all()
serializer = GroupSerializer(tasks, many=True)
return Response(serializer.data)
elif request.method == 'POST':
unique_name = request.data.get("unique_name")
display_name = request.data.get("display_name")
if unique_name and display_name:
checkgoup = Group.objects.filter(unique_name=unique_name).first()
if checkgoup:
res = {"code": 400,
"message": "Ops!, Unique name already exists"}
return Response(data=res,
status=400)
else:
res = {"code": 400,
"message":
"Ops!, Unique name and display name can't be null"}
return Response(data=res,
status=400)
group = Group.create(unique_name, display_name)
group.save()
serializer = GroupSerializer(group, many=False)
return JsonResponse(serializer.data, safe=False) | 39e3d67aa88008541898aea2d21d5a811ec17699 | 5,682 |
def CreateBooleanSplit(meshesToSplit, meshSplitters, multiple=False):
"""
Splits a set of meshes with another set.
Args:
meshesToSplit (IEnumerable<Mesh>): A list, an array, or any enumerable set of meshes to be split. If this is null, None will be returned.
meshSplitters (IEnumerable<Mesh>): A list, an array, or any enumerable set of meshes that cut. If this is null, None will be returned.
Returns:
Mesh[]: A new mesh array, or None on error.
"""
url = "rhino/geometry/mesh/createbooleansplit-mesharray_mesharray"
if multiple: url += "?multiple=true"
args = [meshesToSplit, meshSplitters]
if multiple: args = list(zip(meshesToSplit, meshSplitters))
response = Util.ComputeFetch(url, args)
response = Util.DecodeToCommonObject(response)
return response | 56f8956b9cce7bd9467ac23b80a7573a889c05bf | 5,683 |
def template14():
"""Simple ML workflow"""
script = """
## (Enter,datasets)
<< host = chemml
<< function = load_cep_homo
>> smiles 0
>> homo 4
## (Store,file)
<< host = chemml
<< function = SaveFile
<< format = smi
<< header = False
<< filename = smiles
>> 0 df
>> filepath 1
## (Represent,molecular descriptors)
<< host = chemml
<< function = RDKitFingerprint
>> 1 molfile
>> df 2
>> df 3
## (Store,file)
<< host = chemml
<< function = SaveFile
<< filename = fps_rdkfp
>> 2 df
## (Prepare,split)
<< host = sklearn
<< function = train_test_split
>> 3 dfx
>> 4 dfy
>> dfx_train 5
>> dfy_train 6
>> dfx_test 8
>> dfy_test 11
## (Model,regression)
<< host = sklearn
<< function = MLPRegressor
<< func_method = fit
>> 5 dfx
>> 6 dfy
>> api 7
## (Model,regression)
<< host = sklearn
<< function = MLPRegressor
<< func_method = predict
>> 7 api
>> 8 dfx
>> dfy_predict 9
>> dfy_predict 10
## (Store,file)
<< host = chemml
<< function = SaveFile
<< filename = dfy_predict
>> 9 df
## (Visualize,plot)
<< host = chemml
<< function = scatter2D
<< x = 0
<< y = 0
>> 10 dfx
>> 11 dfy
>> fig 12
## (Store,figure)
<< host = chemml
<< function = SavePlot
<< filename = dfy_actual_vs_dfy_predict
<< output_directory = .
>> 13 fig
## (Visualize,artist)
<< host = chemml
<< function = decorator
<< title = true vs. predicted HOMO energy
<< xlabel = predicted HOMO energy (eV)
<< ylabel = true HOMO energy (eV)
<< grid = True
<< grid_color = g
<< size = 18
>> 12 fig
>> fig 13
"""
return script.strip().split('\n') | d321d2016f0894d0a0538a09f6bc17f3f690317b | 5,684 |
def get_group_selector(*args):
"""
get_group_selector(grpsel) -> sel_t
Get common selector for a group of segments.
@param grpsel: selector of group segment (C++: sel_t)
@return: common selector of the group or 'grpsel' if no such group is
found
"""
return _ida_segment.get_group_selector(*args) | 6b750702186d70f2b21b64c13145d8da7bfd0b9c | 5,685 |
import textwrap
def wrap(text=cert_text) -> str:
"""Wraps the given text using '\n' to fit the desired width."""
wrapped_text = textwrap.fill(text, fit_char())
return wrapped_text | a6e42a7ca8fa78e7be89e31a920e8b3baa95245e | 5,686 |
import simplejson
def encode(data):
"""calls simplejson's encoding stuff with our needs"""
return simplejson.dumps(
data,
cls=CahootsEncoder,
ensure_ascii=False,
encoding='utf8',
indent=4
) | d3577e87b830b17222614d978d78eaa8329843bd | 5,687 |
def SE_HRNet_W48_C(pretrained=False, use_ssld=False, **kwargs):
"""
SE_HRNet_W48_C
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `SE_HRNet_W48_C` model depends on args.
"""
model = HRNet(width=48, has_se=True, **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W48_C"], use_ssld)
return model | 0204ff852a18e6319c0454c5fd81d748626fcf78 | 5,688 |
from typing import Union
from typing import Any
from typing import List
def manifest(argument: Union[Any, List[Any]], data: bytearray) -> Union[Any, List[Any]]:
"""
Returns the manifestation of a `refinery.lib.argformats.LazyEvaluation`
on the given data. This function can change the data.
"""
if isinstance(argument, (list, tuple)):
return [manifest(x, data) for x in argument]
return argument(data) if isinstance(argument, LazyEvaluation) else argument | b8c5335494fda972c09a6d1937344783fb91ea80 | 5,689 |
import win32gui
def get_child_hwnd_by_class(hwnd: int, window_class: str) -> int:
"""Enumerates the child windows that belong to the specified parent window by passing the handle to
each child window.
:param hwnd: HWND in decimal
:param window_class: window class name
:return: window handle (HWND)
"""
def callback(hwnd, extra):
if extra['equals'] == win32gui.GetClassName(hwnd):
extra['res'] = hwnd
extra = {'res': None, 'equals': window_class}
win32gui.EnumChildWindows(hwnd, callback, extra)
return extra['res'] | 0d6b7cb56b483c88305611520e9787ed4321b6ac | 5,690 |
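A Windows-only usage sketch, assuming a running classic Notepad window whose editor child uses the 'Edit' window class:
parent = win32gui.FindWindow('Notepad', None)
if parent:
    edit_hwnd = get_child_hwnd_by_class(parent, 'Edit')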
def uniq(lst):
"""
this is like list(set(lst)) except that it gets around
unhashability by stringifying everything. If str(a) ==
str(b) then this will get rid of one of them.
"""
seen = {}
result = []
for item in lst:
if str(item) not in seen:
result.append(item)
seen[str(item)]=True
return result | 706ec44f340fbfca36cb1a605391e9fc32d38ca0 | 5,691 |
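For illustration; deduplication happens on the stringified values, so unhashable items such as dicts are handled:
uniq([1, '1', 2, 2.0, {'a': 1}, {'a': 1}])
# -> [1, 2, 2.0, {'a': 1}]   (str(1) == str('1'), so '1' is dropped; order is preserved)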
from dateutil.parser import parse as dateparse  # assumed source of `dateparse` (not shown in the original)
from pytz import utc  # assumed source of `utc` (not shown in the original)
def as_datetime(dct):
    """Decode datetime objects in data responses while decoding json."""
    try:
        kind, val = dct['__jsonclass__']
        if kind == 'datetime':
            # trac doesn't specify an offset for its timestamps, assume UTC
            return dateparse(val).astimezone(utc)
    except KeyError:
        pass
    # Not a datetime wrapper: hand the object back unchanged
    return dct | 858f3229ea8b14797a0ed1c4f45159881eb08fe4 | 5,692
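A sketch of using it as a json object_hook (assuming the dateutil/pytz imports above):
import json

payload = '{"created": {"__jsonclass__": ["datetime", "2021-06-01T12:00:00"]}}'
doc = json.loads(payload, object_hook=as_datetime)
# doc['created'] is now a timezone-aware datetime instead of the raw wrapper dict.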
def is_open(state: int) -> bool:
"""Return whether a given position (x, y) is open."""
return state == states_id.OPEN | b5f056929a5ffed8dea8167402a652b01e2b3202 | 5,693 |
import xmpp
def sendJabber(sender,
password,
receivers,
body,
senderDomain=NOTIFY_IM_DOMAIN_SENDER,
receiverDomain=NOTIFY_IM_DOMAIN_RECEIVER):
"""
Sends an instant message to the inputted receivers from the
given user. The senderDomain is an override to be used
when no domain is supplied, same for the receiverDomain.
:param sender <str>
:param password <str>
:param receivers <list> [ <str>, .. ]
:param body <str>
:param senderDomain <str>
:param receiverDomain <str>
:return <bool> success
"""
# make sure there is a proper domain as part of the sender
if '@' not in sender:
sender += '@' + senderDomain
# create a jabber user connection
user = xmpp.protocol.JID(sender)
# create a connection to an xmpp client
client = xmpp.Client(user.getDomain(), debug=[])
connection = client.connect(secure=0, use_srv=False)
if not connection:
text = 'Could not create a connection to xmpp (%s)' % sender
err = errors.NotifyError(text)
logger.error(err)
return False
# authenticate the session
auth = client.auth(user.getNode(), password, user.getResource())
if not auth:
text = 'Jabber not authenticated: (%s, %s)' % (sender, password)
err = errors.NotifyError(text)
logger.error(err)
return False
count = 0
# send the message to the inputted receivers
for receiver in receivers:
if '@' not in receiver:
receiver += '@' + receiverDomain
# create the message
msg = xmpp.protocol.Message(receiver, body)
# create the html message
html_http = {'xmlns': 'http://jabber.org/protocol/xhtml-im'}
html_node = xmpp.Node('html', html_http)
enc_msg = body.encode('utf-8')
xml = '<body xmlns="http://www.w3.org/1999/xhtml">%s</body>' % enc_msg
html_node.addChild(node=xmpp.simplexml.XML2Node(xml))
msg.addChild(node=html_node)
client.send(msg)
count += 1
return count > 0 | b38865b6414f3d4d88f65c49b3601000b9cee20a | 5,694 |
from datetime import date
def directory_log_summary(config):
"""
    Summarise the input and output directories and key information as a text log from a MATSim config,
    for use when submitting jobs via the Bitsim Orchestration.
"""
message = []
# add the date
message.append(f"Date:{date.today()}")
# add paths of the input files
message.append("{:=^100s}".format("input files"))
message.append(f"network_path:{config['network']['inputNetworkFile']}")
message.append(f"plans_path:{config['plans']['inputPlansFile']}")
message.append(f"schedule_path:{config['transit']['transitScheduleFile']}")
message.append(f"vehicles_path:{config['transit']['vehiclesFile']}")
# add paths of the output diretory
message.append("{:=^100s}".format("output directory"))
message.append(f"output_directory:{config['controler']['outputDirectory']}")
# add mobsim setting summary
message.append("{:=^100s}".format("mobsim setting"))
message.append(f"mobsim:{config['controler']['mobsim']}")
message.append(f"Flow_Capacity_Factor:{config[config['controler']['mobsim']]['flowCapacityFactor']}")
message.append(f"Storage_Capacity_Factor:{config[config['controler']['mobsim']]['storageCapacityFactor']}")
return message | 47708958817b22f92f685ba380bf388b352a563d | 5,695 |
import json
def search():
"""
Searches for users with their name. Excludes the logged in user.
"""
data = json.loads(request.data)
search_term = data['search_term']
this_user = interface.get_user_by_id(get_jwt_identity())
users = interface.search_users(search_term)
result = [user.get_public_data() for user in users if not user.id == this_user.id]
return {"result": result}, 200 | 954288d19f29bbad7182f6b23e5c62b0e75df602 | 5,696 |
def get_optics_mode(optics_mode, energy=energy):
"""Return magnet strengths of a given opics mode."""
if optics_mode == 'M0':
# 2019-08-01 Murilo
# tunes fitted to [19.20433 7.31417] for new dipoles segmented model
qf_high_en = 1.65458216649285
qd_high_en = -0.11276026973021
qs_high_en = 0.0
sf_high_en = 11.30745884748409
sd_high_en = 10.52221952522381
qf_low_en = 1.65380213538720
qd_low_en = -0.00097311784326
qs_low_en = 0.0
sf_low_en = 11.32009586848142
sd_low_en = 10.37672159358045
else:
raise _pyacc_acc.AcceleratorException('Optics mode not recognized.')
coeff = (energy-0.15e9)/(3e9-0.15e9)
strengths = {
'qf' : qf_low_en + coeff*(qf_high_en - qf_low_en),
'qd' : qd_low_en + coeff*(qd_high_en - qd_low_en),
'qs' : qs_low_en + coeff*(qs_high_en - qs_low_en),
'sf' : sf_low_en + coeff*(sf_high_en - sf_low_en),
'sd' : sd_low_en + coeff*(sd_high_en - sd_low_en),
}
return strengths | 6a62bcaad3a6aa4a06d44072258aa09116c07107 | 5,697 |
import os
import pandas as pd
from datetime import datetime
def load_stock_order():
"""加载csv文件,导入并备份为 [.yyyy-mm-dd HH_MM_SS.bak 结尾的文件"""
base_dir = './auto_order_dir'
file_name_list = os.listdir(base_dir)
if file_name_list is None:
log.info('No file')
data_df = None
for file_name in file_name_list:
file_base_name, file_extension = os.path.splitext(file_name)
if file_extension != '.csv':
continue
file_path = os.path.join(base_dir, file_name)
data_df_tmp = pd.read_csv(file_path, index_col='CodeDigit', header=0, skipinitialspace=True)
if data_df is None:
data_df_tmp.index = ['%06d' % stock_code for stock_code in data_df_tmp.index]
data_df = data_df_tmp
else:
data_df = data_df.append(data_df_tmp)
backup_file_name = file_base_name + datetime.now().strftime('%Y-%m-%d %H_%M_%S') + file_extension + '.bak'
os.rename(file_path, os.path.join(base_dir, backup_file_name))
if data_df is not None:
has_error = False
# data_df.rename(columns={k1: k2 for k1, k2 in
# zip(data_df.columns, ['final_position', 'ref_price', 'wap_mode'])}, inplace=True)
# 重复数据检测
for name, index in data_df.groupby(level=0).groups.items():
if len(index) > 1:
has_error = True
log.error('%s 存在%d条重复数据', name, len(index))
col_name_set = set(data_df.columns)
for col_name in {'Lot', 'TargetPrice', 'Algo'}:
if col_name not in col_name_set:
has_error = True
log.error('stock_target_df should has %s column', col_name)
if has_error:
raise ValueError('csv 文件存在格式或内容问题')
data_df.rename(columns={
'Lot': 'final_position',
'TargetPrice': 'ref_price',
'Algo': 'wap_mode',
}, inplace=True)
return data_df | cb7472d2f403bf7f845c2b9af85f7181bf778563 | 5,698 |
from typing import Dict
def _get_last_block_in_previous_epoch(
constants: ConsensusConstants,
sub_height_to_hash: Dict[uint32, bytes32],
sub_blocks: Dict[bytes32, SubBlockRecord],
prev_sb: SubBlockRecord,
) -> SubBlockRecord:
"""
Retrieves the last block (not sub-block) in the previous epoch, which is infused before the last sub-block in
the epoch. This will be used for difficulty adjustment.
Args:
constants: consensus constants being used for this chain
sub_height_to_hash: sub-block height to header hash map for sub-blocks in peak path
sub_blocks: dict from header hash to sub-block of all relevant sub-blocks
prev_sb: last-sub-block in the current epoch.
prev epoch surpassed prev epoch started epoch sur. epoch started
v v v v
|.B...B....B. B....B...|......B....B.....B...B.|.B.B.B..|..B...B.B.B...|.B.B.B. B.|........
PREV EPOCH CURR EPOCH NEW EPOCH
The sub-blocks selected for the timestamps are the last sub-block which is also a block, and which is infused
before the final sub-block in the epoch. Block at height 0 is an exception.
# TODO: check edge cases here
"""
height_in_next_epoch = prev_sb.sub_block_height + constants.MAX_SUB_SLOT_SUB_BLOCKS + 3
height_epoch_surpass: uint32 = uint32(height_in_next_epoch - (height_in_next_epoch % constants.EPOCH_SUB_BLOCKS))
height_prev_epoch_surpass: uint32 = uint32(height_epoch_surpass - constants.EPOCH_SUB_BLOCKS)
if (height_in_next_epoch - height_epoch_surpass) > (3 * constants.MAX_SUB_SLOT_SUB_BLOCKS):
raise ValueError(
f"Height at {prev_sb.sub_block_height + 1} should not create a new epoch, it is far past the epoch barrier"
)
if height_prev_epoch_surpass == 0:
# The genesis block is an edge case, where we measure from the first block in epoch (height 0), as opposed to
# the last sub-block in the previous epoch, which would be height -1
return _get_blocks_at_height(sub_height_to_hash, sub_blocks, prev_sb, uint32(0))[0]
# If the prev slot is the first slot, the iterations start at 0
# We will compute the timestamps of the last block in epoch, as well as the total iterations at infusion
first_sb_in_epoch: SubBlockRecord
prev_slot_start_iters: uint128
prev_slot_time_start: uint64
fetched_blocks = _get_blocks_at_height(
sub_height_to_hash,
sub_blocks,
prev_sb,
uint32(height_prev_epoch_surpass - constants.MAX_SUB_SLOT_SUB_BLOCKS - 1),
uint32(2 * constants.MAX_SUB_SLOT_SUB_BLOCKS + 1),
)
# This is the last sb in the slot at which we surpass the height. The last block in epoch will be before this.
fetched_index: int = constants.MAX_SUB_SLOT_SUB_BLOCKS
last_sb_in_slot: SubBlockRecord = fetched_blocks[fetched_index]
fetched_index += 1
assert last_sb_in_slot.sub_block_height == height_prev_epoch_surpass - 1
curr_b: SubBlockRecord = fetched_blocks[fetched_index]
assert curr_b.sub_block_height == height_prev_epoch_surpass
# Wait until the slot finishes with a challenge chain infusion at start of slot
# Note that there are no overflow blocks at the start of new epochs
while curr_b.sub_epoch_summary_included is None:
last_sb_in_slot = curr_b
curr_b = fetched_blocks[fetched_index]
fetched_index += 1
# Backtrack to find the last block before the signage point
curr_b = sub_blocks[last_sb_in_slot.prev_hash]
while curr_b.total_iters > last_sb_in_slot.sp_total_iters(constants) or not curr_b.is_block:
curr_b = sub_blocks[curr_b.prev_hash]
return curr_b | 63742082b6cfb65c5683a0047531b039d3841219 | 5,699 |