content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
def _is_domain_interval(val):
""" Check if a value is representing a valid domain interval
Args:
val: Value to check
Returns:
True if value is a tuple representing an interval
"""
if not isinstance(val, tuple):
return False
    vl = len(val)
    if vl < 2:
        return False
    if not (is_int(val[0]) and is_int(val[1]) and (val[1] >= val[0])):
        return False
    if vl == 2:
        return True
if vl == 3:
return val[2] == _HOLE_MARKER
return False | 3de16ddc26429be14ab84825f659dcf05f89f1f3 | 18,379 |
def rndcaps(n):
"""
Generates a string of random capital letters.
Arguments:
n: Length of the output string.
Returns:
A string of n random capital letters.
"""
return "".join([choice(_CAPS) for c in range(n)]) | 0661de89cc1abbbc678f7764f90674f3e5fb7282 | 18,380 |
def cutByWords(text, chunkSize, overlap, lastProp):
"""
    Cuts the text into equally sized chunks, where the segment size is measured by counts of words,
    with an option for an amount of overlap between chunks and a minimum proportion
    threshold for the last chunk.
Args:
text: The string with the contents of the file.
chunkSize: The size of the chunk, in words.
overlap: The number of words to overlap between chunks.
lastProp: The minimum proportional size that the last chunk has to be.
Returns:
        A list of strings that the text has been cut into.
"""
chunkList = [] # The list of the chunks (a.k.a a list of list of strings)
chunkSoFar = Queue() # The rolling window representing the (potential) chunk
currChunkSize = 0 # Index keeping track of whether or not it's time to make a chunk out of the window
tillNextChunk = chunkSize - overlap # The distance between the starts of chunks
splitText = splitKeepWhitespace(text)
# Create list of chunks (chunks are lists of words and whitespace) by using a queue as a rolling window
for token in splitText:
if token in WHITESPACE:
chunkSoFar.put(token)
else:
currChunkSize += 1
if currChunkSize > chunkSize:
chunkList.append(list(chunkSoFar.queue))
stripLeadingWords(wordQueue=chunkSoFar, numWords=tillNextChunk)
currChunkSize -= tillNextChunk
chunkSoFar.put(token)
# Making sure the last chunk is of a sufficient proportion
lastChunk = list(chunkSoFar.queue) # Grab the final (partial) chunk
if (float(countWords(lastChunk)) / chunkSize) < lastProp: # If the proportion of the last chunk is too low
if len(chunkList)==0:
chunkList.extend(lastChunk)
else:
chunkList[-1].extend(lastChunk)
else:
chunkList.append(lastChunk)
# Make the list of lists of strings into a list of strings
countSubList = 0
stringList=[]
for subList in chunkList:
stringList.extend([''.join(subList)])
if type(subList) is ListType:
countSubList+=1
    # Handle the case where chunkList contains no sublists (the text was shorter than one chunk)
if countSubList==0:
stringList = []
stringList.extend([''.join(chunkList)])
return stringList | 0767eeab983f0d21a9fa14527a3962405019e110 | 18,381 |
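The snippet above depends on project helpers (`splitKeepWhitespace`, `WHITESPACE`, `countWords`, `stripLeadingWords`, `ListType`) that aren't shown. The following is a self-contained sketch of the same rolling-window idea using a plain word split (whitespace is not preserved), for illustration only:

```python
from collections import deque


def cut_by_words_sketch(text, chunk_size, overlap, last_prop):
    """Illustration of the rolling-window chunking idea (whitespace is not preserved)."""
    words = text.split()
    step = chunk_size - overlap          # distance between the starts of consecutive chunks
    window = deque()
    chunks = []
    for word in words:
        window.append(word)
        if len(window) > chunk_size:
            chunks.append(list(window)[:-1])   # emit a full chunk (everything before the new word)
            for _ in range(step):              # slide the window forward by `step` words
                window.popleft()
    last = list(window)
    if chunks and len(last) / chunk_size < last_prop:
        chunks[-1].extend(last)                # fold a too-small tail into the previous chunk
    else:
        chunks.append(last)
    return [" ".join(c) for c in chunks]


print(cut_by_words_sketch("one two three four five six seven eight", 3, 1, 0.5))
# ['one two three', 'three four five', 'five six seven', 'seven eight']
```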
def dsu_sort2(list, index, reverse=False):
"""
This function sorts only based on the primary element, not on secondary elements in case of equality.
"""
    for i, e in enumerate(list):
        list[i] = (e[index], i, e)  # decorate; the position i keeps ties from comparing the rest of e
    if reverse:
        list.sort(reverse=True)
    else:
        list.sort()
    for i, e in enumerate(list):
        list[i] = e[2]  # undecorate
return list | 3fb614ac732eb790caf8f7d209c4e14022b8352a | 18,382 |
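A small usage sketch with hypothetical data, assuming the function above is in scope; note that it sorts the list in place and also returns it:

```python
rows = [("b", 2), ("a", 3), ("c", 1)]
dsu_sort2(rows, index=1)
print(rows)  # [('c', 1), ('b', 2), ('a', 3)]

dsu_sort2(rows, index=1, reverse=True)
print(rows)  # [('a', 3), ('b', 2), ('c', 1)]
```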
import functools
def roca_view(full, partial, **defaults):
"""
    Render the partial template for XHR requests and the full template otherwise
"""
templ = defaults.pop('template_func', template)
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if request.is_xhr:
tpl_name = partial
else:
tpl_name = full
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return templ(tpl_name, **tplvars)
elif result is None:
return templ(tpl_name, defaults)
return result
return wrapper
return decorator | eed45a5fc201667744cfe213ec61f5fba546d70b | 18,383 |
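A hypothetical usage sketch with Bottle, which is where the `template`, `request`, and `DictMixin` names used above typically come from; the route and template names are made up:

```python
from bottle import Bottle, request, template  # the snippet also expects DictMixin in scope

app = Bottle()


@app.route('/items')
@roca_view('items_page.tpl', 'items_fragment.tpl', template_func=template)
def list_items():
    # XHR requests get only the fragment; normal requests get the full page.
    return {'items': ['a', 'b', 'c']}
```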
async def _shuffle(s, workers, dfs_nparts, dfs_parts, column):
"""
Parameters
----------
s: dict
Worker session state
workers: set
Set of ranks of all the participants
dfs_nparts: list of dict
        List of dict that for each worker rank specifies the
number of partitions that worker has. If the worker doesn't
have any partitions, it is excluded from the dict.
E.g. `dfs_nparts[0][1]` is how many partitions of the "left"
dataframe worker 1 has.
dfs_parts: list of lists of Dataframes
List of inputs, which in this case are two dataframe lists.
column : label or list, or array-like
The bases of the rearrangement.
"""
assert s["rank"] in workers
df_parts = dfs_parts[0]
    # Trimming such that all participating workers get a rank within 0..len(workers)
trim_map = {}
for i in range(s["nworkers"]):
if i in workers:
trim_map[i] = len(trim_map)
rank = trim_map[s["rank"]]
eps = {trim_map[i]: s["eps"][trim_map[i]] for i in workers if i != s["rank"]}
df = df_concat(df_parts)
return await shuffle(len(workers), rank, eps, df, column) | 7b9d8e7dc6687ee5fb661bb0912c6288f3473af9 | 18,384 |
def play_game(board:GoBoard):
"""
    Run a simulation game to the end from the current board
"""
while True:
# play a random move for the current player
color = board.current_player
move = GoBoardUtil.generate_random_move(board,color)
board.play_move(move, color)
# current player is passing
if move is None:
break
# get winner
winner = GoBoardUtil.opponent(color)
return winner | 3fe2d050e9835bdbabbc81b999edbce7fa0c96d1 | 18,385 |
def logical_array(ar):
"""Convert ndarray (int, float, bool) to array of 1 and 0's"""
out = ar.copy()
out[out!=0] = 1
return out | 74d96d519929ed7f5ddfd92b0fbcef4741a38359 | 18,386 |
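A quick NumPy example for the snippet above; the output keeps the input dtype:

```python
import numpy as np

a = np.array([[0.0, -2.5, 3.0],
              [0.0, 1.0, 0.0]])
print(logical_array(a))
# [[0. 1. 1.]
#  [0. 1. 0.]]
```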
from datetime import datetime
import requests
def otp_route(
in_gdf,
mode,
date_time = datetime.now(),
trip_name = '',
):
"""
Return a GeoDataFrame with detailed trip information for the best option.
Parameters
----------
in_gdf : GeoDataFrame
        It should only contain two records; the first record is the origin and
        the second record is the destination. If there are more than two records, only
        the first two records are considered.
mode : string
Indicates transport modes. Modes that can be used
include 'public_transport', 'car_in_traffic', 'car_free_flow',
'walk', 'cycle'
trip_name : string
gives the trip a name which is stored in the trip_name in output
GeoDataFrame.
date_time : a datetime object
Sets the start time of a trip. Only important if the mode is
transit or a subset of transit.
Returns
-------
GeoDataFrame
Has the structure
-``trip_name`` the name given as an input to the trip.
-``leg_id`` A counter for each trip leg
-``mode`` returns the mode for each trip leg
        -``from`` the shapely point data in WGS84 for the origin location
        -``from_name`` the interim stop id on the network or 'Origin'
        -``to`` the shapely point data in WGS84 for the destination location
        -``to_name`` the interim stop id on the network or 'Destination'
-``route_id`` the route id for the trip leg if the mode is transit
-``trip_id`` the trip id for the trip leg if the mode is transit
-``distance`` Distance traveled in meters for the trip leg
-``duration`` Travel time for the trip leg in seconds
-``startTime`` time stamp for the start time of the trip leg
-``endTime`` time stamp for the end time of the trip leg
-``waitTime`` Wait time for the trip leg in seconds
        -``geometry`` The geometry of the trip leg as a shapely object in WGS84
"""
# The mode parameter is not validated by the Maps API
# Check here to prevent silent failures.
if mode not in list(cs.otp_modes.keys()):
raise ValueError("{0} is an invalid travel mode.".format(mode))
if in_gdf.crs.name != 'WGS 84':
        # Check that the coordinate system is WGS84
raise ValueError("Invalid coordinate system.")
if mode == 'public_transport' and not date_time:
date_time = datetime.now()
#get from and to location from locations_gdf
orig = in_gdf['geometry'].iat[0]
dest = in_gdf['geometry'].iat[-1]
orig_text = "{0}, {1}".format(orig.y, orig.x)
dest_text = "{0}, {1}".format(dest.y, dest.x)
t = date_time.strftime("%H:%M%p")
d = date_time.strftime("%m-%d-%Y")
#send query to api
url = 'http://localhost:8080/otp/routers/default/plan'
query = {
"fromPlace":orig_text,
"toPlace":dest_text,
"time":t,
"date":d,
"mode":cs.otp_modes[mode],
}
r = requests.get(url, params=query)
#check for request error
r.raise_for_status()
    #if there is an error then return an empty GeoDataFrame
    if not 'error' in r.json():
        #convert request output to a GeoDataFrame
df = pd.DataFrame(r.json()['plan']['itineraries'][0]['legs']).reset_index()
df = df.rename(columns={
'index': 'leg_id',
'mode': 'mode',
'routeId': 'route_id',
'tripId': 'trip_id',
'startTime': 'start_time',
'endTime': 'end_time',
'wait_time': 'waitTime',
})
df['geometry'] = df['legGeometry'].map(
lambda x: geom.LineString([(p['lng'], p['lat']) for p in decode_polyline(x['points'])])
)
df['from_name'] = df['from'].map(lambda x: x['stopId'] if 'stopId' in x else x['name'])
df['to_name'] = df['to'].map(lambda x: x['stopId'] if 'stopId' in x else x['name'])
df['from'] = df['from'].map(lambda x: geom.Point(x['lon'], x['lat']))
df['to'] = df['to'].map(lambda x: geom.Point(x['lon'], x['lat']))
df['start_time'] = df['start_time'].map(lambda x: datetime.fromtimestamp(x/1000))
df['end_time'] = df['end_time'].map(lambda x: datetime.fromtimestamp(x/1000))
#calculate wait time
df['wait_time'] = df['start_time'].shift(-1)
df['wait_time'] = df['wait_time']-df['end_time']
df['trip_name'] = trip_name
for column in cs.columns:
if column not in df.columns.values:
df[column] = ''
#reorder the fields
df = df[cs.columns].copy()
gdf = gpd.GeoDataFrame(df, crs = cs.WGS84)
else:
gdf = gpd.GeoDataFrame()
gdf = gdf[gdf['geometry'].notnull()].copy()
return gdf | d19ec8e46b1480697cf0c9c7a83f0a859651b344 | 18,387 |
def tall_clutter(files, config,
clutter_thresh_min=0.0002,
clutter_thresh_max=0.25, radius=1,
max_height=2000., write_radar=True,
out_file=None, use_dask=False):
"""
Wind Farm Clutter Calculation
Parameters
----------
files : list
List of radar files used for the clutter calculation.
config : str
String representing the configuration for the radar.
Such possible configurations are listed in default_config.py
Other Parameters
----------------
    clutter_thresh_min : float
        Threshold value above which clutter values are considered clutter,
        as long as they are also below clutter_thresh_max.
    clutter_thresh_max : float
        Threshold value below which clutter values are considered clutter,
        as long as they are also above clutter_thresh_min.
radius : int
Radius of the area surrounding the clutter gate that will
be also flagged as clutter.
max_height: float
Maximum height above the radar to mark a gate as clutter.
    write_radar : bool
        Whether or not to write the clutter radar as a netCDF file.
        Default is True.
    out_file : string
        String of location and filename to write the radar object to,
        if write_radar is True.
    use_dask : bool
        Use dask instead of running stats for the calculation. This will
        reduce run time.
Returns
-------
clutter_radar : Radar
Radar object with the clutter field that was calculated.
This radar only has the clutter field, but maintains all
other radar specifications.
"""
field_names = get_field_names(config)
refl_field = field_names["reflectivity"]
vel_field = field_names["velocity"]
ncp_field = field_names["normalized_coherent_power"]
def get_reflect_array(file, first_shape):
""" Retrieves a reflectivity array for a radar volume. """
try:
radar = pyart.io.read(file, include_fields=[refl_field,
ncp_field, vel_field])
reflect_array = deepcopy(radar.fields[refl_field]['data'])
ncp = radar.fields[ncp_field]['data']
height = radar.gate_z["data"]
up_in_the_air = height > max_height
the_mask = np.logical_or.reduce(
(ncp < 0.9, reflect_array.mask, up_in_the_air))
reflect_array = np.ma.masked_where(the_mask, reflect_array)
del radar
if reflect_array.shape == first_shape:
return reflect_array.filled(fill_value=np.nan)
except(TypeError, OSError):
print(file + ' is corrupt...skipping!')
return np.nan*np.zeros(first_shape)
if use_dask is False:
run_stats = _RunningStats()
first_shape = 0
for file in files:
try:
radar = pyart.io.read(file)
reflect_array = radar.fields[refl_field]['data']
ncp = deepcopy(radar.fields[ncp_field]['data'])
height = radar.gate_z["data"]
reflect_array = np.ma.masked_where(
np.logical_or(height > max_height, ncp < 0.8),
reflect_array)
if first_shape == 0:
first_shape = reflect_array.shape
clutter_radar = radar
run_stats.push(reflect_array)
if reflect_array.shape == first_shape:
run_stats.push(reflect_array)
del radar
except(TypeError, OSError):
print(file + ' is corrupt...skipping!')
continue
mean = run_stats.mean()
stdev = run_stats.standard_deviation()
clutter_values = stdev / mean
clutter_values = np.ma.masked_invalid(clutter_values)
clutter_values_no_mask = clutter_values.filled(
clutter_thresh_max + 1)
else:
cluster = LocalCluster(n_workers=20, processes=True)
client = Client(cluster)
first_shape = 0
i = 0
while first_shape == 0:
try:
radar = pyart.io.read(files[i])
reflect_array = radar.fields[refl_field]['data']
first_shape = reflect_array.shape
clutter_radar = radar
            except(TypeError, OSError):
                print(files[i] + ' is corrupt...skipping!')
                i = i + 1
continue
arrays = [delayed(get_reflect_array)(file, first_shape)
for file in files]
array = [da.from_delayed(a, shape=first_shape, dtype=float)
for a in arrays]
array = da.stack(array, axis=0)
print('## Calculating mean in parallel...')
mean = np.array(da.nanmean(array, axis=0))
print('## Calculating standard deviation...')
count = np.array(da.sum(da.isfinite(array), axis=0))
stdev = np.array(da.nanstd(array, axis=0))
clutter_values = stdev / mean
clutter_values = np.ma.masked_invalid(clutter_values)
clutter_values = np.ma.masked_where(np.logical_or(
clutter_values.mask, count < 20), clutter_values)
# Masked arrays can suck
clutter_values_no_mask = clutter_values.filled(
(clutter_thresh_max + 1))
shape = clutter_values.shape
mask = np.ma.getmask(clutter_values)
is_clutters = np.argwhere(
np.logical_and.reduce((clutter_values_no_mask > clutter_thresh_min,
clutter_values_no_mask < clutter_thresh_max,
)))
clutter_array = _clutter_marker(is_clutters, shape, mask, radius)
clutter_radar.fields.clear()
clutter_array = clutter_array.filled(0)
clutter_dict = _clutter_to_dict(clutter_array)
clutter_value_dict = _clutter_to_dict(clutter_values)
clutter_value_dict["long_name"] = "Clutter value (std. dev/mean Z)"
clutter_radar.add_field('ground_clutter', clutter_dict,
replace_existing=True)
clutter_radar.add_field('clutter_value', clutter_value_dict,
replace_existing=True)
if write_radar is True:
pyart.io.write_cfradial(out_file, clutter_radar)
del clutter_radar
return | 7eb26fbca4977e35f82f844f74d17269d7f80989 | 18,389 |
def serialize_bundle7(source_eid, destination_eid, payload,
report_to_eid=None, crc_type_primary=CRCType.CRC32,
creation_timestamp=None, sequence_number=None,
lifetime=300, flags=BlockProcFlag.NONE,
fragment_offset=None, total_adu_length=None,
hop_limit=30, hop_count=0, bundle_age=0,
previous_node_eid=None,
crc_type_canonical=CRCType.CRC16):
"""All-in-one function to encode a payload from a source EID
to a destination EID as BPbis bundle.
See create_bundle7 for a description of options."""
return bytes(create_bundle7(
source_eid, destination_eid, payload,
report_to_eid, crc_type_primary,
creation_timestamp, sequence_number,
lifetime, flags,
fragment_offset, total_adu_length,
hop_limit, hop_count, bundle_age,
previous_node_eid,
crc_type_canonical
)) | 712a2de8814e5a3bf7143b27aa2d0a6f360e7db4 | 18,390 |
def _get_chinese_week(localtime):
"""获取星期和提醒"""
chinese_week = ["一", "二", "三", "四", "五", "六", "日"]
tm_w_day = localtime.tm_wday
extra_msg = "<green>当前正是周末啦~</green>" if tm_w_day in [5, 6] else "Other"
if extra_msg == "Other":
go_week = 4 - tm_w_day
extra_msg = f"<yellow>还有 {go_week} 天周末</yellow>" if go_week != 0 else "<blue>明天就是周末啦~坚持摸鱼~</blue>"
return chinese_week[tm_w_day], extra_msg | 0a66bcf741c0d2e3cc9a238b5cb879c89333cc6b | 18,391 |
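A usage sketch: the function expects a `time.struct_time`, e.g. the current local time (output depends on the weekday):

```python
import time

week, reminder = _get_chinese_week(time.localtime())
print(f"周{week}", reminder)
```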
def resnext101_32x16d_swsl(cfg, progress=True, **kwargs):
"""Constructs a semi-weakly supervised ResNeXt-101 32x16 model pre-trained on 1B weakly supervised
image dataset and finetuned on ImageNet.
`"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
Args:
progress (bool): If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 16
return _resnext(semi_weakly_supervised_model_urls['resnext101_32x16d'], Bottleneck,
[3, 4, 23, 3], cfg.pretrained, progress, **kwargs) | 8c99c076278adfacd7764db50824a196a29341e5 | 18,392 |
def leaderboard(players=None, N=DEFAULTN, filename="leaderboard.txt"):
""" Create a leaderboard, and optionally save it to a file """
logger.info("Generating a leaderboard for players: %r, N=%d", players, N)
ratings, allgames, players = get_ratings(players, N)
board, table = make_leaderboard(ratings, allgames, players)
    print(table)
if filename:
logger.info("Saving leaderboard to file: %s", filename)
with open(filename,"w") as f:
f.write(table)
f.write('\n')
return board, table | e5ae7dcd1fd3c54e008b8e472d36b0af0de29463 | 18,393 |
def m_college_type(seq):
"""
获取学校的类型信息
当学校的类型是985,211工程院校时:
:param seq:【“985,211工程院校”,“本科”】
:return:“985工程院校”
当学校的类型是211工程院校时:
:param seq:【“211工程院校”,“硕士”】
:return:“211工程院校”
当学校的类型是普通本科或者专科时:
如果获取的某人的学历信息是博士、硕士和本科时
输出的学校类型为普通本科
:param seq:【“****”,“硕士”】
:return:“普通本科”
如果获取的某个人的学历信息时专科时:
输出的学校类型为专科
:param seq:【“****”,“专科”】
:return:“专科”
"""
if "985" in seq[0]:
tmp = "985,211工程院校"
return tmp
elif "211" in seq[0] and "985" not in seq[0]:
tmp = "211工程院校"
return tmp
else:
if seq[1] in ["博士", "硕士", "本科"]:
tmp = "本科"
return tmp
else:
tmp = "专科"
return tmp | bf72f60c51a67dd3e18a7dd1957bc2beb4f933fd | 18,394 |
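A usage sketch for the snippet above; inputs follow the [school type, degree] format described in the docstring:

```python
print(m_college_type(["985,211工程院校", "本科"]))  # -> 985,211工程院校
print(m_college_type(["211工程院校", "硕士"]))      # -> 211工程院校
print(m_college_type(["普通本科院校", "专科"]))      # -> 专科
```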
from owslib.wcs import WebCoverageService
from lxml import etree
def get_raster_wcs(coordinates, geographic=True, layer=None):
"""Return a subset of a raster image from the local GeoServer via WCS 2.0.1 protocol.
    For geographic rasters, subsetting is based on WGS84 (Long, Lat) boundaries. If not geographic, subsetting is based
    on the projected coordinate system (Easting, Northing) boundaries.
Parameters
----------
coordinates : sequence
Geographic coordinates of the bounding box (left, down, right, up)
geographic : bool
If True, uses "Long" and "Lat" in WCS call. Otherwise uses "E" and "N".
layer : str
Layer name of raster exposed on GeoServer instance. E.g. 'public:CEC_NALCMS_LandUse_2010'
Returns
-------
bytes
A GeoTIFF array.
"""
(left, down, right, up) = coordinates
if geographic:
x, y = 'Long', 'Lat'
else:
x, y = 'E', 'N'
wcs = WebCoverageService('http://boreas.ouranos.ca/geoserver/ows', version='2.0.1')
try:
resp = wcs.getCoverage(identifier=[layer, ],
format='image/tiff',
subsets=[(x, left, right), (y, down, up)])
except Exception as e:
raise Exception(e)
data = resp.read()
try:
etree.fromstring(data)
# The response is an XML file describing the server error.
raise ChildProcessError(data)
except etree.XMLSyntaxError:
# The response is the DEM array.
return data | 5ae378077b3dbe480ef9fae37030d953e156936e | 18,395 |
def del_local_name(*args):
"""
del_local_name(ea) -> bool
"""
return _ida_name.del_local_name(*args) | 8db5674e8eb3917c21f189ebfa82525482ff712f | 18,396 |
def solve_google_pdp(data):
"""Entry point of the program."""
# Create the routing index manager.
manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),
data['num_vehicles'], data['depot'])
# Create Routing Model.
routing = pywrapcp.RoutingModel(manager)
# Define cost of each arc.
def distance_callback(from_index, to_index):
"""Returns the manhattan distance between the two nodes."""
# Convert from routing variable Index to distance matrix NodeIndex.
from_node = manager.IndexToNode(from_index)
to_node = manager.IndexToNode(to_index)
return data['distance_matrix'][from_node][to_node]
transit_callback_index = routing.RegisterTransitCallback(distance_callback)
routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
# Add Distance constraint.
dimension_name = 'Distance'
routing.AddDimension(
transit_callback_index,
0, # no slack
3000, # vehicle maximum travel distance
True, # start cumul to zero
dimension_name)
distance_dimension = routing.GetDimensionOrDie(dimension_name)
# Define Transportation Requests.
for request in data['pickups_deliveries']:
pickup_index = manager.NodeToIndex(request[0])
delivery_index = manager.NodeToIndex(request[1])
routing.AddPickupAndDelivery(pickup_index, delivery_index)
routing.solver().Add(
routing.VehicleVar(pickup_index) == routing.VehicleVar(
delivery_index))
routing.solver().Add(
distance_dimension.CumulVar(pickup_index) <=
distance_dimension.CumulVar(delivery_index))
# Setting first solution heuristic.
search_parameters = pywrapcp.DefaultRoutingSearchParameters()
search_parameters.first_solution_strategy = (
routing_enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION)
search_parameters.local_search_metaheuristic = (
routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)
search_parameters.time_limit.seconds = 30
search_parameters.log_search = True # Turn on Log for Algorithms
assignment = routing.SolveWithParameters(search_parameters)
g_result = meta.Chromosome(_instance)
g_result.genes = []
if assignment:
total_distance = 0
for vehicle_id in range(data['num_vehicles']):
index = routing.Start(vehicle_id)
plan_output = 'Route for vehicle {}:\n'.format(vehicle_id)
route_distance = 0
while not routing.IsEnd(index):
plan_output += ' {} -> '.format(manager.IndexToNode(index))
previous_index = index
index = assignment.Value(routing.NextVar(index)) # Input Tasks
if manager.IndexToNode(index) != 0:
g_result.genes.append([manager.IndexToNode(index), vehicle_id + 1, False])
route_distance += routing.GetArcCostForVehicle(
previous_index, index, vehicle_id)
plan_output += '{}\n'.format(manager.IndexToNode(index))
plan_output += 'Distance of the route: {}m\n'.format(route_distance)
print(plan_output)
total_distance += route_distance
print('Total Distance of all routes: {}m'.format(total_distance))
meta.evaluate(g_result)
return g_result | 2b269dbca031c0cf98f51c2e6a493ba7df71a1d6 | 18,397 |
from typing import Any
def fetch_net(args: Any,
num_tasks: int,
num_cls: int,
dropout: float = 0.3):
"""
    Create a neural network to train
"""
if "mnist" in args.dataset:
inp_chan = 1
pool = 2
l_size = 80
elif args.dataset == "mini_imagenet":
inp_chan = 3
pool = 3
l_size = 320
elif "cifar" in args.dataset:
inp_chan = 3
pool = 2
l_size = 320
else:
raise NotImplementedError
if args.model == "wrn16_4":
net = WideResNetMultiTask(depth=16, num_task=num_tasks,
num_cls=num_cls, widen_factor=4,
drop_rate=dropout, inp_channels=inp_chan)
elif args.model == "conv":
net = SmallConv(num_task=num_tasks, num_cls=num_cls,
channels=inp_chan, avg_pool=pool,
lin_size=l_size)
else:
raise ValueError("Invalid network")
if args.gpu:
net.cuda()
return net | cbc3abb5140060ef52a69e44883a743249b5cd5e | 18,398 |
from typing import Dict
def are_models_specified(api_spec: Dict) -> bool:
"""
Checks if models have been specified in the API spec (cortex.yaml).
Args:
api_spec: API configuration.
"""
predictor_type = predictor_type_from_api_spec(api_spec)
if predictor_type == PythonPredictorType and api_spec["predictor"]["multi_model_reloading"]:
models = api_spec["predictor"]["multi_model_reloading"]
elif predictor_type != PythonPredictorType:
models = api_spec["predictor"]["models"]
else:
return False
return models is not None | 611ed794b45746e56bb8055a03251aa43d61d974 | 18,399 |
import json
def user_config(filename):
"""user-provided configuration file"""
try:
with open(filename) as file:
return json.loads(file.read(None))
except FileNotFoundError as fnf:
raise RuntimeError(f"File '{filename}' could not be found") from fnf
except json.JSONDecodeError as jsond:
raise RuntimeError(f"Error while parsing '{filename}'") from jsond | a6aa05d76b4aaa12c02ff97e4ab5ba4ba1245324 | 18,400 |
def _decomposed_dilated_conv2d(x, kernel_size, num_o, dilation_factor, name, top_scope, biased=False):
"""
Decomposed dilated conv2d without BN or relu.
"""
# padding so that the input dims are multiples of dilation_factor
H = tf.shape(x)[1]
W = tf.shape(x)[2]
pad_bottom = (dilation_factor - H % dilation_factor) if H % dilation_factor != 0 else 0
pad_right = (dilation_factor - W % dilation_factor) if W % dilation_factor != 0 else 0
pad = [[0, pad_bottom], [0, pad_right]]
# decomposition to smaller-sized feature maps
# [N,H,W,C] -> [N*d*d, H/d, W/d, C]
o = tf.space_to_batch(x, paddings=pad, block_size=dilation_factor)
# perform regular conv2d
num_x = x.shape[3].value
with tf.variable_scope(name) as scope:
w = tf.get_variable('weights', shape=[kernel_size, kernel_size, num_x, num_o])
s = [1, 1, 1, 1]
o = tf.nn.conv2d(o, w, s, padding='SAME')
if biased:
b = tf.get_variable('biases', shape=[num_o])
o = tf.nn.bias_add(o, b)
o = tf.batch_to_space(o, crops=pad, block_size=dilation_factor)
return o | 578d9308834294a80e778548faba9eb9fe0329c5 | 18,401 |
from datetime import datetime
from typing import Tuple
async def post_autodaily(text_channel: TextChannel, latest_message_id: int, change_mode: bool, current_daily_message: str, current_daily_embed: Embed, utc_now: datetime) -> Tuple[bool, bool, Message]:
"""
Returns (posted, can_post, latest_message)
"""
posted = False
if text_channel and current_daily_message:
error_msg_delete = f'could not delete message [{latest_message_id}] from channel [{text_channel.id}] on guild [{text_channel.guild.id}]'
error_msg_edit = f'could not edit message [{latest_message_id}] from channel [{text_channel.id}] on guild [{text_channel.guild.id}]'
error_msg_post = f'could not post a message in channel [{text_channel.id}] on guild [{text_channel.guild.id}]'
post_new = change_mode != server_settings.AutoDailyChangeMode.EDIT
can_post = True
latest_message: Message = None
use_embeds = await server_settings.get_use_embeds(None, bot=BOT, guild=text_channel.guild)
if use_embeds:
colour = utils.discord.get_bot_member_colour(BOT, text_channel.guild)
embed = current_daily_embed.copy()
embed.colour = colour
else:
embed = None
if can_post:
can_post, latest_message = await daily_fetch_latest_message(text_channel, latest_message_id)
if can_post:
if latest_message and latest_message.created_at.day == utc_now.day:
latest_message_id = latest_message.id
if change_mode == server_settings.AutoDailyChangeMode.DELETE_AND_POST_NEW:
try:
deleted = await utils.discord.try_delete_message(latest_message)
if deleted:
latest_message = None
utils.dbg_prnt(f'[post_autodaily] deleted message [{latest_message_id}] from channel [{text_channel.id}] on guild [{text_channel.guild.id}]')
else:
print(f'[post_autodaily] could not delete message [{latest_message_id}] from channel [{text_channel.id}] on guild [{text_channel.guild.id}]')
except errors.NotFound:
print(f'[post_autodaily] {error_msg_delete}: the message could not be found')
except errors.Forbidden:
print(f'[post_autodaily] {error_msg_delete}: the bot doesn\'t have the required permissions.')
can_post = False
except Exception as err:
print(f'[post_autodaily] {error_msg_delete}: {err}')
can_post = False
elif change_mode == server_settings.AutoDailyChangeMode.EDIT:
try:
if use_embeds:
await latest_message.edit(embed=embed)
else:
await latest_message.edit(content=current_daily_message)
posted = True
utils.dbg_prnt(f'[post_autodaily] edited message [{latest_message_id}] in channel [{text_channel.id}] on guild [{text_channel.guild.id}]')
except errors.NotFound:
print(f'[post_autodaily] {error_msg_edit}: the message could not be found')
except errors.Forbidden:
print(f'[post_autodaily] {error_msg_edit}: the bot doesn\'t have the required permissions.')
can_post = False
except Exception as err:
print(f'[post_autodaily] {error_msg_edit}: {err}')
can_post = False
else:
post_new = True
if not posted and can_post and post_new:
try:
if use_embeds:
latest_message = await text_channel.send(embed=embed)
else:
latest_message = await text_channel.send(current_daily_message)
posted = True
utils.dbg_prnt(f'[post_autodaily] posted message [{latest_message.id}] in channel [{text_channel.id}] on guild [{text_channel.guild.id}]')
except errors.Forbidden:
print(f'[post_autodaily] {error_msg_post}: the bot doesn\'t have the required permissions.')
can_post = False
except Exception as err:
print(f'[post_autodaily] {error_msg_post}: {err}')
can_post = False
else:
can_post = False
if latest_message:
return posted, can_post, latest_message
else:
return posted, can_post, None
else:
return posted, None, None | 309ad1c4554b2dcef6b2ddbcfd2d5652ea291488 | 18,402 |
def room_from_loc(env, loc):
"""
Get the room coordinates for a given location
"""
if loc == 'north':
return (1, 0)
if loc == 'south':
return (1, 2)
if loc == 'west':
return (0, 1)
if loc == 'east':
return (2, 1)
if loc == 'left':
return (1, 0)
if loc == 'right':
return (1, 2)
if loc == 'front':
return (2, 1)
if loc == 'behind':
return (0, 1)
# By default, use the central room
return (1, 1) | 75192c47fd8d4b56332b35ec7c3b355927e50ca2 | 18,403 |
def count_encoder(df, cols):
"""count encoding
Args:
df: カテゴリ変換する対象のデータフレーム
cols (list of str): カテゴリ変換する対象のカラムリスト
Returns:
pd.Dataframe: dfにカテゴリ変換したカラムを追加したデータフレーム
"""
out_df = pd.DataFrame()
for c in cols:
series = df[c]
vc = series.value_counts(dropna=False)
_df = pd.DataFrame(df[c].map(vc))
out_df = pd.concat([out_df, _df], axis=1)
out_df = out_df.add_suffix('_count_enc')
return pd.concat([df, out_df], axis=1) | c8e5b0995d2915871e7614099cf3260566f75f05 | 18,404 |
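A toy example for the snippet above (assumes `pandas` is imported as `pd` in the snippet's module):

```python
import pandas as pd

df = pd.DataFrame({"city": ["NY", "LA", "NY", "SF", "NY"]})
print(count_encoder(df, ["city"]))
#   city  city_count_enc
# 0   NY               3
# 1   LA               1
# 2   NY               3
# 3   SF               1
# 4   NY               3
```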
def wrap_response(response):
"""Wrap a tornado response as an open api response"""
mimetype = response.headers.get('Content-Type') or 'application/json'
return OpenAPIResponse(
data=response.body,
status_code=response.code,
mimetype=mimetype,
) | 38edef05e0d2d0ae80c235ed82f00080b86c6cb1 | 18,405 |
def shifted(x):
"""Shift x values to the range [-0.5, 0.5)"""
return -0.5 + (x + 0.5) % 1 | c40585748120af5d0acd85e4fed49f0575a92a3d | 18,406 |
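A few spot checks for the snippet above; 0.5 itself wraps to -0.5, so the range is half-open (results are subject to float rounding):

```python
print(shifted(0.7))   # ~ -0.3
print(shifted(-0.6))  # ~  0.4
print(shifted(0.5))   #   -0.5
```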
def computeAlignmentError(pP1, pP2, etype = 2, doPlot = False):
"""
Compute area-based alignment error. Assume that the
warping paths are on the same grid
:param pP1: Mx2 warping path 1
:param pP2: Nx2 warping path 2
    :param etype: Error type. 1 is area ratio.
        2 (default) is L1 Hausdorff distance
:param doPlot: Whether to plot the results
"""
P1 = rasterizeWarpingPath(pP1)
P2 = rasterizeWarpingPath(pP2)
score = 0
if etype == 1:
M = np.max(P1[:, 0])
N = np.max(P1[:, 1])
A1 = np.zeros((M, N))
A2 = np.zeros((M, N))
for i in range(P1.shape[0]):
[ii, jj] = [P1[i, 0], P1[i, 1]]
[ii, jj] = [min(ii, M-1), min(jj, N-1)]
A1[ii, jj::] = 1.0
for i in range(P2.shape[0]):
[ii, jj] = [P2[i, 0], P2[i, 1]]
[ii, jj] = [min(ii, M-1), min(jj, N-1)]
A2[ii, jj::] = 1.0
A = np.abs(A1 - A2)
score = np.sum(A)/(float(M)*float(N))
if doPlot:
plt.imshow(A)
plt.hold(True)
plt.scatter(pP1[:, 1], pP1[:, 0], 5, 'c', edgecolor = 'none')
plt.scatter(pP2[:, 1], pP2[:, 0], 5, 'r', edgecolor = 'none')
plt.title("Score = %g"%score)
else:
C = getCSM(np.array(P1, dtype = np.float32), np.array(P2, dtype = np.float32))
score = (np.sum(np.min(C, 0)) + np.sum(np.min(C, 1)))/float(P1.shape[0]+P2.shape[0])
if doPlot:
plt.scatter(P1[:, 1], P1[:, 0], 20, 'c', edgecolor = 'none')
plt.scatter(P2[:, 1], P2[:, 0], 20, 'r', edgecolor = 'none')
idx = np.argmin(C, 1)
for i in range(len(idx)):
plt.plot([P1[i, 1], P2[idx[i], 1]], [P1[i, 0], P2[idx[i], 0]], 'k')
plt.title("Score = %g"%score)
return score | cce79f3f1fa83475bc18f18004d8e2c79a8e59fa | 18,407 |
def _cumulative_grad(grad_sum, grad):
"""Apply grad sum to cumulative gradient."""
add = ops.AssignAdd()
return add(grad_sum, grad) | cb2b3ab6131fb4e289df29d33876f69c265c6e62 | 18,408 |
def run_node(node):
"""Python multiprocessing works strangely in windows. The pool function needed to be
defined globally
Args:
node (Node): Node to be called
Returns:
rslts: Node's call output
"""
return node.run_with_loaded_inputs() | a0f52020db20b4b67e83599bc0fb6c86ec2f9514 | 18,409 |
def getitimer(space, which):
"""getitimer(which)
Returns current value of given itimer.
"""
with lltype.scoped_alloc(itimervalP.TO, 1) as old:
c_getitimer(which, old)
return itimer_retval(space, old[0]) | 86716d3faedab3436bc1fcb5f77b80129884cf2d | 18,410 |
def substitute_T_and_RH_for_interpolated_dataset(dataset):
"""
Input :
dataset : Dataset interpolated along height
Output :
dataset : Original dataset with new T and RH
    Function to remove interpolated values of T and RH in the original dataset and
    replace them with new values of T and RH,
    calculated from the interpolated values of theta and q, respectively
"""
T = f3.calc_T_from_theta(dataset)
rh = f3.calc_rh_from_q(dataset, T=T)
dataset["ta"] = (dataset.p.dims, T)
dataset["rh"] = (dataset.p.dims, rh.values)
return dataset | fe2fca2ea3889fca17d2e676d1d2a95634ac1782 | 18,411 |
def get_base_required_fields():
""" Get required fields for base asset from UI.
Fields required for update only: 'id', 'uid', ['lastModifiedTimestamp', 'location', 'events', 'calibration']
Present in input, not required for output:
'coordinates', 'hasDeploymentEvent', 'augmented', 'deployment_numbers', 'deployment_number',
'Ref Des', 'depth',
2016-08-24: removed 'coordinates'
2016-08-26: removed 'augmented', 'Ref Des', 'remoteDocuments', 'hasDeploymentEvent',
2016-09-26: removed 'tense',
2016-10-11: removed 'tense',
"""
base_required_fields = [
'assetInfo',
'assetType',
'dataSource',
'deployment_numbers',
'deployment_number',
'depth',
'editPhase',
'latitude',
'longitude',
'manufactureInfo',
'mobile',
'notes',
'orbitRadius',
'partData',
'physicalInfo',
'purchaseAndDeliveryInfo',
'ref_des',
'remoteResources',
'uid'
]
return base_required_fields | 273c539d0c0b0da249e2bb171107aa775ce52ddf | 18,412 |
def reg_tab_ext(*model):
""" Performs weighted linear regression for various models building upon the model specified in section 4,
while additionally including education levels of a council candidate (university degree, doctoral/PhD degree)
A single model (i.e. function argument) takes on the form:
model=[df,polynomial, bw, dependant variable, bandwidth-type]
df: dataframe containing all relevant data
polynomial (str): "quadratic" includes quadratic values of "margin_1" and "inter_1" in regressionmodel;
default is "linear"
bw (float): specifying data to be included relative to cut-off point ("margin_1"=0)
dependant variable (str): name of dependant variable
bandwidth-type (str): method used to calculate bandwidth
:return: df containing results of regression
"""
# pd.set_option('mode.chained_assignment', None)
table = pd.DataFrame(
{'Model': [], 'Female Mayor': [], 'Std.err_Female Mayor': [], 'University': [], 'Std.err_University': [],
'PhD': [], 'Std.err_PhD': [], 'Bandwidth type': [], 'Bandwidth size': [], 'Polynomial': [],
'Observations': [], 'Elections': [], 'Municipalities': [],
'Mean': [], 'Std.err (Mean)': []})
table = table.set_index(['Model'])
for counter, i in enumerate(model):
data_i = subset_by_margin(i[0], i[2])
weight(data_i, i[2])
y = data_i[i[3]]
w = data_i["weight" + str(i[2]) + ""]
x = data_i[["female_mayor", "margin_1", "inter_1", 'university', 'phd']]
polynomial_i = str("Linear")
if i[1] == "quadratic":
x = data_i[["female_mayor", "margin_1", "inter_1", 'university', 'phd', "margin_2", "inter_2"]]
polynomial_i = str("Quadratic")
x = sm_api.add_constant(x)
wls_model = sm_api.WLS(y, x, missing='drop', weights=w)
results = wls_model.fit(cov_type='cluster', cov_kwds={'groups': data_i["gkz"]})
betas = [1, 2, 3]
cov = ["female_mayor", 'university', 'phd']
for j in cov:
betas[cov.index(j)] = significance_level(results.pvalues[j], results.params[(cov.index(j) + 1)].round(3))
bw_size_i = str(round(i[2], 2))
bw_type_i = str(i[4])
output = [betas[0], results.bse[1], betas[1], results.bse[4], betas[2], results.bse[5], bw_type_i, bw_size_i,
polynomial_i, results.nobs,
data_i["gkz_jahr"].value_counts().count(),
data_i["gkz"].value_counts().count(), y.mean().round(2), np.std(y)]
table.loc["(" + str(counter + 1) + ")"] = output
table = table.round(3)
return table | be61f85e918c2cf91b720b9495968c9c9f4f7b6e | 18,413 |
def load_pdf(filename: str) -> pd.DataFrame:
""" Read PDF dataset to pandas dataframe """
tables = tabula.read_pdf(basedir + '\\' + filename, pages="all")
merged_tables = pd.concat(tables[1:])
merged_tables.head()
return merged_tables | c3d7a1c5b78a62d6d6b822ecc2a90246e1c3a6aa | 18,414 |
def he_xavier(in_size: int, out_size: int, init_only=False):
"""
Xavier initialization according to Kaiming He in:
*Delving Deep into Rectifiers: Surpassing Human-Level
Performance on ImageNet Classification
(https://arxiv.org/abs/1502.01852)
"""
stddev = tf.cast(tf.sqrt(2 / in_size), tf.float32)
W = tf.random_normal([in_size, out_size], stddev=stddev)
b = tf.zeros([out_size])
if init_only:
return W, b
return tf.Variable(W, name="weights"), tf.Variable(b, name="biases") | f76501537a25226f1a3f3fcb0953438dbfaa996f | 18,415 |
def GET_v1_keyboards_build_log():
"""Returns a dictionary of keyboard/layout pairs. Each entry is a dictionary with the following keys:
* `works`: Boolean indicating whether the compile was successful
* `message`: The compile output for failed builds
"""
json_data = qmk_redis.get('qmk_api_configurator_status')
return jsonify(json_data) | b1e2c3f5da654987bdb7530be8f62effbf878613 | 18,416 |
def logprod(lst):
"""Computes the product of a list of numbers"""
return sum(log(i) for i in lst) | fd42df8ca7170f70453ef58d46035ec2ac6b6446 | 18,417 |
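The snippet above assumes `log` from the `math` module is in scope; a quick check that it returns the log of the product:

```python
from math import exp, log

print(logprod([2, 3, 4]))       # ~3.178, i.e. log(24)
print(exp(logprod([2, 3, 4])))  # ~24.0
```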
import torch
def nms(dets, iou_thr, device_id=None):
"""Dispatch to either CPU or GPU NMS implementations.
The input can be either a torch tensor or numpy array. GPU NMS will be used
if the input is a gpu tensor or device_id is specified, otherwise CPU NMS
will be used. The returned type will always be the same as inputs.
Arguments:
dets (torch.Tensor or np.ndarray): bboxes with scores.
iou_thr (float): IoU threshold for NMS.
device_id (int, optional): when `dets` is a numpy array, if `device_id`
is None, then cpu nms is used, otherwise gpu_nms will be used.
Returns:
tuple: kept bboxes and indice, which is always the same data type as
the input.
"""
# convert dets (tensor or numpy array) to tensor
if isinstance(dets, torch.Tensor):
is_numpy = False
dets_th = dets
elif isinstance(dets, np.ndarray):
is_numpy = True
device = 'cpu' if device_id is None else 'cuda:{}'.format(device_id)
dets_th = torch.from_numpy(dets).to(device)
else:
raise TypeError(
'dets must be either a Tensor or numpy array, but got {}'.format(
type(dets)))
# execute cpu or cuda nms
if dets_th.shape[0] == 0:
inds = dets_th.new_zeros(0, dtype=torch.long)
else:
if dets_th.is_cuda:
if dets_th.shape[1] == 7:
inds = nms_cuda.nms_3d(dets_th, iou_thr)
elif dets_th.shape[1] == 5:
inds = nms_cuda.nms(dets_th, iou_thr)
else:
inds = nms_cpu.nms(dets_th, iou_thr)
if is_numpy:
inds = inds.cpu().numpy()
return dets[inds, :], inds | 6a00022a6903fc73429cb0f893de3b5f018315e9 | 18,418 |
def render_template(template_name_or_list, **context):
"""Renders a template from the template folder with the given
context.
:param template_name_or_list: the name of the template to be
rendered, or an iterable with template names
the first one existing will be rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(
ctx.app.jinja_env.get_or_select_template(template_name_or_list),
context,
ctx.app,
) | a712c5db36a0ed512411c592c695ddff8a51e8fb | 18,419 |
def XCL(code, error, mag=0.0167, propagation='random', NEV=True, **kwargs):
"""
Dummy function to manage the ISCWSA workbook not correctly defining the
weighting functions.
"""
tortuosity = kwargs['tortuosity']
if code == "XCLA":
return XCLA(
code, error, mag=mag, propagation=propagation, NEV=NEV,
tortuosity=tortuosity
)
else:
return XCLH(
code, error, mag=mag, propagation=propagation, NEV=NEV,
tortuosity=tortuosity
) | 90efd4d07c66923d2b98739fd7684ac1ee5141e8 | 18,421 |
def to_bgr(image):
"""Convert image to BGR format
Args:
image: Numpy array of uint8
Returns:
bgr: Numpy array of uint8
"""
# gray scale image
if image.ndim == 2:
bgr = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
return bgr
# BGRA format
if image.shape[2] == 4:
bgr = cv2.cvtColor(image, cv2.COLOR_RGBA2BGR)
return bgr
bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
return bgr | 51d8455bb4060ff2db662a34846a4f957d33af81 | 18,422 |
import pathlib
import yaml
from yaml import scanner
def load(path: pathlib.Path) -> dict:
"""Load a YAML file, returning its contents.
:raises: RuntimeError
"""
with path.open() as handle:
try:
return yaml.safe_load(handle)
except scanner.ScannerError as error:
LOGGER.critical('Failed to parse YAML from %s: %s',
path, error)
raise RuntimeError('YAML parse failure') | d91ba626619ec25e2dbdbe35202b26cd43d27dc3 | 18,423 |
import warnings
def validate_settings(raw_settings):
"""Return cleaned settings using schemas collected from INSTALLED_APPS."""
# Perform early validation on Django's INSTALLED_APPS.
installed_apps = raw_settings['INSTALLED_APPS']
schemas_mapping = raw_settings.get('CONFIT_SCHEMAS', {})
# Create schema instance using INSTALLED_APPS.
settings_schema = composite_schema(
installed_apps=installed_apps,
mapping=schemas_mapping)
# Actually validate settings.
cleaned_settings = settings_schema.deserialize(raw_settings)
# Highlight differences between raw and cleaned settings.
# Warn users when raw settings contain directives that are not used in
# schemas.
raw_keys = set(raw_settings.keys())
cleaned_keys = set(cleaned_settings.keys())
unused_keys = raw_keys.difference(cleaned_keys)
if unused_keys:
warnings.warn(
'The following settings are mentioned in your configuration, but '
'are not in cleaned settings. They may be missing in '
'configuration schemas, or you do not need to set them up: \n'
'- {settings}'.format(settings='\n- '.join(unused_keys)),
Warning)
# Return.
return cleaned_settings | ba764cc54eee27a8317f21d6bd69ee85b9deadbf | 18,425 |
from torch.cuda.amp import autocast
def is_autocast_module_decorated(module: nn.Module):
"""
Return `True` if a nn.Module.forward was decorated with
torch.cuda.amp.autocast
"""
try:
decorators = _get_decorators(module.forward)
for d in decorators:
if isinstance(d, autocast):
return True
except:
pass
return False | 9f4db5438cb7e14ae20fd552a54121d9ce6c0d46 | 18,426 |
def timestamp_format_is_valid(timestamp: str) -> bool:
"""
Determines if the supplied timestamp is valid for usage with Graylog.
:param timestamp: timestamp that is to be checked
:return: whether the timestamp is valid (True) or invalid (False)
"""
try:
get_datetime_from_timestamp(timestamp)
except ValueError:
return False
return True | 9c1623bede646c9ebbd5cd4200db193437728590 | 18,427 |
def indices(n, dtype):
"""Indices of each element in upper/lower triangle of test matrix."""
size = tri.tri_n(n - 1)
return np.arange(size, dtype=dtype) | 0ecdcad0d66e268125826e0e415b410a07143b6c | 18,428 |
def _sample(n, k):
""" Select k number out of n without replacement unless k is greater than n
"""
if k > n:
return np.random.choice(n, k, replace=True)
else:
return np.random.choice(n, k, replace=False) | cde012459ddb64dc7700ec0238e222fd4d26d3a2 | 18,429 |
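A usage sketch: with k <= n the draw is without replacement, otherwise with replacement:

```python
import numpy as np

np.random.seed(0)
print(_sample(10, 3))  # three distinct values from 0..9
print(_sample(3, 5))   # five values from 0..2, repeats allowed
```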
def set_price_filter(request, category_slug):
"""Saves the given price filter to session. Redirects to the category with
given slug.
"""
req = request.POST if request.method == 'POST' else request.GET
try:
min_val = lfs.core.utils.atof(req.get("min", "0"))
except (ValueError):
min_val = 0
try:
max_val = lfs.core.utils.atof(req.get("max", "99999"))
except:
max_val = 0
try:
float(min_val)
except (TypeError, ValueError):
min_val = "0"
try:
float(max_val)
except (TypeError, ValueError):
max_val = "0"
request.session["price-filter"] = {"min": min_val, "max": max_val}
url = reverse("lfs_category", kwargs={"slug": category_slug})
return HttpResponseRedirect(url) | 1adf1798e7fbf290d98e1b47b94cd9c9038732fe | 18,430 |
def _get_config():
"""Returns a dictionary with server parameters, or ask them to the user"""
# tries to figure if we can authenticate using a configuration file
data = read_config()
# this does some sort of validation for the "webdav" data...
if "webdav" in data:
if (
"server" not in data["webdav"]
or "username" not in data["webdav"]
or "password" not in data["webdav"]
):
raise KeyError(
'If the configuration file contains a "webdav" '
"section, it should contain 3 variables defined inside: "
'"server", "username", "password".'
)
else:
# ask the user for the information, in case nothing available
logger.warn(
"Requesting server information for webDAV operation. "
"(To create a configuration file, and avoid these, follow "
"the Setup subsection at our Installation manual.)"
)
webdav_data = dict()
webdav_data["server"] = input("The base address of the server: ")
webdav_data["username"] = input("Username: ")
webdav_data["password"] = input("Password: ")
data["webdav"] = webdav_data
return data["webdav"] | c2105753cb4ae551bea53c0a2aaf0432dd275422 | 18,431 |
import requests
def lastfmcompare(text, nick, bot,):
"""[user] ([user] optional) - displays the now playing (or last played) track of LastFM user [user]"""
api_key = bot.config.get("api_keys", {}).get("lastfm")
if not api_key:
return "No last.fm API key set."
if not text:
return "please specify a lastfm username to compare"
try:
user1, user2 = text.split()
except:
user2 = text
user1 = nick
user2_check = get_account(user2)
if user2_check:
user2 = user2_check
user1_check = get_account(user1)
if user1_check:
user1 = user1_check
params = {
'method': 'tasteometer.compare',
'api_key': api_key,
'type1': 'user',
'value1': user1,
'type2': 'user',
'value2': user2
}
request = requests.get(api_url, params=params)
if request.status_code != requests.codes.ok:
return "Failed to fetch info ({})".format(request.status_code)
data = request.json()
if 'error' in data:
return "Error: {}.".format(data["message"])
score = float(data["comparison"]["result"]["score"])
score = float("{:.3f}".format(score * 100))
if score == 0:
return "{} and {} have no common listening history.".format(user2, user1)
level = "Super" if score > 95 else "Very High" if score > 80 else "High" if score > 60 else \
"Medium" if score > 40 else "Low" if score > 10 else "Very Low"
# I'm not even going to try to rewrite this line
artists = [f["name"] for f in data["comparison"]["result"]["artists"]["artist"]] if \
type(data["comparison"]["result"]["artists"]["artist"]) == list else \
[data["comparison"]["result"]["artists"]["artist"]["name"]] if "artist" \
in data["comparison"]["result"]["artists"] else ""
artist_string = "\x02In Common:\x02 " + \
", ".join(artists) if artists else ""
return "Musical compatibility between \x02{}\x02 and \x02{}\x02: {} (\x02{}%\x02) {}".format(user1, user2, level,
score, artist_string) | 42b23961f210b4004aac987ca1146ee748392949 | 18,432 |
def fourier_ellipsoid(inp, size, n=-1, axis=-1, output=None):
"""
Multidimensional ellipsoid Fourier filter.
    The array is multiplied with the Fourier transform of an ellipsoid of
given sizes.
Parameters
----------
inp : array_like
The inp array.
size : float or sequence
The size of the box used for filtering.
If a float, `size` is the same for all axes. If a sequence, `size` has
to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the inp is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the inp is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : Tensor, optional
If given, the result of filtering the inp is placed in this array.
None is returned in this case.
Returns
-------
fourier_ellipsoid : Tensor
The filtered inp.
Notes
-----
This function is implemented for arrays of rank 1, 2, or 3.
"""
inp = np.asarray(inp)
output = _get_output_fourier(output, inp)
axis = normalize_axis_index(axis, inp.ndim)
sizes = cndsupport._normalize_sequence(size, inp.ndim)
sizes = np.asarray(sizes, dtype=np.float64)
if not sizes.flags.contiguous:
sizes = sizes.copy()
cndi.fourier_filter(inp, sizes, n, axis, output, 2)
return output | 41128d7972bdb0cb6100991c1dc22031b7f3e6b3 | 18,433 |
def gaussian1D(x: np.ndarray, amplitude: Number, center: Number, stdev: Number) -> np.ndarray:
"""A one dimensional gaussian distribution.
= amplitude * exp(-0.5 (x - center)**2 / stdev**2)
"""
return amplitude * np.exp(-0.5 * (x - center)**2 / stdev**2) | 5c5c36ea71a08aec3246a2ca1dedf1d62c3fd331 | 18,434 |
def BuildImportLibs(flags, inputs_by_part, deffiles):
"""Runs the linker to generate an import library."""
import_libs = []
Log('building import libs')
for i, (inputs, deffile) in enumerate(zip(inputs_by_part, deffiles)):
libfile = 'part%d.lib' % i
flags_with_implib_and_deffile = flags + ['/IMPLIB:%s' % libfile,
'/DEF:%s' % deffile]
RunLinker(flags_with_implib_and_deffile, i, inputs, 'implib', None)
import_libs.append(libfile)
return import_libs | f29bcf16917cf8509662cf44c7881f8c7282b37d | 18,435 |
def gather_sparse(a, indices, axis=0, mask=None):
"""
SparseTensor equivalent to tf.gather, assuming indices are sorted.
:param a: SparseTensor of rank k and nnz non-zeros.
:param indices: rank-1 int Tensor, rows or columns to keep.
:param axis: int axis to apply gather to.
:param mask: boolean mask corresponding to indices. Computed if not provided.
:return gathered_a: SparseTensor masked along the given axis.
:return values_mask: bool Tensor indicating surviving values, shape [nnz].
"""
in_size = _square_size(a.dense_shape)
out_size = tf.size(indices)
if mask is None:
mask = ops.indices_to_mask(indices, in_size)
inverse_map = _indices_to_inverse_map(indices, in_size)
return _boolean_mask_sparse(
a, mask, axis=axis, inverse_map=inverse_map, out_size=out_size
) | 87e68a99c660448a11d32fa090ca3921552cd122 | 18,436 |
def Window(node, size=-1, full_only=False):
"""Lazy wrapper to collect a window of values. If a node is executed 3 times,
returning 1, 2, 3, then the window node will collect those values in a list.
Arguments:
node (node): input node
size (int): size of windows to use
full_only (bool): only return if list is full
"""
def foo(node=node, size=size, full_only=full_only):
if size == 0:
return node.value()
if ret._accum is None:
ret._accum = [node.value()]
elif ret.dependencyIsDirty(node):
ret._accum.append(node.value())
if size > 0:
ret._accum = ret._accum[-size:]
if full_only and len(ret._accum) == size:
return ret._accum
elif full_only:
return None
return ret._accum
# make new node
ret = node._gennode("Window[{}]".format(size if size > 0 else "∞"), foo, [node])
ret._accum = None
return ret | 1f85b576455f3b379e41a7247ff486281bf21f8f | 18,437 |
def fixed_rate_loan(amount, nrate, life, start, freq='A', grace=0,
dispoints=0, orgpoints=0, prepmt=None, balloonpmt=None):
"""Fixed rate loan.
Args:
amount (float): Loan amount.
nrate (float): nominal interest rate per year.
life (float): life of the loan.
start (int, tuple): init period for the loan.
pyr (int): number of compounding periods per year.
grace (int): number of periods of grace (without payment of the principal)
dispoints (float): Discount points of the loan.
orgpoints (float): Origination points of the loan.
prepmt (pandas.Series): generic cashflow representing prepayments.
balloonpmt (pandas.Series): generic cashflow representing balloon payments.
Returns:
A object of the class ``Loan``.
>>> pmt = cashflow(const_value=0, start='2016Q1', periods=11, freq='Q')
>>> pmt['2017Q4'] = 200
>>> fixed_rate_loan(amount=1000, nrate=10, life=10, start='2016Q1', freq='Q',
... grace=0, dispoints=0,
... orgpoints=0, prepmt=pmt, balloonpmt=None) # doctest: +NORMALIZE_WHITESPACE
Amount: 1000.00
Total interest: 129.68
Total payment: 1129.68
Discount points: 0.00
Origination points: 0.00
<BLANKLINE>
Beg_Ppal_Amount Nom_Rate Tot_Payment Int_Payment Ppal_Payment \\
2016Q1 1000.000000 10.0 0.000000 0.000000 0.000000
2016Q2 1000.000000 10.0 114.258763 25.000000 89.258763
2016Q3 910.741237 10.0 114.258763 22.768531 91.490232
2016Q4 819.251005 10.0 114.258763 20.481275 93.777488
2017Q1 725.473517 10.0 114.258763 18.136838 96.121925
2017Q2 629.351591 10.0 114.258763 15.733790 98.524973
2017Q3 530.826618 10.0 114.258763 13.270665 100.988098
2017Q4 429.838520 10.0 314.258763 10.745963 303.512800
2018Q1 126.325720 10.0 114.258763 3.158143 111.100620
2018Q2 15.225100 10.0 15.605727 0.380627 15.225100
2018Q3 0.000000 10.0 0.000000 0.000000 0.000000
<BLANKLINE>
End_Ppal_Amount
2016Q1 1000.000000
2016Q2 910.741237
2016Q3 819.251005
2016Q4 725.473517
2017Q1 629.351591
2017Q2 530.826618
2017Q3 429.838520
2017Q4 126.325720
2018Q1 15.225100
2018Q2 0.000000
2018Q3 0.000000
"""
if not isinstance(float(nrate), float):
TypeError('nrate must be a float.')
nrate = interest_rate(const_value=nrate, start=start, periods=life+grace+1, freq=freq)
if prepmt is None:
prepmt = cashflow(const_value=0, start=start, periods=len(nrate), freq=freq)
else:
verify_period_range([nrate, prepmt])
if balloonpmt is None:
balloonpmt = nrate.copy()
balloonpmt[:] = 0
else:
verify_period_range([nrate, balloonpmt])
# present value of the balloon payments
if balloonpmt is not None:
balloonpv = timevalue(cflo=balloonpmt, prate=nrate, base_date=grace)
else:
balloonpv = 0
pyr = getpyr(nrate)
pmt = pvpmt(pmt=None, pval=-amount+balloonpv, nrate=nrate[0], nper=len(nrate)-1, pyr=pyr)
pmts = nrate.copy()
pmts[:] = 0
for time in range(1, life + 1):
pmts[grace + time] = pmt
# balance
begppalbal = nrate.copy()
intpmt = nrate.copy()
ppalpmt = nrate.copy()
totpmt = nrate.copy()
endppalbal = nrate.copy()
begppalbal[:] = 0
intpmt[:] = 0
ppalpmt[:] = 0
totpmt[:] = 0
endppalbal[:] = 0
# payments per period
for time, _ in enumerate(totpmt):
totpmt[time] = pmts[time] + balloonpmt[time] + prepmt[time]
# balance calculation
for time in range(grace + life + 1):
if time == 0:
begppalbal[0] = amount
endppalbal[0] = amount
totpmt[time] = amount * (dispoints + orgpoints) / 100
### intpmt[time] = amount * dispoints / 100
else:
begppalbal[time] = endppalbal[time - 1]
if time <= grace:
intpmt[time] = begppalbal[time] * nrate[time] / pyr / 100
totpmt[time] = intpmt[time]
endppalbal[time] = begppalbal[time]
else:
intpmt[time] = begppalbal[time] * nrate[time] / pyr / 100
ppalpmt[time] = totpmt[time] - intpmt[time]
if ppalpmt[time] < 0:
capint = - ppalpmt[time]
ppalpmt[time] = 0
else:
capint = 0
endppalbal[time] = begppalbal[time] - ppalpmt[time] + capint
if endppalbal[time] < 0:
totpmt[time] = begppalbal[time] + intpmt[time]
ppalpmt[time] = begppalbal[time]
endppalbal[time] = begppalbal[time] - ppalpmt[time]
pmts[time] = 0
prepmt[time] = 0
data = {'Beg_Ppal_Amount':begppalbal}
result = Loan(life=life, amount=amount, grace=grace, nrate=nrate,
dispoints=dispoints, orgpoints=orgpoints,
data=data)
result['Nom_Rate'] = nrate
result['Tot_Payment'] = totpmt
result['Int_Payment'] = intpmt
result['Ppal_Payment'] = ppalpmt
result['End_Ppal_Amount'] = endppalbal
return result | 10681a99ec381ec64517891d8d1101ed5eae78f4 | 18,438 |
import json
def get_results():
"""
Returns the scraped results for a set of inputs.
Inputs:
The URL, the type of content to scrap and class/id name.
This comes from the get_results() function in script.js
Output:
Returns a JSON list of the results
"""
# Decode the json data and turn it into a python dict
post_data = json.loads(request.data.decode())
# Extract the inputs from the JSON data
req_url = post_data.get('url')
req_type = post_data.get('type')
req_selector = post_data.get('selector')
results = []
# Each of the types of extraction is handled here
if req_type == 'head':
results = Webpage(req_url).get_head_tag()
elif req_type == 'content':
results = Webpage(req_url).get_all_contents()
elif req_type == 'class':
results = Webpage(req_url).get_content_by_class(req_selector)
elif req_type == 'id':
results = Webpage(req_url).get_content_by_id(req_selector)
elif req_type == 'images':
results = Webpage(req_url).get_all_images()
# The scraped results are turned into JSON format
# and sent to the frontend
serialized = json.dumps(results)
return serialized | 59af271fc024854258c488f17489383f424dfae3 | 18,439 |
def _mocked_presets(*args, **kwargs):
"""Return a list of mocked presets."""
return [MockPreset("1")] | ebf48fb23dff67b2d1a9faac6e72764f2a5f8f0a | 18,440 |
def play(context, songpos=None):
"""
*musicpd.org, playback section:*
``play [SONGPOS]``
Begins playing the playlist at song number ``SONGPOS``.
The original MPD server resumes from the paused state on ``play``
without arguments.
*Clarifications:*
- ``play "-1"`` when playing is ignored.
- ``play "-1"`` when paused resumes playback.
- ``play "-1"`` when stopped with a current track starts playback at the
current track.
- ``play "-1"`` when stopped without a current track, e.g. after playlist
replacement, starts playback at the first track.
*BitMPC:*
- issues ``play 6`` without quotes around the argument.
"""
if songpos is None:
return context.core.playback.play().get()
elif songpos == -1:
return _play_minus_one(context)
try:
tl_track = context.core.tracklist.slice(songpos, songpos + 1).get()[0]
return context.core.playback.play(tl_track).get()
except IndexError:
raise exceptions.MpdArgError('Bad song index') | fc2cee0b3cca2df33b844004ecaaaa1b0eaa5347 | 18,441 |
from typing import Union
import numpy as np
def SMLB(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""
Taken from Minerbo, G. N. and Levy, M. E., "Inversion of Abel’s integral equation by means of orthogonal polynomials.",
SIAM J. Numer. Anal. 6, 598-616 and swapped to satisfy SMLB(0) = 0.
"""
return (np.where((x > 0.00000000001), 1.241 * np.multiply(np.power(2 * x - x ** 2, -1.5),
np.exp(1.21 * (1 - np.power(2 * x - x ** 2, -1)))),
0)) / 0.9998251040790366 | ed9b59ccbf99458796d11521825ba6ab0215144d | 18,442 |
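A minimal usage sketch for the profile above, simply evaluating it element-wise on a few points in (0, 1]:
x = np.array([0.25, 0.5, 0.75, 1.0])
print(SMLB(x))  # element-wise evaluation; SMLB(0) returns 0 by construction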
def add_colon(in_str):
"""Add colon after every 4th character."""
return ':'.join([in_str[i:i+4] for i in range(0, len(in_str), 4)]) | fa4258aa9d684a087d2a81ae09a2702d6e58e3e1 | 18,443 |
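A quick illustrative call with a hypothetical hex string, showing the grouping into blocks of four characters:
print(add_colon("20010db885a3"))  # -> 2001:0db8:85a3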
def fetch_partial_annotations():
""" Returns the partial annotations as an array
Returns:
partial_annotations: array of annotation data - [n_annotations, 5]
row format is [T, L, X, Y, Z]
"""
raw_mat = loadmat(PARTIAL_ANNOTATIONS_PATH)
annotations = raw_mat['divisionAnnotations']
# chop extra mystery column
return annotations[:, :-1] | 69d57df06576af141dcc0eb9b00c7834e1a4a2c2 | 18,444 |
def get_alt_pos_info(rec):
"""Returns info about the second-most-common nucleotide at a position.
This nucleotide will usually differ from the reference nucleotide, but it
may be the reference (i.e. at positions where the reference disagrees with
the alignment's "consensus").
This breaks ties arbitrarily.
Parameters
==========
rec: dict
pysamstats record for a given position in an alignment produced
by stat_variation().
Returns
=======
(cov, alt nt freq, alt nt): tuple of (int, int, str)
Describes the second-most-common nucleotide at a position.
The first entry in this tuple is the (mis)match coverage at this
position. This is an integer defined as the sum of A, C, G, T
nucleotides at this position (note that this excludes degenerate
nucleotides like N -- we could change this in the future if that'd be
useful, I suppose). Note that this coverage could be zero, if no reads
are aligned to this specific position.
The second entry is the raw frequency of this nucleotide
at this position: this will be an integer greater than or equal to 0.
This is also referred to in the paper, etc. as alt(pos).
The third entry is just the alternate nucleotide (one of A, C, G, T),
represented as a string. This is returned for reference -- as of
writing this isn't actually needed for Pleuk itself, but I have other
code outside of Pleuk that benefits from this!
"""
cov = rec["A"] + rec["C"] + rec["G"] + rec["T"]
ordered_nts = sorted("ACGT", key=rec.get)
# The literal nucleotide used in the numerator of freq(pos): one of A, C,
# G, T
alt_nt = ordered_nts[-2]
# The raw frequency (in counts) of alt_nt. An integer >= 0.
alt_nt_freq = rec[alt_nt]
return (cov, alt_nt_freq, alt_nt) | 3abe3fcbbf0ddbccb44025f2e476f77dc3e8abf9 | 18,445 |
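A small sanity check with a hand-built record standing in for a pysamstats row (only the A/C/G/T counts are used here); the counts below are made up for illustration:
rec = {"A": 10, "C": 3, "G": 1, "T": 0}
print(get_alt_pos_info(rec))  # (14, 3, 'C'): coverage 14, second-most-common nucleotide is C with 3 reads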
import torch
def accuracy(output, target, topk=(1,)):
""" Computes the accuracy over the k top predictions for the specified
values of k.
"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res | d2edbbff872670f1637696e63fe448a749138985 | 18,446 |
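A quick sanity check with made-up logits for a batch of three samples over four classes; the third sample is only correct within the top-2 set:
output = torch.tensor([[0.1, 0.6, 0.2, 0.1],
                       [0.8, 0.1, 0.05, 0.05],
                       [0.4, 0.3, 0.2, 0.1]])
target = torch.tensor([1, 0, 1])
top1, top2 = accuracy(output, target, topk=(1, 2))
print(top1.item(), top2.item())  # ~66.67 and 100.0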
import numpy as np
from warnings import warn
def grim(n, mu, prec=2, n_items=1):
"""
Test that a mean mu reported with a decimal precision prec is possible, given a number of observations n and a
number of items n_items.
:param n: The number of observations
:param mu: The mean
:param prec: The precision (i.e., number of decimal places) of the mean
:param n_items: The number of scale items that were averaged. Default is 1.
:return: True if the mean is possible, False otherwise.
"""
if n*n_items >= 10**prec:
warn("The effective number of data points is such that GRIM will always find a solution.")
cval = np.round(mu * n * n_items, 0)
valid = np.round(cval/n/n_items, prec) == np.round(mu, prec)
return valid | 093fdea1b59157b477642b31afa8388192188020 | 18,447 |
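A worked example of the GRIM test: with n = 20 single-item observations, a reported mean of 3.45 is attainable (69/20), while 3.48 is not, since the closest attainable means are 69/20 = 3.45 and 70/20 = 3.50:
print(grim(20, 3.45))  # True  -> 3.45 is consistent with n = 20
print(grim(20, 3.48))  # False -> no integer sum of 20 values rounds to 3.48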
import numpy as np
import pandas as pd
def split_dataframe(df: pd.DataFrame, split_index: np.ndarray):
"""
Split out the continuous variables from a dataframe \n
Params:
df : Pandas dataframe
split_index : Indices of continuous variables
"""
return df.loc[:,split_index].values | 842f9b04d0d546b8bef28f0b110e7d570eb8f0a0 | 18,448 |
def user_select_columns():
"""
Useful columns from the users table, omitting authentication-related
columns like password.
"""
u = orm.User.__table__
return [
u.c.id,
u.c.user_name,
u.c.email,
u.c.first_name,
u.c.last_name,
u.c.org,
u.c.created_at,
u.c.updated_at,
u.c.sign_in_count,
u.c.last_sign_in_at
] | faf7ffd18a2fc6c55c1f8a4c19d176a34f79e19f | 18,449 |
from urllib import parse as urlparse
def remove_query_param(url, key):
"""
Given a URL and a key/val pair, remove an item in the query
parameters of the URL, and return the new URL.
"""
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(url)
query_dict = urlparse.parse_qs(query)
query_dict.pop(key, None)
query = urlparse.urlencode(sorted(list(query_dict.items())), doseq=True)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment)) | 4a7ac5b2b1767a6fbc082e7e5b4f2d10dbd87926 | 18,450 |
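An illustrative call, assuming urlparse refers to urllib.parse as imported above; note that the remaining parameters are re-encoded in sorted key order:
url = "https://api.example.com/items?page=2&size=10"
print(remove_query_param(url, "page"))  # https://api.example.com/items?size=10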
def test_ap_hs20_sim(dev, apdev):
"""Hotspot 2.0 with simulated SIM and EAP-SIM"""
if not hlr_auc_gw_available():
return "skip"
hs20_simulated_sim(dev[0], apdev[0], "SIM")
dev[0].request("INTERWORKING_SELECT auto freq=2412")
ev = dev[0].wait_event(["INTERWORKING-ALREADY-CONNECTED"], timeout=15)
if ev is None:
raise Exception("Timeout on already-connected event") | cbda3f0ebc33c3d0e8ee6a99a088a61c57655480 | 18,451 |
def FreshReal(prefix='b', ctx=None):
"""Return a fresh real constant in the given context using the given prefix.
>>> x = FreshReal()
>>> y = FreshReal()
>>> eq(x, y)
False
>>> x.sort()
Real
"""
ctx = _get_ctx(ctx)
return ArithRef(Z3_mk_fresh_const(ctx.ref(), prefix, RealSort(ctx).ast), ctx) | afc312fbd85387adcfe72c81c5af5b06fe0ccee1 | 18,452 |
import threading
def handle_readable(client):
"""
Return True: The client is re-registered to the selector object.
Return False: The server disconnects the client.
"""
data = client.recv(1028)
if data == b'':
return False
client.sendall(b'SERVER: ' + data)
print(threading.active_count())
return True | 9a77bb893a5da4e76df5593feb6ecf49022e6ef3 | 18,454 |
import datetime
import logging
def fund_wallet():
"""
---
post:
summary: fund a particular wallet
      description: Sends funds to a particular user given the user id. The amount is
        deducted from the sender's wallet in the respective currency; if no such wallet
        exists, it falls back to the sender's default wallet. If the sender is an admin,
        no money is deducted from any wallet, so an admin can generate cash while other
        users perform transfers between wallets.
requestBody:
required: true
content:
application/json:
schema: Fund
responses:
'200':
description: success
content:
application/json:
schema: TransactionResponse
tags:
- user
- admin
"""
try:
required = ["currency", "amount", "receiver"]
data = request.get_json()
if not all([rq in data.keys() for rq in required]):
return jsonify(status=error, message="Missing Required JSON Field!")
amount = data["amount"]
currency = data["currency"]
receiver_id = data["receiver"]
if not CurrencyUtils.iscurrency_valid(currency):
return jsonify(status=error, message="Please Enter a valid Currency code"), 400
if g.user.role.name != "Admin":
sender_wallet = g.user.wallet.filter_by(currency=currency).first()
if sender_wallet is None:
                sender_wallet = g.user.wallet.filter_by(
                    currency=g.user.main_currency).first()
if CurrencyUtils.convert_currency(sender_wallet.currency.upper(), currency.upper(), sender_wallet.balance) < amount:
return jsonify(status=error, message="Insufficient fund!"), 403
amount = CurrencyUtils.convert_currency(
sender_wallet.currency.upper(), currency.upper(), amount)
else:
if sender_wallet.balance < amount:
return jsonify(status=error, message="Insufficient fund!"), 403
receiver = User.query.filter_by(id=receiver_id).first()
if not receiver:
return jsonify(status=error, message=f"Sorry User with id {receiver_id} does not exsits!"), 400
if receiver.role.name == "Admin":
return jsonify(status=unauthorized, message="Sorry Admin account can't be funded!"), 403
receiver_wallet = receiver.wallet.filter_by(currency=currency).first()
if receiver_wallet is None:
if receiver.role.name == "Elite":
new_wallet = Wallet(currency=currency, user_id=receiver.id)
db.session.add(new_wallet)
db.session.commit()
receiver_wallet = new_wallet
elif receiver.role.name == "Noob":
receiver_wallet = receiver.wallet.filter_by(
currency=receiver.main_currency.lower()).first()
if g.user.role.name == "Admin":
tx = Transaction(receiver=receiver_wallet.id, sender=None,
amount=amount, currency=currency, at=datetime.datetime.utcnow())
else:
tx = Transaction(receiver=receiver_wallet.id, sender=sender_wallet.id,
amount=amount, currency=currency, at=datetime.datetime.utcnow())
if receiver.role.name == "Noob":
tx.isapproved = False
db.session.add(tx)
db.session.commit()
return jsonify(status=ok, data=tx.serialize), 200
except SyntaxError as e:
logging.error(e)
return jsonify(status=error, message=str(e)), 400 | 55955335f4462ad118fb792f3335db8090f7439e | 18,455 |
import numpy
def create_objective(dist, abscissas):
"""Create objective function."""
abscissas_ = numpy.array(abscissas[1:-1])
def obj(absisa):
"""Local objective function."""
out = -numpy.sqrt(dist.pdf(absisa))
out *= numpy.prod(numpy.abs(abscissas_ - absisa))
return out
return obj | c63eeadffd067c2a94470ddbf03fb009265fbbbc | 18,456 |
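A hedged usage sketch: any distribution-like object exposing a .pdf method works; scipy's normal distribution is used purely for illustration (the original context likely expects a chaospy distribution):
from scipy import stats

obj = create_objective(stats.norm(), [-3.0, -1.0, 0.0, 1.0, 3.0])
print(obj(0.5))  # objective value at a candidate abscissa between the fixed interior points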
def get_party_to_seats(year, group_id, party_to_votes):
"""Give votes by party, compute seats for party."""
eligible_party_list = get_eligible_party_list(
group_id,
party_to_votes,
)
if not eligible_party_list:
return {}
n_seats = YEAR_TO_REGION_TO_SEATS[year][group_id]
n_seats_bonus = 0 if (group_id == COUNTRY_ID) else 1
n_seats_non_bonus = n_seats - n_seats_bonus
winning_party = sorted(party_to_votes.items(), key=lambda x: -x[1],)[
0
][0]
party_to_seats = {winning_party: n_seats_bonus}
relevant_num = sum(
list(
map(
lambda party: party_to_votes[party],
eligible_party_list,
)
)
)
party_r = []
n_seats_r = n_seats_non_bonus
resulting_num = (int)(relevant_num / n_seats_non_bonus)
for party in eligible_party_list:
seats_r = party_to_votes[party] / resulting_num
seats_non_bonus_whole = (int)(seats_r)
party_to_seats[party] = (
party_to_seats.get(party, 0) + seats_non_bonus_whole
)
party_r.append((party, seats_r % 1))
n_seats_r -= seats_non_bonus_whole
party_r = sorted(party_r, key=lambda x: -x[1])
for i in range(0, n_seats_r):
party = party_r[i][0]
party_to_seats[party] = party_to_seats.get(party, 0) + 1
return party_to_seats | 02270cfeefb87b3da0ec4fa88dfb692a4645df5e | 18,457 |
def get_product(name, version):
"""Get info about a specific version of a product"""
product = registry.get_product(name, version)
return jsonify(product.to_dict()) | 0c461d672ef4d07578b098b3cb937027ad8946f1 | 18,458 |
def _is_segment_in_block_range(segment, blocks):
"""Return whether the segment is in the range of one of the blocks."""
for block in blocks:
if block.start <= segment.start and segment.end <= block.end:
return True
return False | e7509f18f0a72cf90fb1aa643c77c2e13154f0d0 | 18,459 |
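A minimal check with namedtuple stand-ins; the real segment and block objects only need start and end attributes:
from collections import namedtuple

Range = namedtuple("Range", ["start", "end"])  # hypothetical stand-in type
segment = Range(start=10, end=20)
blocks = [Range(start=0, end=5), Range(start=8, end=25)]
print(_is_segment_in_block_range(segment, blocks))  # True: [10, 20] lies inside [8, 25]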
def compute_snes_color_score(img):
""" Returns the ratio of SNES colors to the total number of colors in the image
Parameters:
img (image) -- Pillow image
Returns:
count (float) -- ratio of SNES colors
"""
    score = _get_distance_between_palettes(img, util.get_snes_color_palette())
    return score
    # NOTE: the early return above makes the ratio-based computation below unreachable;
    # it is kept here as the original alternative implementation.
    # colors, snes_color_count = get_color_count(img, util.get_snes_color_palette())
w, h = img.size
colors = np.array(img.getcolors(maxcolors=w * h))
total_color_count = len(colors)
invalid_color_count = np.sum([((r & 0x03) & (g & 0x03) & (b & 0x03)) for (_, (r, g, b)) in colors]) # zero out valid bits, leaving only invalid bits
snes_color_count = total_color_count - invalid_color_count # count remaining colors with invalid bits
return snes_color_count / total_color_count | b52ae8d7d98700f455e126cfb447e41c1762528c | 18,462 |
def get_assignments_for_team(user, team):
""" Get openassessment XBlocks configured for the current teamset """
# Confirm access
if not has_specific_team_access(user, team):
raise Exception("User {user} is not permitted to access team info for {team}".format(
user=user.username,
team=team.team_id
))
# Limit to team-enabled ORAs for the matching teamset in the course
return modulestore().get_items(
team.course_id,
qualifiers={'category': 'openassessment'},
settings={'teams_enabled': True, 'selected_teamset_id': team.topic_id}
) | 67b72b34b8549127728c33dfac8599d979d09f6f | 18,463 |
def is_flexible_uri(uri: Uri_t) -> bool:
"""Judge if specified `uri` has one or more flexible location.
Args:
uri: URI pattern to be judged.
Returns:
True if specified `uri` has one or more flexible location,
False otherwise.
"""
for loc in uri:
if isinstance(loc, FlexibleLocation):
return True
return False | fd5138d3dfc44c36e7b5ccfe911f6640e22bc7f2 | 18,464 |
import cv2 as cv
import numpy as np
def load_frame_gray(img_path, gray_flag=False):
"""Load image at img_path, and convert the original image to grayscale if gray_flag=True.
Return image and grayscale image if gray_flag=True; otherwise only return original image.
img_path = a string containing the path to an image file readable by cv.imread
"""
try:
img = cv.imread(img_path)
except Exception as err:
print(f"The following error occurred when reading the image file at {img_path}: \n{err}")
img = None
if gray_flag and isinstance(img, np.ndarray):
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
else:
gray = None
return (img, gray) if gray_flag else img | 8b792f2bf5f22f34e0b880c934019c238a3cc360 | 18,465 |
def read_vocab_file(path):
""" Read voc file.
This reads a .voc file, stripping out empty lines comments and expand
parentheses. It returns each line as a list of all expanded
alternatives.
Args:
path (str): path to vocab file.
Returns:
List of Lists of strings.
"""
LOG.warning("read_vocab_file is deprecated! "
"use SkillResources class instead")
vocab = []
with open(path, 'r', encoding='utf8') as voc_file:
for line in voc_file.readlines():
if line.startswith('#') or line.strip() == '':
continue
vocab.append(expand_options(line.lower()))
return vocab | cdf230f1fbeafbcc3839a02ce86b33719dfcf806 | 18,466 |
import re
def natural_key(string_):
"""See http://www.codinghorror.com/blog/archives/001018.html"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)] | f49aca918e4efc2f5e7f6541df2d5329bc2752f7 | 18,467 |
def generate_episode(sim, policy, horizon=200):
"""
Generate an episode from a policy acting on an simulation.
Returns: sequence of state, action, reward.
"""
obs = sim.reset()
policy.reset() # Reset the policy too so that it knows its the beginning of the episode.
states, actions, rewards = [], [], []
states.append(obs)
for _ in range(horizon):
action = policy.act(obs)
obs, reward, done, _ = sim.step(action)
states.append(obs)
actions.append(action)
rewards.append(reward)
if done:
break
states.pop() # Pop off the terminating state
return states, actions, rewards | 73a0bbb2703c047d3305e93dd2a340c83db12277 | 18,469 |
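A rollout sketch assuming a classic Gym-style environment (reset() returning only the observation, step() returning four values) and a minimal random policy exposing the reset()/act() interface the function expects:
import gym

class RandomPolicy:
    """Stateless policy satisfying the reset()/act() interface."""
    def __init__(self, action_space):
        self.action_space = action_space
    def reset(self):
        pass  # nothing to reset for a random policy
    def act(self, obs):
        return self.action_space.sample()

env = gym.make("CartPole-v1")
policy = RandomPolicy(env.action_space)
states, actions, rewards = generate_episode(env, policy, horizon=100)
print(len(states), len(actions), len(rewards))  # lists of equal length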
import asyncio
async def _ensure_meadowrun_vault(location: str) -> str:
"""
Gets the meadowrun key vault URI if it exists. If it doesn't exist, also creates the
meadowrun key vault, and tries to assign the Key Vault Administrator role to the
current user.
"""
subscription_id = await get_subscription_id()
vault_name = get_meadowrun_vault_name(subscription_id)
resource_group_path = await ensure_meadowrun_resource_group(location)
vault_path = (
f"{resource_group_path}/providers/Microsoft.KeyVault/vaults/{vault_name}"
)
try:
vault = await azure_rest_api("GET", vault_path, "2019-09-01")
return vault["properties"]["vaultUri"]
except ResourceNotFoundError:
# theoretically key_vault_client.vaults.get_deleted() should be faster,
# but that requires specifying a location and there's no way to know what
# location the key vault may have been originally created in.
deleted_vault_found = False
async for page in azure_rest_api_paged(
"GET",
f"/subscriptions/{subscription_id}/providers/Microsoft.KeyVault/"
f"deletedVaults",
"2019-09-01",
):
for vault in page["value"]:
if vault["name"] == vault_name:
deleted_vault_found = True
break
if deleted_vault_found:
break
if deleted_vault_found:
# if we have a deleted vault, then we should try to recover it
create_mode = "recover"
print(f"The meadowrun Key Vault {vault_name} was deleted, recovering")
else:
create_mode = "default"
print(
f"The meadowrun Key Vault {vault_name} does not exist, creating it "
"now"
)
# if we're creating or recreating the Key Vault, assume that we need to add the
# current user to the Key Vault Administrator role so that the current user can
# access secrets.
assign_role_task = asyncio.create_task(
assign_role_to_principal(
"Key Vault Administrator", await get_current_user_id(), location
)
)
# Now we can create/recover the Key Vault.
# https://docs.microsoft.com/en-us/rest/api/keyvault/keyvault/vaults/create-or-update#vaultproperties
vault, _ = await wait_for_poll(
await azure_rest_api_poll(
"PUT",
vault_path,
"2019-09-01",
"AsyncOperationJsonStatus",
json_content={
"location": location,
"properties": {
"tenantId": await get_tenant_id(),
"sku": {"family": "A", "name": "Standard"},
"enableRbacAuthorization": True,
"createMode": create_mode,
},
},
)
)
try:
await assign_role_task
except Exception as e:
print(
"Warning: we were not able to assign the Key Vault Administrator role "
f"to the current user. You may not be able to create/read secrets: {e}"
)
return vault["properties"]["vaultUri"] | 9e16940a56ae83b47d42d7583ad6efc9c5d63d23 | 18,470 |
from typing import Iterable
from typing import Dict
from typing import List
from typing import Union
from pathlib import Path
from typing import Tuple
import logging
def get_dataset_splits(
datasets: Iterable[HarmonicDataset],
data_dfs: Dict[str, pd.DataFrame] = None,
xml_and_csv_paths: Dict[str, List[Union[str, Path]]] = None,
splits: Iterable[float] = (0.8, 0.1, 0.1),
seed: int = None,
) -> Tuple[List[List[HarmonicDataset]], List[List[int]], List[List[Piece]]]:
"""
Get datasets representing splits of the data in the given DataFrames.
Parameters
----------
datasets : Iterable[HarmonicDataset]
An Iterable of HarmonicDataset class objects, each representing a different type of
HarmonicDataset subclass to make a Dataset from. These are all passed so that they will
have identical splits.
data_dfs : Dict[str, pd.DataFrame]
If using dataframes, a mapping of 'files', 'measures', 'chords', and 'notes' dfs.
xml_and_csv_paths : Dict[str, List[Union[str, Path]]]
If using the MusicXML ('xmls') and label csvs ('csvs'), a list of paths of the
matching xml and csv files.
splits : Iterable[float]
An Iterable of floats representing the proportion of pieces which will go into each split.
This will be normalized to sum to 1.
seed : int
A numpy random seed, if given.
Returns
-------
dataset_splits : List[List[HarmonicDataset]]
An iterable, the length of `dataset` representing the splits for each given dataset type.
Each element is itself an iterable the length of `splits`.
split_ids : List[List[int]]
A list the length of `splits` containing the file_ids for each data point in each split.
split_pieces : List[List[Piece]]
A list of the pieces in each split.
"""
split_ids, split_pieces = get_split_file_ids_and_pieces(
data_dfs=data_dfs,
xml_and_csv_paths=xml_and_csv_paths,
splits=splits,
seed=seed,
)
dataset_splits = np.full((len(datasets), len(splits)), None)
for split_index, (split_prop, pieces) in enumerate(zip(splits, split_pieces)):
if len(pieces) == 0:
logging.warning(
"Split %s with prop %s contains no pieces. Returning None for those.",
split_index,
split_prop,
)
continue
for dataset_index, dataset_class in enumerate(datasets):
dataset_splits[dataset_index][split_index] = dataset_class(pieces)
return dataset_splits, split_ids, split_pieces | c1389dad05aa2911735b1e9099acda9e2a8a1c05 | 18,471 |
from PilotErrors import PilotException
from movers import JobMover
from movers.trace_report import TraceReport
import traceback
def put_data_es(job, jobSite, stageoutTries, files, workDir=None, activity=None):
"""
Do jobmover.stageout_outfiles or jobmover.stageout_logfiles (if log_transfer=True)
or jobmover.stageout_logfiles_os (if special_log_transfer=True)
:backward compatible return: (rc, pilotErrorDiag, rf, "", filesNormalStageOut, filesAltStageOut)
"""
tolog("Mover put data started [new implementation]")
si = getSiteInformation(job.experiment)
si.setQueueName(jobSite.computingElement) # WARNING: SiteInformation is singleton: may be used in other functions! FIX me later
workDir = workDir or os.path.dirname(job.workdir)
mover = JobMover(job, si, workDir=workDir, stageoutretry=stageoutTries)
eventType = "put_es"
mover.trace_report = TraceReport(pq=jobSite.sitename, localSite=jobSite.sitename, remoteSite=jobSite.sitename, dataset="", eventType=eventType)
mover.trace_report.init(job)
error = None
storageId = None
try:
if not activity:
activity = "es_events"
file = files[0]
if file.storageId and file.storageId != -1:
storageId = file.storageId
copytools = [('objectstore', {'setup': ''})]
else:
copytools = None
transferred_files, failed_transfers = mover.stageout(activity=activity, files=files, copytools=copytools)
except PilotException, e:
error = e
except Exception, e:
tolog("ERROR: Mover put data failed [stageout]: exception caught: %s" % e)
tolog(traceback.format_exc())
error = PilotException('STAGEOUT FAILED, exception=%s' % e, code=PilotErrors.ERR_STAGEOUTFAILED, state='STAGEOUT_FAILED')
if error:
## send trace
mover.trace_report.update(clientState=error.state or 'STAGEOUT_FAILED', stateReason=error.message, timeEnd=time())
mover.sendTrace(mover.trace_report)
return error.code, error.message, None
tolog("Mover put data finished")
# prepare compatible output
# keep track of which files have been copied
not_transferred = [e.lfn for e in files if e.status not in ['transferred']]
if not_transferred:
err_msg = 'STAGEOUT FAILED: not all output files have been copied: remain files=%s, errors=%s' % ('\n'.join(not_transferred), ';'.join([str(ee) for ee in failed_transfers]))
tolog("Mover put data finished: error_msg=%s" % err_msg)
return PilotErrors.ERR_STAGEOUTFAILED, err_msg, None
return 0, "", storageId | b3841dc487e19ca989575e37b95a9e8f2949258b | 18,472 |
def floor(data):
"""
Returns element-wise largest integer not greater than x.
Args:
data (tvm.tensor.Tensor): Tensor of type float16, and float32
Returns:
tvm.tensor.Tensor, has the same shape as data and type of int32.
"""
vc_util.ops_dtype_check(data.dtype, vc_util.DtypeForDavinci.ALL_FLOAT)
shape = [x.value for x in data.shape]
vc_util.check_shape(shape)
if utils.product_is_mini() and data.dtype == "float32":
# solve the problem of 87==floor(86.9996) when high_precision is needed.
# problem is caused by such as fp16(86.9996)==87.
# detect problem by fp32(86.9996) - fp32(floor(fp16(86.9996))) < 0
# floor could only apply on float16
data_fp16 = akg.lang.cce.cast_to(data, "float16")
floor_data = akg.lang.cce.floor(data_fp16)
floor_fp16 = akg.lang.cce.cast_to(floor_data, "float16")
floor_fp32 = akg.lang.cce.cast(floor_fp16, "float32")
# if diff=1e-7, we cannot get right sign of fp16(diff)
# but we can get right sign of 10000*diff = 1e-3, which has the same
# sign as diff
diff = (data - floor_fp32) * 10000
diff_fp16 = akg.lang.cce.cast_to(diff, "float16")
# if diff < 0 and floor == ceil, then it's 87 = floor(86.99999)
res = akg.tvm.compute(shape,
lambda *i: akg.tvm.expr.Select(
                                   diff_fp16(*i) < akg.tvm.const(0, "float16"),
                                   floor_fp16(*i) - akg.tvm.const(1, "float16"),
floor_fp16(*i)),
name="res")
res = akg.lang.cce.cast_to(res, "int32")
else:
res = akg.lang.cce.floor(data)
return res | 3d553d54330c3237908b33600fae560a92f20975 | 18,473 |
def pixel_link_model(inputs, config):
""" PixelLink architecture. """
if config['model_type'] == 'mobilenet_v2_ext':
backbone = mobilenet_v2(inputs, original_stride=False,
weights_decay=config['weights_decay'])
elif config['model_type'] == 'ka_resnet50':
backbone = keras_applications_resnet50(inputs)
elif config['model_type'] == 'ka_vgg16':
backbone = keras_applications_vgg16(inputs)
elif config['model_type'] == 'ka_mobilenet_v2_1_0':
backbone = keras_applications_mobilenetv2(inputs, alpha=1.0)
elif config['model_type'] == 'ka_mobilenet_v2_1_4':
backbone = keras_applications_mobilenetv2(inputs, alpha=1.4)
elif config['model_type'] == 'ka_xception':
backbone = keras_applications_xception(inputs)
segm_logits = fcn_head(backbone, num_classes=2, name='segm_logits',
weights_decay=config['weights_decay'])
link_logits = fcn_head(backbone, num_classes=16, name='link_logits_',
weights_decay=config['weights_decay'])
new_shape = tf.shape(link_logits)[1], tf.shape(link_logits)[2], 8, 2
link_logits = tf.keras.layers.Reshape(new_shape, name='link_logits')(link_logits)
return tf.keras.Model(inputs, [segm_logits, link_logits]) | 0bf606e5b06d94bce98865147fb6a1cf45b04560 | 18,474 |
def ingresar_datos():
    """Prompts the user for the data needed to compute the price of the
    ticket purchase.
    :return: tipo, cantidad
    :rtype: tuple
    """
text_align("Datos de la compra", width=35)
tipo: str = choice_input(tuple(TIPO.keys()))
cantidad: int = int_input("Ingrese el número de boletos: ", min=1, max=12)
return tipo, cantidad | eb9b1c90fbc44a639a7760848723f5579eced4df | 18,475 |
import numpy as np
def trim_spectrum(freqs, power_spectra, f_range):
"""Extract a frequency range from power spectra.
Parameters
----------
freqs : 1d array
Frequency values for the power spectrum.
power_spectra : 1d or 2d array
Power spectral density values.
f_range: list of [float, float]
Frequency range to restrict to, as [lowest_freq, highest_freq].
Returns
-------
freqs_ext : 1d array
Extracted frequency values for the power spectrum.
power_spectra_ext : 1d or 2d array
Extracted power spectral density values.
Notes
-----
This function extracts frequency ranges >= f_low and <= f_high.
It does not round to below or above f_low and f_high, respectively.
Examples
--------
Using a simulated spectrum, extract a frequency range:
>>> from fooof.sim import gen_power_spectrum
>>> freqs, powers = gen_power_spectrum([1, 50], [1, 1], [10, 0.5, 1.0])
>>> freqs, powers = trim_spectrum(freqs, powers, [3, 30])
"""
# Create mask to index only requested frequencies
f_mask = np.logical_and(freqs >= f_range[0], freqs <= f_range[1])
# Restrict freqs & spectra to requested range
# The if/else is to cover both 1d or 2d arrays
freqs_ext = freqs[f_mask]
power_spectra_ext = power_spectra[f_mask] if power_spectra.ndim == 1 \
else power_spectra[:, f_mask]
return freqs_ext, power_spectra_ext | a522a384033fc38d3bba5e7d91ca8debfdedec68 | 18,476 |
def _get_variable_for(v):
"""Returns the ResourceVariable responsible for v, or v if not necessary."""
if v.op.type == "VarHandleOp":
for var in ops.get_collection(ops.GraphKeys.RESOURCES):
if (isinstance(var, resource_variable_ops.ResourceVariable)
and var.handle.op is v.op):
return var
raise ValueError("Got %s but could not locate source variable." % (str(v)))
return v | 5e8f4b83495c89f728c30148e9b05e06713d6b82 | 18,477 |
def load_prefixes(filepath):
    """Given a txt file containing the prefixes used in SPARQL, a string
    with those prefixes and a list of tuples describing them are returned.
    Parameters
    ----------
    filepath : str
        Path to the txt file containing the set of prefixes.
    Returns
    -------
    tuple of str
        A tuple containing the loaded prefixes as a string and a list of
        tuples, where the first position is the name given to the URI and
        the second holds the corresponding URI.
Examples
--------
.. code-block:: python
>>> from QApedia.io import load_prefixes
>>> filename = "prefixes.txt"
>>> prefixes = load_prefixes(filename)
>>> for uri_name, uri in prefixes[1]:
... print(uri_name, uri)
...
owl: http://www.w3.org/2002/07/owl#
xsd: http://www.w3.org/2001/XMLSchema#
rdfs: http://www.w3.org/2000/01/rdf-schema#
rdf: http://www.w3.org/1999/02/22-rdf-syntax-ns#
foaf: http://xmlns.com/foaf/0.1/
dc: http://purl.org/dc/elements/1.1/
dbpedia2: http://dbpedia.org/property/
dbpedia: http://dbpedia.org/
skos: http://www.w3.org/2004/02/skos/core#
"""
f = open(filepath, "r")
lines = f.readlines()
f.close()
prefixes = "\n".join(line.rstrip() for line in lines)
list_of_prefixes = convert_prefixes_to_list(prefixes)
return prefixes, list_of_prefixes | a6c2f3c014dbfae73718c579da914f840489e701 | 18,478 |
def build_convolutional_box_predictor(is_training,
num_classes,
conv_hyperparams_fn,
min_depth,
max_depth,
num_layers_before_predictor,
use_dropout,
dropout_keep_prob,
kernel_size,
box_code_size,
apply_sigmoid_to_scores=False,
add_background_class=True,
class_prediction_bias_init=0.0,
use_depthwise=False,
box_encodings_clip_range=None):
"""Builds the ConvolutionalBoxPredictor from the arguments.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for convolution ops.
min_depth: Minimum feature depth prior to predicting box encodings
and class predictions.
max_depth: Maximum feature depth prior to predicting box encodings
and class predictions. If max_depth is set to 0, no additional
feature map will be inserted before location and class predictions.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
use_dropout: Option to use dropout or not. Note that a single dropout
op is applied here prior to both box and class predictions, which stands
in contrast to the ConvolutionalBoxPredictor below.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
kernel_size: Size of final convolution kernel. If the
spatial resolution of the feature map is smaller than the kernel size,
then the kernel size is automatically set to be
min(feature_width, feature_height).
box_code_size: Size of encoding for each box.
apply_sigmoid_to_scores: If True, apply the sigmoid on the output
class_predictions.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: Constant value to initialize bias of the last
conv2d layer before class prediction.
use_depthwise: Whether to use depthwise convolutions for prediction
steps. Default is False.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
Returns:
A ConvolutionalBoxPredictor class.
"""
box_prediction_head = box_head.ConvolutionalBoxHead(
is_training=is_training,
box_code_size=box_code_size,
kernel_size=kernel_size,
use_depthwise=use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
class_prediction_head = class_head.ConvolutionalClassHead(
is_training=is_training,
num_class_slots=num_classes + 1 if add_background_class else num_classes,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
kernel_size=kernel_size,
apply_sigmoid_to_scores=apply_sigmoid_to_scores,
class_prediction_bias_init=class_prediction_bias_init,
use_depthwise=use_depthwise)
other_heads = {}
return convolutional_box_predictor.ConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
other_heads=other_heads,
conv_hyperparams_fn=conv_hyperparams_fn,
num_layers_before_predictor=num_layers_before_predictor,
min_depth=min_depth,
max_depth=max_depth) | 3e3b79cbd6e99b8b9d3da5ff2545e17a92ef3f38 | 18,479 |
import argparse as argp
def parse_command_line():
"""
:return:
"""
parser = argp.ArgumentParser(prog='TEPIC/findBackground.py', add_help=True)
ag = parser.add_argument_group('Input/output parameters')
ag.add_argument('--input', '-i', type=str, dest='inputfile', required=True,
help='Path to input file. First three columns in file'
' are expected to be chrom - start - end.')
ag.add_argument('--genome', '-g', type=str, dest='genome', required=True,
help='Path to genome reference file in 2bit format.')
ag.add_argument('--output', '-o', type=str, dest='outputfile', default='stdout',
help='Path to output file or stdout. Default: stdout')
ag = parser.add_argument_group('Runtime parameters')
ag.add_argument('--workers', '-w', type=int, dest='workers', default=1,
help='Number of CPU cores to use. 1 CPU core'
' processes 1 chromosome at a time. Default: 1')
ag.add_argument('--time-out', '-to', type=int, dest='timeout', default=3,
help='Maximal number of minutes to spend searching for'
' background regions per chromosome. Default: 3 minutes')
ag.add_argument('--threshold', '-th', type=int, dest='threshold', default=90,
help='Stop searching after having found more than <THRESHOLD>%%'
' matches per chromosome. Default: 90%%')
ag.add_argument('--eps-init', '-ei', type=float, dest='epsinit', default=1.,
help='Init value for epsilon. Error tolerance in percentage points'
' for similarity matching. Default: 1.0 ppt')
ag.add_argument('--eps-step', '-es', type=float, dest='epsstep', default=0.5,
help='Increment epsilon at each iteration by this value. Default: 0.5')
ag.add_argument('--eps-max', '-em', type=float, dest='epsmax', default=2.,
help='Maximal value for epsilon. After reaching this value, restart'
' search with different starting positions. Default: 2.0')
return parser.parse_args() | 1f12e08cf4c86f40a84a09303d75a5d3506a3a14 | 18,481 |
import torch
def disparity_to_idepth(K, T_right_in_left, left_disparity):
"""Function athat transforms general (non-rectified) disparities to inverse
depths.
"""
assert(len(T_right_in_left.shape) == 3)
# assert(T_right_in_left.shape[0] == self.batch_size)
assert(T_right_in_left.shape[1] == 4)
assert(T_right_in_left.shape[2] == 4)
assert(len(K.shape) == 3)
# assert(K.shape[0] == self.batch_size)
assert(K.shape[1] == 4)
assert(K.shape[2] == 4)
batch_size = K.shape[0]
rows = left_disparity.shape[-2]
cols = left_disparity.shape[-1]
# Create tensor of homogeneous pixel coordinates of size (batch, 3, rows*cols).
y_grid, x_grid = torch.meshgrid(torch.arange(0, rows, device=left_disparity.device),
torch.arange(0, cols, device=left_disparity.device))
xys = torch.cat([x_grid.reshape(-1, rows * cols).float(),
y_grid.reshape(-1, rows * cols).float()], dim=0)
xys = xys.unsqueeze(0).repeat(batch_size, 1, 1)
ones = torch.ones(batch_size, 1, rows * cols, dtype=torch.float32, device=xys.device)
xyz_pix = torch.cat([xys, ones], 1)
Kinv = torch.inverse(K)
T_left_in_right = torch.inverse(T_right_in_left)
R_left_in_right = T_left_in_right[:, :3, :3]
KRKinv = torch.matmul(K[:, :3, :3], torch.matmul(R_left_in_right, Kinv[:, :3, :3]))
KRKinv3 = KRKinv[:, 2, :] # (batch, 3)
KRKinv3_rep = torch.unsqueeze(KRKinv3, dim=2).repeat(1, 1, rows*cols) # (batch, 3, rows*cols)
KT_left_in_right = torch.matmul(K, T_left_in_right)
Kt = KT_left_in_right[:, :3, 3] # (batch, 3)
Kt_rep = torch.unsqueeze(Kt, dim=2).repeat(1, 1, rows*cols) # (batch, 3, rows*cols)
# (batch, rows*cols)
left_disparity_flat = left_disparity.reshape(batch_size, -1)
# Compute pixels at infinite depth.
pix_inf = torch.matmul(KRKinv, xyz_pix) # (batch, 3, rows*cols)
pix_inf[:, 0, :] /= pix_inf[:, 2, :]
pix_inf[:, 1, :] /= pix_inf[:, 2, :]
pix_inf[:, 2, :] /= pix_inf[:, 2, :]
# Compute epipolar lines (must point from far to near depth).
pix_far = torch.matmul(KRKinv, xyz_pix * 1e2)
pix_far += Kt_rep
pix_far[:, 0, :] /= pix_far[:, 2, :]
pix_far[:, 1, :] /= pix_far[:, 2, :]
pix_far[:, 2, :] /= pix_far[:, 2, :]
epi_diff = pix_far[:, :2, :] - pix_inf[:, :2, :]
epi_norm = torch.sqrt(torch.sum(epi_diff**2, dim=1))
epiline = epi_diff[:, :2, :] # (batch, 2, rows*cols)
epiline[:, 0, :] /= (epi_norm + 1e-6)
epiline[:, 1, :] /= (epi_norm + 1e-6)
mask = epi_norm < 1e-6
mask = mask.reshape(batch_size, 1, rows, cols)
# Convert disparity to idepth.
# (batch, rows*cols)
w = KRKinv3_rep[:, 0, :] * xyz_pix[:, 0, :] + \
KRKinv3_rep[:, 1, :] * xyz_pix[:, 1, :] + \
KRKinv3_rep[:, 2, :]
# (batch, rows*cols)
A0 = Kt_rep[:, 0, :] - Kt_rep[:, 2, :]*(pix_inf[:, 0, :] + left_disparity_flat * epiline[:, 0, :])
A1 = Kt_rep[:, 1, :] - Kt_rep[:, 2, :]*(pix_inf[:, 1, :] + left_disparity_flat * epiline[:, 1, :])
b0 = w * left_disparity_flat * epiline[:, 0, :]
b1 = w * left_disparity_flat * epiline[:, 1, :]
ATA = A0 * A0 + A1 * A1
ATb = A0 * b0 + A1 * b1
left_idepthmap = ATb / ATA
left_idepthmap = left_idepthmap.reshape(batch_size, 1, rows, cols)
# Set bad points to 0 idepth.
left_idepthmap = (~mask).float() * left_idepthmap
return left_idepthmap | 454bda2fd9ec4e4ef5615dbdb054c2f3b454f31a | 18,482 |
import pandas as pd
import plotly.graph_objects as go
def auto_apilado(datos, target, agrupacion, porcentaje=False):
    """
    This function receives a DataFrame, a target variable, and the
    variable over which the data should be grouped (X axis).
    Returns a stacked bar chart.
    """
total = datos[[target,agrupacion]].groupby(agrupacion).count()
tabla = pd.DataFrame([])
    fig = go.Figure()
    # The y-axis label depends on whether raw counts or percentages are plotted
    y_title = 'Porcentaje (Individuos)' if porcentaje else 'Conteo (Individuos)'
    # Create one trace per unique value of the target variable
    for value in datos[target].unique():
        trace = datos[[target,agrupacion]].loc[datos[target]==value].groupby(agrupacion).count()
        if porcentaje:  # columns must share the same name for the division to align
            trace = 100*trace/total
trace.rename(columns={target:str(value)},inplace=True)
tabla = pd.concat([tabla, trace],axis = 1)
        # Add the bar trace for this value to the figure
fig.add_trace(go.Bar(
x = tabla.index,
y = tabla[str(value)],
name=str(value),
# marker_color='rgb(26, 118, 255)'
))
fig.update_layout(
title='Conteo de '+str(target)+' agrupado por '+str(agrupacion),
xaxis_tickfont_size=14,
yaxis=dict(
title=y_title,
titlefont_size=16,
tickfont_size=14,
),
xaxis=dict(
title=str(agrupacion)
))
fig.update_layout(barmode='stack')
return fig, tabla | b3b13e0e5bd56628971004c0d6d5171929ab6de3 | 18,484 |
import datetime
def month_from_string(month_str: str) -> datetime.date:
"""
Accepts year-month strings with hyphens such as "%Y-%m"
"""
return datetime.datetime.strptime(month_str, "%Y-%m").date() | cfb901f6676d40398bd6f49c438541f00e5389e3 | 18,485 |
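For example, the first day of the month is implied by the "%Y-%m" format:
print(month_from_string("2023-07"))  # 2023-07-01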
import hashlib
import networkx as nx
import pynauty
from networkx import DiGraph
def get_isomorphic_signature(graph: DiGraph) -> str:
"""
Generate unique isomorphic id with pynauty
"""
nauty_graph = pynauty.Graph(len(graph.nodes), directed=True, adjacency_dict=nx.to_dict_of_lists(graph))
return hashlib.md5(pynauty.certificate(nauty_graph)).hexdigest() | 8dfd7dd44409fee7dddd88f21681ed93232f1dba | 18,486 |
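A small demonstration, assuming pynauty is installed: two directed 3-cycles with opposite orientations (isomorphic, but with different edge sets) should produce the same certificate hash:
g1 = nx.DiGraph([(0, 1), (1, 2), (2, 0)])
g2 = nx.DiGraph([(0, 2), (2, 1), (1, 0)])
print(get_isomorphic_signature(g1) == get_isomorphic_signature(g2))  # True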
def _encode_raw_string(str):
"""Encodes a string using the above encoding format.
Args:
str (string): The string to be encoded.
Returns:
An encoded version of the input string.
"""
return _replace_all(str, _substitutions) | bb33875b276fe822c2b43ec3ebcc57b0d2f4c7b9 | 18,488 |