content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
import logging

import numpy as np

logger = logging.getLogger(__name__)

# islistable, isstr, replace_index_values and _group_and_agg are package-internal helpers (not defined here)


def _aggregate(df, variable, components=None, method=np.sum):
    """Internal implementation of the `aggregate` function"""
    # a list of variables requires the default components (no manual list)
if islistable(variable) and components is not None:
raise ValueError(
"Aggregating by list of variables does not support `components`!"
)
mapping = {}
msg = "Cannot aggregate variable '{}' because it has no components!"
# if single variable
if isstr(variable):
# default components to all variables one level below `variable`
components = components or df._variable_components(variable)
if not len(components):
logger.info(msg.format(variable))
return
for c in components:
mapping[c] = variable
# else, use all variables one level below `variable` as components
else:
for v in variable if islistable(variable) else [variable]:
_components = df._variable_components(v)
if not len(_components):
logger.info(msg.format(v))
continue
for c in _components:
mapping[c] = v
# rename all components to `variable` and aggregate
_df = df._data[df._apply_filters(variable=mapping.keys())]
_df.index = replace_index_values(_df, "variable", mapping)
return _group_and_agg(_df, [], method) | 25c36e6180aa5509ced7513c841eb9cc4450b41b | 17,325 |
import inspect
def get_attributes(klass):
"""Get all class attributes.
"""
attributes = list()
    for attr, value in inspect.getmembers(
            klass, lambda x: not inspect.isroutine(x)):
if not (attr.startswith("__") and attr.endswith("__")):
attributes.append(attr)
return attributes | 6a72db39a9982b6a4ad5462ff9a4695f9cca6ce0 | 17,326 |
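A minimal usage sketch for `get_attributes`; the `Point` class is an invented example, not part of the original source:

class Point:
    """Toy class with two plain class attributes and one method."""
    x = 0
    y = 0

    def move(self):
        pass

# Methods and dunder members are filtered out, leaving only the plain attributes.
print(get_attributes(Point))  # ['x', 'y']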
import io
from copy import deepcopy

import cairo
import cairocffi
import weasyprint
from lxml import etree
import gi
gi.require_version('Poppler', '0.18')  # typelib version may differ per system
from gi.repository import Gio, GLib, Poppler

def render(html):
"""Convert HTML to a PDF"""
output = io.BytesIO()
surface = cairo.PDFSurface(output, 595, 842)
ctx = cairo.Context(surface)
cffictx = cairocffi.Context._from_pointer(cairocffi.ffi.cast('cairo_t **', id(ctx) + object.__basicsize__)[0], incref=True)
html = etree.parse(io.StringIO(html), etree.HTMLParser())
for pdf in html.xpath("//img[substring(@src, string-length(@src) - 3)=\'.pdf\']"):
for prev in pdf.xpath("preceding-sibling::*"):
pdf.getparent().remove(prev)
pdfsrc = pdf.get("src")
pdf.getparent().remove(pdf)
section = deepcopy(html)
for nextpdf in section.xpath("//img[substring(@src, string-length(@src) - 3)=\'.pdf\']"):
for nextel in nextpdf.xpath("following-sibling::*"):
nextpdf.getparent().remove(nextel)
nextpdf.getparent().remove(nextpdf)
html_pages = weasyprint.HTML(tree=section).render().pages
surface.set_size(html_pages[0].width * 72 / 96.0, html_pages[0].height * 72 / 96.0)
if pdfsrc != "blank.pdf":
with weasyprint.default_url_fetcher(str(pdfsrc))['file_obj'] as fetch:
pdf_pages = Poppler.Document.new_from_stream(Gio.MemoryInputStream.new_from_bytes(GLib.Bytes.new_take(fetch.read())), -1, None, None)
else:
pdf_pages = None
for pageno in range(max(pdf_pages.get_n_pages() if pdf_pages else 0, len(html_pages))):
if pdf_pages and pageno < pdf_pages.get_n_pages():
pdf_pages.get_page(pageno).render_for_printing(ctx)
if pageno < len(html_pages):
html_pages[pageno].paint(cffictx, scale=72 / 96.0)
ctx.show_page()
surface.finish()
return output.getbuffer() | 9b0d4c252b7b7bf8dcdcab32e13cf183cef0312d | 17,327 |
import numpy as np


def computeFlowImage(u,v,logscale=True,scaledown=6,output=False):
"""
topleft is zero, u is horiz, v is vertical
red is 3 o'clock, yellow is 6, light blue is 9, blue/purple is 12
"""
colorwheel = makecolorwheel()
ncols = colorwheel.shape[0]
radius = np.sqrt(u**2 + v**2)
if output:
print("Maximum flow magnitude: %04f" % np.max(radius))
if logscale:
radius = np.log(radius + 1)
if output:
print("Maximum flow magnitude (after log): %0.4f" % np.max(radius))
radius = radius / scaledown
if output:
print("Maximum flow magnitude (after scaledown): %0.4f" % np.max(radius))
rot = np.arctan2(-v, -u) / np.pi
    fk = (rot+1)/2 * (ncols-1)  # -1~1 mapped to 0~ncols-1
    k0 = fk.astype(np.uint8)    # 0, 1, 2, ..., ncols-1
k1 = k0+1
k1[k1 == ncols] = 0
f = fk - k0
ncolors = colorwheel.shape[1]
img = np.zeros(u.shape+(ncolors,))
for i in range(ncolors):
tmp = colorwheel[:,i]
col0 = tmp[k0]
col1 = tmp[k1]
col = (1-f)*col0 + f*col1
idx = radius <= 1
# increase saturation with radius
col[idx] = 1 - radius[idx]*(1-col[idx])
# out of range
col[~idx] *= 0.75
img[:,:,i] = np.floor(255*col).astype(np.uint8)
return img.astype(np.uint8) | 87690e34ae1509a63df982b68e35346be8b5d8dd | 17,328 |
def day_display(year, month, all_month_events, day):
"""
Returns the events that occur on the given day.
Works by getting all occurrences for the month, then drilling
down to only those occurring on the given day.
"""
# Get a dict with all of the events for the month
count = CountHandler(year, month, all_month_events).get_count()
pks = [x[1] for x in count[day]] # list of pks for events on given day
# List enables sorting.
# See the comments in EventMonthView in views.py for more info
day_events = list(Event.objects.filter(pk__in=pks).order_by(
'start_date').prefetch_related('cancellations'))
day_events.sort(key=lambda x: x.l_start_date.hour)
return day_events | e17df37bb8908a557b9cf1175c3567b460a35385 | 17,329 |
import math
def decimal_to_octal(num):
"""Convert a Decimal Number to an Octal Number."""
octal = 0
counter = 0
while num > 0:
remainder = num % 8
octal = octal + (remainder * math.pow(10, counter))
counter += 1
num = math.floor(num / 8) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return "{0:g}".format(float(octal)) | e6bbc23a2235812c1e2298e8a0be8396c06b1c1f | 17,330 |
import pandas as pd


def get_formatted_dates(date_ranges):
    """Returns list of dates specified by date_ranges, formatted for Swiftly API use.
date_ranges is a list of dict, with each dict specifying a range of dates
in string format. sample dict for Tue/Wed/Thu in Sep/Oct:
{
"start_date": "09-01-2019",
"end_date": "10-31-2019",
"include_days": [0, 1, 1, 1, 0, 0, 0]
}
"""
final_date_list = []
for date_range in date_ranges:
timestamp_list = pd.bdate_range(
start=date_range["start_date"],
end=date_range["end_date"],
weekmask=date_range["include_days"],
freq="C"
).to_list()
final_date_list += [ts.strftime("%m-%d-%Y") for ts in timestamp_list]
return final_date_list | db20459ffacb8cb621acdf40b0bdcbc203787680 | 17,331 |
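An illustrative call to `get_formatted_dates` using the Tue/Wed/Thu mask from the docstring, over a shorter made-up range:

date_ranges = [{
    "start_date": "09-01-2019",
    "end_date": "09-14-2019",
    "include_days": [0, 1, 1, 1, 0, 0, 0],  # Monday-first mask: Tue/Wed/Thu only
}]
print(get_formatted_dates(date_ranges))
# ['09-03-2019', '09-04-2019', '09-05-2019', '09-10-2019', '09-11-2019', '09-12-2019']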
import numpy as np
import xarray as xr
# rasterio_open: package-internal helper (assumed to wrap rasterio.open)


def read_img(img: str, no_data: float, mask: str = None, classif: str = None,
             segm: str = None) -> xr.Dataset:
"""
Read image and mask, and return the corresponding xarray.DataSet
:param img: Path to the image
:type img: string
    :param no_data: no_data value in the image
:type no_data: float
:param mask: Path to the mask (optional): 0 value for valid pixels, !=0 value for invalid pixels
:type mask: string
:param classif: Path to the classif (optional)
:type classif: string
    :param segm: Path to the segmentation (optional)
:type segm: string
:return: xarray.DataSet containing the variables :
- im : 2D (row, col) xarray.DataArray float32
- msk : 2D (row, col) xarray.DataArray int16, with the convention defined in the configuration file
:rtype: xarray.DataSet
"""
img_ds = rasterio_open(img)
data = img_ds.read(1)
if np.isnan(no_data):
no_data_pixels = np.where(np.isnan(data))
else:
no_data_pixels = np.where(data == no_data)
    # NaN values are accepted as no_data in the input image, but to avoid disturbing the cost volume computation in
    # the stereo step, NaN no_data values must be converted. We choose -9999 (any other value would do). The no_data
    # positions are not lost because they are stored in 'msk'.
if no_data_pixels[0].size != 0 and np.isnan(no_data):
data[no_data_pixels] = -9999
no_data = -9999
dataset = xr.Dataset({'im': (['row', 'col'], data.astype(np.float32))},
coords={'row': np.arange(data.shape[0]),
'col': np.arange(data.shape[1])})
# Add image conf to the image dataset
dataset.attrs = {'no_data_img': no_data,
'valid_pixels': 0, # arbitrary default value
'no_data_mask': 1} # arbitrary default value
if classif is not None:
input_classif = rasterio_open(classif).read(1)
dataset['classif'] = xr.DataArray(np.full((data.shape[0], data.shape[1]), 0).astype(np.int16),
dims=['row', 'col'])
dataset['classif'].data = input_classif
if segm is not None:
input_segm = rasterio_open(segm).read(1)
dataset['segm'] = xr.DataArray(np.full((data.shape[0], data.shape[1]), 0).astype(np.int16),
dims=['row', 'col'])
dataset['segm'].data = input_segm
# If there is no mask, and no data in the images, do not create the mask to minimize calculation time
if mask is None and no_data_pixels[0].size == 0:
return dataset
# Allocate the internal mask (!= input_mask)
# Mask convention:
# value : meaning
# dataset.attrs['valid_pixels'] : a valid pixel
# dataset.attrs['no_data_mask'] : a no_data_pixel
# other value : an invalid_pixel
dataset['msk'] = xr.DataArray(np.full((data.shape[0], data.shape[1]),
dataset.attrs['valid_pixels']).astype(np.int16), dims=['row', 'col'])
# Mask invalid pixels if needed
# convention: input_mask contains information to identify valid / invalid pixels.
# Value == 0 on input_mask represents a valid pixel
# Value != 0 on input_mask represents an invalid pixel
if mask is not None:
input_mask = rasterio_open(mask).read(1)
# Masks invalid pixels
# All pixels that are not valid_pixels, on the input mask, are considered as invalid pixels
dataset['msk'].data[np.where(input_mask > 0)] = dataset.attrs['valid_pixels'] + \
dataset.attrs['no_data_mask'] + 1
# Masks no_data pixels
# If a pixel is invalid due to the input mask, and it is also no_data, then the value of this pixel in the
# generated mask will be = no_data
dataset['msk'].data[no_data_pixels] = int(dataset.attrs['no_data_mask'])
return dataset | 2074269e47092313f1cb01dc81004b7ce9c8f411 | 17,333 |
from django.contrib.auth.models import Group, Permission, User
from django_any import any_model


def any_user(password=None, permissions=[], groups=[], **kwargs):
    """
Shortcut for creating Users
Permissions could be a list of permission names
If not specified, creates active, non superuser
and non staff user
"""
is_active = kwargs.pop('is_active', True)
is_superuser = kwargs.pop('is_superuser', False)
is_staff = kwargs.pop('is_staff', False)
    user = any_model(User, is_active=is_active, is_superuser=is_superuser,
                     is_staff=is_staff, **kwargs)
    for group_name in groups:
group = Group.objects.get(name=group_name)
user.groups.add(group)
for permission_name in permissions:
app_label, codename = permission_name.split('.')
permission = Permission.objects.get(
content_type__app_label=app_label,
codename=codename)
user.user_permissions.add(permission)
if password:
user.set_password(password)
user.save()
return user | 914bbb58b68aad9b19a77f2dec7ea1f0e91508bd | 17,334 |
import tensorflow as tf


def devices_to_use():
    """Returns the device objects for the accelerator we are most likely to use.
Returns:
List of logical devices of the accelerators we will use.
"""
if tf.config.list_logical_devices("TPU"):
devices = tf.config.list_logical_devices("TPU")
elif tf.config.list_logical_devices("GPU"):
devices = tf.config.list_logical_devices("GPU")
else:
devices = tf.config.list_logical_devices("CPU")
devices.sort()
return devices | aca8cbd28ff46e79655b47e34334c12406cc94e8 | 17,335 |
import numpy as np


def barcode_density(bars, length):
"""
calculates the barcode density (normalized average cycle lifetime)
of a barcode
"""
densities = np.zeros(len(bars))
nums = np.array([len(bars[i][1]) for i in range(len(bars))])
num_infs = np.zeros(len(bars))
for i in range(len(bars)):
tot = 0
intervals = bars[i][1]
for intr in intervals:
if np.isinf(intr[1]):
num_infs[i] += 1
tot += (length-intr[0])/(length-1)
else:
tot += (intr[1] - intr[0])/(length-1)
densities[i] = tot
normed_density = densities/nums
normed_density[np.isnan(normed_density)] = 0
return np.stack([densities, nums, normed_density, num_infs]) | 4b585338cef3fd8b8ca91f89a1ae0532450b6209 | 17,336 |
def genRankSurvey(readername, candidates, binsize, shareWith=None):
"""
readername (str)
candidates (iterable)
binsize (int)
shareWith (str) optional
"""
    # connect and create survey
c = cornellQualtrics()
surveyname = "Ranking Survey for {}".format(readername)
surveyId = c.createSurvey(surveyname)
desc = (
u"This survey is for: {0}.\n\n"
u"Rank students into the top 50%-ile bins. "
u"Put exactly {1} students in each bin. "
u"All uncategorized students will automatically "
u"be placed in the bottom 50%-ile. Ordering within a bin "
u"does not matter.".format(readername, binsize)
)
choices = {}
for j, choice in enumerate(candidates):
choices[str(j + 1)] = {"Display": choice}
choiceOrder = list(range(1, len(choices) + 1))
questionDef = {
"QuestionText": desc,
"DefaultChoices": False,
"DataExportTag": "Q1",
"QuestionID": "QID1",
"QuestionType": "PGR",
"Selector": "DragAndDrop",
"SubSelector": "Columns",
"Configuration": {
"QuestionDescriptionOption": "UseText",
"Stack": False,
"StackItemsInGroups": False,
},
"QuestionDescription": desc,
"Choices": choices,
"ChoiceOrder": choiceOrder,
"Validation": {
"Settings": {
"ForceResponse": "ON",
"Type": "GroupChoiceRange",
"MinChoices": "{}".format(binsize),
"MaxChoices": "{}".format(binsize),
}
},
"GradingData": [],
"Language": [],
"NextChoiceId": len(choices) + 1,
"NextAnswerId": 6,
"Groups": ["Top 10%", "Top 20%", "Top 30%", "Top 40%", "Top 50%"],
"NumberOfGroups": 5,
"QuestionText_Unsafe": desc,
}
c.addSurveyQuestion(surveyId, questionDef)
if shareWith:
c.shareSurvey(surveyId, shareWith)
c.publishSurvey(surveyId)
c.activateSurvey(surveyId)
link = "https://cornell.qualtrics.com/jfe/form/%s" % surveyId
return link | e94f782389de86a8cbfb9c77aa078f004ac061c9 | 17,338 |
def _get_badge_status(
self_compat_res: dict,
google_compat_res: dict,
dependency_res: dict) -> BadgeStatus:
"""Get the badge status.
The badge status will determine the right hand text and the color of
the badge.
Args:
self_compat_res: a dict containing a package's self compatibility
status for py2 and py3. See _get_self_compatibility_dict().
google_compat_res: a dict containing a package's pair compatibility
status for py2 and py3. See _get_pair_compatibility_dict().
dependency_res: a dict containing a package's dependency status.
See _get_dependency_dict().
Returns:
The cumulative badge status.
"""
statuses = []
for pyver in ['py2', 'py3']:
statuses.append(self_compat_res[pyver]['status'])
statuses.append(google_compat_res[pyver]['status'])
statuses.append(dependency_res['status'])
return BadgeStatus.get_highest_status(statuses) | f367f75321c62a7c86b4ef26be446072e5eaca7c | 17,339 |
import yaml
def _get_yaml_as_string_from_mark(marker):
"""Gets yaml and converts to text"""
testids_mark_arg_no = len(marker.args)
if testids_mark_arg_no > 1:
raise TypeError(
'Incorrect number of arguments passed to'
' @pytest.mark.test_yaml, expected 1 and '
'received {}'.format(testids_mark_arg_no))
else:
yaml_object = yaml.load(marker.args[0])
yaml_text_block = '\n---\n' \
+ yaml.dump(yaml_object, default_flow_style=False) \
+ '...'
indented_yaml_text_block = '\n '.join(yaml_text_block.split('\n'))
return indented_yaml_text_block | 034dab9c5380035d2303df7ea7243b84baff47a0 | 17,340 |
def combine_dicts(w_dict1, w_dict2, params, model):
"""
Combine two dictionaries:
"""
w_dict = w_dict1 + w_dict2
eps = params[0]
params[0] = 0
P_w = []
w_dict = md.remove_duplicates_w_dict(P_w,w_dict,params,model)
return w_dict | 21c2de003cca0165b5404431178450bf6e6c549c | 17,341 |
def geocode_mapping(row, aian_ranges, aian_areas, redefine_counties, strong_mcd_states):
"""
Maps an RDD row to a tuple with format (state, AIAN_bool, AIANNHCE, county, place/MCD, tract, block), where
place/MCD is the five digit MCD in MCD-strong states and 5 digit place otherwise
AIAN_bool is '1' if the block is inside the AIAN area and '0' otherwise.
:param row: An RDD row with format (state, AIANNHCE, county, place, MCD, tract, block)
    :param aian_ranges: a dictionary with keys given by the AIAN type and values given by a tuple with two elements
                        that indicate the starting and ending AIANNHCE values for the AIAN area category.
:param aian_areas: a specification of AIANNHCE code groups that should be used to define AIAN areas; see also
make_grfc_ids().
:param redefine_counties: specifies that counties inside of AIAN areas should be redefined as incorporated places or
MCDs "in_strong_MCDs", "everywhere", or "nowhere"
:param strong_mcd_states: a tuple of the state geoids that are strong MCD states
:return res: a tuple with format (state, AIAN_bool, AIANNHCE, county, place/MCD, tract, block)
"""
state, aiannhce, county, place, cousub, tract, block = row
county = '10' + county
is_strong_MCD = state in strong_mcd_states
# The following AIANNHCE values are not in the universe of possible AIANNHCE codes:
assert aiannhce not in [str(x) for x in range(4990, 5000)], "AIANNHCE codes cannot be between 4990 and 4999"
if aiannhce == '9999':
        # Not in any of the AIAN area categories:
aian = '0'
else:
        # Check if the AIAN area category is included in the user's specification of AIAN areas:
for aian_definition, aian_range in aian_ranges.items():
if aiannhce <= aian_range[1] and aiannhce >= aian_range[0]:
aian = '1' if aian_definition in aian_areas else '0'
# If the user wishes to bypass from the county geounit to the individual AIAN areas, do so here:
if aian_definition in aian_areas and ((redefine_counties == 'in_strong_MCDs' and is_strong_MCD) or redefine_counties == 'everywhere'):
county = '0' + aiannhce
break
# An alternative would be to remove the second condition in the next if statement to increase accuracy in MCDs:
if is_strong_MCD and aian == '0':
mcd_or_place = cousub
else:
mcd_or_place = place
das_aian_area_code = aiannhce if (aian == '1') else '9999'
return state, aian, county, mcd_or_place, tract, block, das_aian_area_code | 6d2dcd7aa5acb5bff71120d957f520d0eec79790 | 17,342 |
from typing import Dict

import pandas as pd

def get_stan_input(
scores: pd.DataFrame,
priors: Dict,
likelihood: bool,
) -> Dict:
"""Get an input to cmdstanpy.CmdStanModel.sample.
    :param scores: a pandas DataFrame whose rows represent individual scores
    :param priors: a dictionary of prior parameter values
    :param likelihood: whether to include the likelihood in the Stan input
"""
return {
**priors,
**{
"N": len(scores),
"N_skater": scores["name"].nunique(),
"N_grade": N_GRADE,
"skater": one_encode(scores["name"]).values,
"y": scores["score"].astype(int).add(6).values,
"N_test": len(scores),
"skater_test": one_encode(scores["name"]).values,
"y_test": scores["score"].astype(int).add(6).values,
"likelihood": int(likelihood),
},
} | d8e11401c1c86bb3306652f6f3b1aaebe47ef2d8 | 17,343 |
def get_mph(velocity):
"""
Returns
-------
    velocity converted from m/s to miles per hour [mph].
"""
velocity = velocity * 3600 /1852
return velocity | f4a1922712ef2d8cfeba5650f410405956a39c31 | 17,344 |
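A worked number for the conversion above: dividing by 1852 (metres per nautical mile) means the result is actually in knots rather than statute miles per hour; the statute-mile factor is shown for comparison.

print(get_mph(10.0))           # 10 * 3600 / 1852 ≈ 19.44 (knots)
print(10.0 * 3600 / 1609.344)  # ≈ 22.37 would be statute miles per hour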
import json
def _load_jsonl(input_path) -> list:
"""
Read list of objects from a JSON lines file.
"""
data = []
with open(input_path, 'r', encoding='utf-8') as f:
for line in f:
data.append(json.loads(line.rstrip('\n|\r')))
print('[LoadJsonl] Loaded {} records from {}'.format(len(data), input_path))
return data | 2cd35ff8afa7c325688046165517746e2b120b77 | 17,345 |
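A small round-trip sketch for `_load_jsonl` using a temporary file; the records and file name are illustrative only:

import json
import tempfile

records = [{"id": 1, "text": "hello"}, {"id": 2, "text": "world"}]
with tempfile.NamedTemporaryFile("w", suffix=".jsonl", delete=False, encoding="utf-8") as tmp:
    tmp.write("\n".join(json.dumps(r) for r in records))
    path = tmp.name

assert _load_jsonl(path) == records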
import types
import importlib
def reload(name: str) -> types.ModuleType:
"""
Finalize and reload a plugin and any plugins that (transitively) depend on it. We try to run all finalizers in
dependency order, and only load plugins that were successfully unloaded, and whose dependencies have been
successfully reloaded. If a plugin fails to initialize, we run any finalizers it managed to register, and the plugin
is not loaded. Any exceptions raised will be reraised together. Returns the module object of the requested plugin if
successful.
"""
reloads = deps.subgraph_paths_to(name)
logger.info("Reloading {} with dependencies: {}".format(name,
", ".join(dep for dep in reloads.topo_sort_fwd() if dep != name)))
unload_success = set()
reload_success = set()
unload_gen = reloads.topo_sort_fwd()
reload_gen = reloads.topo_sort_bck()
def cont_reload() -> None:
try:
for dep in reload_gen:
if dep == name:
continue
elif dep not in unload_success:
logger.info("Not reloading {} because it was not unloaded properly".format(name))
elif not all(m in reload_success
for m in reloads.edges_from(dep)):
logger.info("Not reloading {} because its dependencies were not reloaded properly".format(name))
else:
importlib.import_module(dep)
reload_success.add(dep)
except:
cont_reload()
raise
def cont_unload() -> types.ModuleType:
try:
for dep in unload_gen:
if dep == name:
continue
unsafe_unload(dep)
unload_success.add(dep)
except:
cont_unload()
raise
try:
unsafe_unload(name)
except:
cont_reload()
raise
try:
ret = importlib.import_module(name)
reload_success.add(name)
finally:
cont_reload()
return ret
return cont_unload() | ea00d2139b51e80239960f61c0dc91dfe45de7d9 | 17,346 |
import json
def load_from_config(config_path, **kwargs):
"""Load from a config file. Config options can still be overwritten with kwargs"""
with open(config_path, "r") as config_file:
config = json.load(config_file)
config.update(kwargs)
return TokenizationConfig(**config) | 66ea64a334b265ae216413a043044767da0fd61c | 17,347 |
import collections
def get_tecogan_monitors(monitor):
"""
Create monitors for displaying and storing TECOGAN losses.
"""
monitor_vgg_loss = MonitorSeries(
'vgg loss', monitor, interval=20)
monitor_pp_loss = MonitorSeries(
'ping pong', monitor, interval=20)
monitor_sum_layer_loss = MonitorSeries(
'd layer loss', monitor, interval=20)
monitor_adv_loss = MonitorSeries(
'adversarial loss', monitor, interval=20)
monitor_disc_loss = MonitorSeries(
'discriminator loss', monitor, interval=20)
monitor_tb = MonitorSeries(
'tb', monitor, interval=20)
Monitor_tecogan = collections.namedtuple('Monitor_tecogan',
['monitor_vgg_loss', 'monitor_pp_loss', 'monitor_sum_layer_loss',
'monitor_adv_loss', 'monitor_disc_loss', 'monitor_tb'])
return Monitor_tecogan(monitor_vgg_loss, monitor_pp_loss, monitor_sum_layer_loss, monitor_adv_loss, monitor_disc_loss, monitor_tb) | 472605e4ff7a0e487fd868a573fbecf5acd977ba | 17,348 |
import pandas as pd
import sklearn.utils
from sklearn.metrics.pairwise import cosine_similarity


def user_based_filtering_recommend(new_user, user_movies_ids, movies_num, n_neighbor, movies_ratings):
    """Recommend movies with user-based collaborative filtering.
    Cosine similarity is used to find the users most similar to the new user;
    the function returns movies_num movies drawn from the top-ranked movies of
    the n_neighbor users who are most similar to the new user."""
#pivot the dataframe
users_inDB = movies_ratings.pivot_table(index='userId', columns='movieId', values='rating')
list_id_movies = movies_ratings['movieId'].unique()
new_user_vector = pd.DataFrame(new_user, index=list_id_movies).T
#fill Nans with 3 rating
users_inDB = users_inDB.fillna(3.0)
new_user_vector_filled = new_user_vector.fillna(3.0)
#for cosine similarity we have to center the data in order to have a magnitude(0-1)
users_inDB = (users_inDB - 3.0)/2.0
new_user = (new_user_vector_filled - 3.0)/2.0
#label the new user that we want to recommend for:
new_user.index=['new_user']
    #add the new user to the original df
users_matrix = pd.concat([users_inDB,new_user])
#calculate cosine similarity
users_similarity_matrix = cosine_similarity(users_matrix)
users_similarity_matrix = pd.DataFrame(users_similarity_matrix,index=users_matrix.index,columns=users_matrix.index)
#we get here (users_num*users_num) similarity matrix
#print(users_matrix_similarity)
# get the new user similarities row: except the last column value(similarity with himself=1)
new_user_similarity = users_similarity_matrix['new_user'].iloc[:-1]
# take the n_neighbors nearest users (N users who have the most similarity with the new user)
similar_users = new_user_similarity.nlargest(n_neighbor).index.values
#print(similar_users)
#we will get (movies_num*n_neighbor*2) movies to choose
recommended_movieIds = []
scores = []
for user in similar_users:
recommended_movieIds.extend(users_inDB.loc[user].nlargest(movies_num*2).index)
scores.extend(users_inDB.loc[user].nlargest(movies_num*2).values)
recommended_movies_dic = {'movie_id':recommended_movieIds,'score':scores}
recommended_movies_df = pd.DataFrame(recommended_movies_dic)
#print(recommended_movies_df)
#Shuffle the movies
recommended_movies_df = sklearn.utils.shuffle(recommended_movies_df)
#Order movies by score
recommended_movies_df = recommended_movies_df.sort_values(by='score',ascending=False)
recommended_movies_ids = recommended_movies_df['movie_id'].unique()
    #get the final recommendation: return movies_num movies which the user hasn't rated
top_recommended_movies = []
for movie_id in recommended_movies_ids:
if (movie_id not in user_movies_ids) and (len(top_recommended_movies) < movies_num) :
top_recommended_movies.append(movie_id)
#finally return the movies titles
top_recommended_movies = movieId_to_title(top_recommended_movies,movies_ratings)
return top_recommended_movies | 4fa86b9966024e0d89969566d85ccf0b0a44bfcc | 17,349 |
import numpy as np
from astropy import wcs
import astropy.wcs.utils  # noqa: F401  (makes wcs.utils accessible)
from astropy.coordinates import SkyCoord


def query_ps_from_wcs(w):
    """Query Pan-STARRS for sources inside the footprint of a WCS.
"""
nra,ndec = w.array_shape[1:]
dra,ddec = w.wcs.cdelt[:2]
c = wcs.utils.pixel_to_skycoord(nra/2.,ndec/2.,w)
ddeg = np.linalg.norm([dra*nra/2,ddec*ndec/2])
pd_table = query(c.ra.value,c.dec.value,ddeg)
# Crop sources to those in the cube limits
scat = wcs.utils.skycoord_to_pixel(
SkyCoord(pd_table['raMean'],pd_table['decMean'], unit="deg"),
w,
origin=0,
mode='all'
)
mask = (scat[0] < nra)*(scat[1] < ndec)*(scat[0] > 0)*(scat[1] > 0)
pd_table = pd_table[mask]
pd_table['x'] = scat[0][mask]
pd_table['y'] = scat[1][mask]
return pd_table | 806baf87722213ab021e1e3889322539069a3b55 | 17,350 |
import torch
def permute(x, in_shape='BCD', out_shape='BCD', **kw):
""" Permute the dimensions of a tensor.\n
- `x: Tensor`; The nd-tensor to be permuted.
- `in_shape: str`; The dimension shape of `x`. Can only have characters `'B'` or `'C'` or `'D'`,
which stand for Batch, Channel, or extra Dimensions. The default value `'BCD'` means
        the input tensor `x` should be at least 2-d with shape `(Batch, Channel, Dim0, Dim1, Dim2, ...)`,
where `Dim0, Dim1, Dim2 ...` stand for any number of extra dimensions.
- `out_shape: str or tuple or None`; The dimension shape of returned tensor. Default: `'BCD'`.
If a `str`, it is restricted to the same three characters `'B'`, `'C'` or `'D'` as the `in_shape`.
If a `tuple`, `in_shape` is ignored, and simply `x.permute(out_shape)` is returned.
If `None`, no permution will be performed.
- `return: Tensor`; Permuted nd-tensor. """
if (in_shape == out_shape) or (out_shape is None):
return x
if isinstance(out_shape, (list, tuple, torch.Size)):
return x.permute(*out_shape)
if isinstance(in_shape, str) and isinstance(out_shape, str) :
assert set(in_shape) == set(out_shape) <= {'B', 'C', 'D'}, 'In and out shapes must have save set of chars among B, C, and D.'
in_shape = in_shape.lower().replace('d', '...')
out_shape = out_shape.lower().replace('d', '...')
return torch.einsum(f'{in_shape}->{out_shape}', x)
return x | e74594df581c12891963e931999563374cd89c7d | 17,351 |
import matplotlib.pyplot as plt
import numpy as np


def heatmap(data, row_labels, col_labels, ax=None,
cbar_kw={}, cbarlabel="", **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Parameters
----------
data
A 2D numpy array of shape (N, M).
row_labels
A list or array of length N with the labels for the rows.
col_labels
A list or array of length M with the labels for the columns.
ax
A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If
not provided, use current axes or create a new one. Optional.
cbar_kw
A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.
cbarlabel
The label for the colorbar. Optional.
**kwargs
All other arguments are forwarded to `imshow`.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=False, bottom=False,
labeltop=True, labelbottom=True)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=0, ha="center",
rotation_mode="anchor")
# Turn spines off and create white grid.
ax.spines[:].set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", axis="x", color="black", linestyle='--', linewidth=1)
ax.grid(which="minor", axis="y", color="black", linestyle='-', linewidth=3)
ax.tick_params(which="minor", top=False, left=False)
return im | 51c60139f9f2668f8ba31859c036f48a3e8faf63 | 17,352 |
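A short driver for the `heatmap` helper above (note that `cbar_kw` and `cbarlabel` are accepted but unused in this trimmed version, and the `spines[:]` slice requires matplotlib >= 3.4); the data, labels and output file name are made up:

import numpy as np
import matplotlib.pyplot as plt

data = np.random.rand(4, 6)
rows = ["row %d" % i for i in range(4)]
cols = ["col %d" % j for j in range(6)]

fig, ax = plt.subplots()
im = heatmap(data, rows, cols, ax=ax, cmap="viridis")
fig.savefig("heatmap_example.png")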
import zlib
import struct
def assert_is_normal_rpyc(f):
"""
Analyze the structure of a single rpyc file object for correctness.
Does not actually say anything about the _contents_ of that section, just that we were able
to slice it out of there.
    If successful, returns the uncompressed contents of the first storage slot.
"""
f.seek(0)
header = f.read(1024)
f.seek(0)
if header[:10] != "RENPY RPC2":
# either legacy, or someone messed with the header
# assuming legacy, see if this thing is a valid zlib blob
raw_data = f.read()
f.seek(0)
try:
uncompressed = zlib.decompress(raw_data)
except zlib.error:
raise ValueError("Did not find RENPY RPC2 header, but interpretation as legacy file failed")
return uncompressed
else:
        if len(header) < 46:
            # 10 bytes header + 4 * 9 bytes content table
            raise ValueError("File too short")
        # content table: (slot, offset, length) triples terminated by (0, 0, 0);
        # the second slot's length is unpacked as `f2` so it does not shadow the file object `f`
        a, b, c, d, e, f2, g, h, i = struct.unpack("<IIIIIIIII", header[10: 46])
        # does the header format match default ren'py generated files?
        if not (a == 1 and b == 46 and d == 2 and (g, h, i) == (0, 0, 0) and b + c == e):
            raise ValueError("Header data is abnormal, did the format gain extra fields?")
        f.seek(b)
        raw_data = f.read(c)
        f.seek(0)
        if len(raw_data) != c:
            raise ValueError("Header data is incompatible with file length")
        try:
            uncompressed = zlib.decompress(raw_data)
        except zlib.error:
            raise ValueError("Slot 1 did not contain a zlib blob")
        if not uncompressed.endswith("."):
            raise ValueError("Slot 1 did not contain a simple pickle")
        return uncompressed
def substract_li(cfg, data, lats, lons, future_exp):
"""Difference between historical and future fields."""
pathlist = data.get_path_list(short_name='pr', exp='historical')
ar_diff_rain = np.zeros((len(lats), len(lons), len(pathlist)))
mism_diff_rain = np.zeros(len(pathlist))
mwp_hist_rain = np.zeros(len(pathlist))
ar_hist_rain = np.zeros((len(lats), len(lons), len(pathlist)))
ar_diff_ua = np.zeros((len(lats), len(lons), len(pathlist)))
ar_diff_va = np.zeros((len(lats), len(lons), len(pathlist)))
datasets = []
for iii, dataset_path in enumerate(pathlist):
# Substract historical experiment from rcp85 experiment
datasets.append(data.get_info(n.DATASET, dataset_path))
ar_diff_rain[:, :, iii] = (data.get_data(short_name='pr',
exp=future_exp,
dataset=datasets[iii]) -
data.get_data(short_name='pr',
exp='historical',
dataset=datasets[iii])) / \
(data.get_data(short_name='ts',
exp=future_exp, dataset=datasets[iii]) -
data.get_data(short_name='ts',
exp='historical', dataset=datasets[iii]))
# ISM (60◦ –95◦ E, 10◦ –30◦ N)
mism_diff_rain[iii] = \
np.mean((ar_diff_rain[:,
get_latlon_index(lons, 60, 95),
iii])[get_latlon_index(lats, 10, 30), :])
ar_hist_rain[:, :, iii] = data.get_data(
short_name='pr', exp='historical', dataset=datasets[iii])
# Western pacific (140◦ E–170◦ W, 12◦ S–12◦ N)
mwp_hist_rain[iii] = \
np.mean((ar_hist_rain[:,
get_latlon_index(lons, 140, 170),
iii])[get_latlon_index(lats, -12, 12), :])
ar_diff_ua[:, :, iii] = (data.get_data(short_name='ua',
exp=future_exp,
dataset=datasets[iii]) -
data.get_data(short_name='ua',
exp='historical',
dataset=datasets[iii])) / \
(data.get_data(short_name='ts',
exp=future_exp, dataset=datasets[iii]) -
data.get_data(short_name='ts',
exp='historical', dataset=datasets[iii]))
ar_diff_va[:, :, iii] = (data.get_data(short_name='va',
exp=future_exp,
dataset=datasets[iii]) -
data.get_data(short_name='va',
exp='historical',
dataset=datasets[iii])) / \
(data.get_data(short_name='ts',
exp=future_exp, dataset=datasets[iii]) -
data.get_data(short_name='ts',
exp='historical', dataset=datasets[iii]))
plot_rain_and_wind(cfg, datasets[iii],
{'ar_diff_rain': ar_diff_rain[:, :, iii],
'ar_diff_ua': ar_diff_ua[:, :, iii],
'ar_diff_va': ar_diff_va[:, :, iii],
'lats': lats, 'lons': lons}, future_exp)
return {
"datasets": datasets,
"ar_diff_rain": ar_diff_rain,
"ar_diff_ua": ar_diff_ua,
"ar_diff_va": ar_diff_va,
"ar_hist_rain": ar_hist_rain,
"mism_diff_rain": mism_diff_rain,
"mwp_hist_rain": mwp_hist_rain
} | 40506221fbdf5a9b0e2174e0fe144958dd57c93b | 17,355 |
def identify_jobs_to_update(file_path, jobs):
"""identify jobs to update."""
name_map = {}
for job in jobs:
cluster = get_desired_cluster(file_path, job)
if cluster != job.get("cluster", ""):
name_map[job["name"]] = cluster
return name_map | be9b8bd38ed90c96ac185195a79a43ffbec5e7d5 | 17,356 |
import googleapiclient.discovery
import googleapiclient.errors


def bootstrap_storage_bucket(project_id, bucket_name, google_credentials):
"""
Bootstrap the bucket used to store Terraform state for projects.
Args:
project_id:
The ID of the project to create the bucket in.
bucket_name:
The name of the bucket to create.
google_credentials:
The credentials authorizing the creation of the bucket.
Returns:
An object containing information about the bucket.
"""
print(f"Attempting to retrieve existing bucket: {bucket_name}'")
service = googleapiclient.discovery.build(
"storage", "v1", credentials=google_credentials
)
request = service.buckets().get(bucket=bucket_name)
try:
bucket = request.execute()
print("Bucket exists.\n")
return bucket
except googleapiclient.errors.HttpError as e:
if e.resp['status'] != '404':
raise
print("Bucket does not exist yet. Creating it...")
bucket_body = {
"name": bucket_name,
"versioning": {
"enabled": True,
},
}
request = service.buckets().insert(
body=bucket_body,
predefinedAcl="projectPrivate",
predefinedDefaultObjectAcl="projectPrivate",
project=project_id
)
bucket = request.execute()
print("Done.\n")
return bucket | acdd72fbcb160d5c6347f1f41b6661fcf28ebdc2 | 17,357 |
def ValidateBucketForCertificateAuthority(bucket_name):
"""Validates that a user-specified bucket can be used with a Private CA.
Args:
bucket_name: The name of the GCS bucket to validate.
Returns:
A BucketReference wrapping the given bucket name.
Raises:
InvalidArgumentException: when the given bucket can't be used with a CA.
"""
messages = storage_util.GetMessages()
client = storage_api.StorageClient(messages=messages)
try:
bucket = client.GetBucket(
bucket_name,
messages.StorageBucketsGetRequest.ProjectionValueValuesEnum.full)
if not _BucketAllowsPublicObjectReads(bucket):
# Show a warning but don't fail, since this could be intentional.
log.warning(
'The specified bucket does not publicly expose new objects by '
'default, so some clients may not be able to access the CA '
'certificate or CRLs. For more details, see '
'https://cloud.google.com/storage/docs/access-control/making-data-public'
)
return storage_util.BucketReference(bucket_name)
except storage_api.BucketNotFoundError:
raise exceptions.InvalidArgumentException(
'gcs-bucket', 'The given bucket does not exist.') | b28e501b7747f8a4d417b156c2e627d8ca524aee | 17,358 |
from torch.utils.data import DataLoader


def load_train_val(seq_len, batch_size, dataset="hollywood2"):
"""
This returns two dataloaders correponding to the train and validation sets. Each
iterator yields tensors of shape (N, 3, L, H, W) where N is the batch size, L is
the sequence length, and H and W are the height and width of the frame.
The batch size is always 1 in the validation set. The frames are always cropped
    to (160, 160) windows in the training set. The frames in the validation set are
not cropped if they are smaller than 360x480; otherwise, they are cropped so the
maximum returned size is 360x480.
"""
train = DataLoader(VideoDataset(
"%s/train" % dataset,
crop_size=(160, 160),
seq_len=seq_len,
), shuffle=True, num_workers=16, batch_size=batch_size, pin_memory=True)
val = DataLoader(VideoDataset(
"%s/val" % dataset,
crop_size=False,
seq_len=seq_len,
), shuffle=False, batch_size=1, pin_memory=True)
return train, val | 628a2c0db01b30c4736e482dbc81789afcbdc92a | 17,359 |
import yaml


def read_params_file(config_path: str) -> dict:
    """Read and parse the params.yaml file.
    Args:
        config_path (str): path to the yaml config file
    Returns:
        dict: parsed configuration
"""
with open(config_path) as yaml_file:
config = yaml.safe_load(yaml_file)
return config | b8a4bf0f70d1b4e2096ebd6d96568fc7ee757e16 | 17,361 |
import re
def fullmatch(regex, string, flags=0):
"""Emulate python-3.4 re.fullmatch()."""
matched = re.match(regex, string, flags=flags)
if matched and matched.span()[1] == len(string):
return matched
return None | 72de0abe5c15dd17879b439562747c9093d517c5 | 17,362 |
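A small demonstration of the `fullmatch` backport; its behaviour mirrors `re.fullmatch` on Python 3.4+:

import re

assert fullmatch(r"\d+", "12345") is not None   # the whole string matches
assert fullmatch(r"\d+", "123abc") is None      # a prefix-only match is rejected
assert re.match(r"\d+", "123abc") is not None   # plain match() would accept it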
from typing import Any
from typing import Type
import inspect
def _is_class(module: Any, member: Type, clazz: Type) -> bool:
"""
Validates if a module member is a class and an instance of a CoreService.
:param module: module to validate for service
:param member: member to validate for service
:param clazz: clazz type to check for validation
:return: True if a valid service, False otherwise
"""
if not inspect.isclass(member):
return False
if not issubclass(member, clazz):
return False
if member.__module__ != module.__name__:
return False
return True | 5792fadcc93068fa8d7050de7d84ee2bbe1fb0f1 | 17,364 |
from functools import partial


def word_boundary(queries, count, degree, parallel=True, **kwargs):
"""
run augmentation on list of sentences
:param queries: sentences to augment
:type queries: list
:param count: number of output for each query
:type count: int
:param degree: degree of augmentation, takes value between 0 and 1
:type degree: float
:param parallel: run in parallel
:type parallel: bool
:param kwargs:
:return:
"""
if parallel:
function = partial(
__word_boundary__,
**kwargs,
degree=degree,
count=count,
)
return run_parallel(queries, function)
else:
return [
__word_boundary__(word, degree=degree, count=count, **kwargs)
for word in queries
] | 7ca4172d2900c773322d54380bde6780f2580597 | 17,365 |
def myFunction(objectIn):
"""What you are supposed to test."""
return objectIn.aMethodToMock() + 2 | 1907db338a05f2d798ccde63366d052404324e6f | 17,366 |
from configparser import ConfigParser


def read_config(filename, section):
""" Reads a section from a .ini file and returns a dict object
"""
parser = ConfigParser()
parser.read(filename)
dic = {}
if parser.has_section(section):
items = parser.items(section)
for item in items:
dic[item[0]] = item[1]
else:
raise Exception('{0} not found in the {1} file'.format(section, filename))
return dic | 3eb84afc13b0ad40bcaf434d4a38712cedb4502a | 17,367 |
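A usage sketch for `read_config` against a throw-away .ini file; the section and keys are illustrative:

import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".ini", delete=False) as tmp:
    tmp.write("[database]\nhost = localhost\nport = 5432\n")
    path = tmp.name

print(read_config(path, "database"))  # {'host': 'localhost', 'port': '5432'}
try:
    read_config(path, "missing")
except Exception as exc:
    print(exc)  # "missing not found in the ... file"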
def get_training_set_count(disc):
"""Returns the total number of training sets of a discipline and all its
child elements.
:param disc: Discipline instance
:type disc: models.Discipline
:return: sum of training sets
:rtype: int
"""
training_set_counter = 0
for child in disc.get_descendants(include_self=True):
training_set_counter += child.training_sets.count()
return training_set_counter | 9b28a9e51e04b559f05f1cc0255a6c65ca4a0980 | 17,368 |
def lazy_import(module_name, callback=None):
"""Returns a proxy module object that will lazily import the given module the first
time it is used.
Example usage::
# Lazy version of `import tensorflow as tf`
tf = lazy_import("tensorflow")
# Other commands
# Now the module is loaded
tf.__version__
Args:
module_name: the fully-qualified module name to import
callback (None): a callback function to call before importing the
module
Returns:
a proxy module object that will be lazily imported when first used
"""
return LazyModule(module_name, callback=callback) | bc94a18b4a8a2714d2cffd743de2a202ecb5af78 | 17,370 |
def _top_k(array, k):
"""Returns top k values and their indices along the last axis of the array.
This function serves the same purpose as jax.lax.top_k, but in a more XLA
friendly manner for TPUs:
(1) On TPUs, we use one-hot matrix multiplications to select the top k values.
This convoluted way of obtaining the top k values is generally faster on
TPUs.
(2) Otherwise, we fall back to jax.lax.top_k (and its underlying scatter op).
Args:
array: Source array.
k: Number of top values to select.
Returns:
- Top k values
- Associated top k indices.
"""
if _favor_one_hot_slices():
top_k_indices = jax.lax.top_k(array, k)[-1]
top_k_values = _take_along_axis(array, top_k_indices, axis=-1)
return top_k_values, top_k_indices
else:
return jax.lax.top_k(array, k) | 74c7c705b6b972d227c10146f0b5209f62c1d59f | 17,371 |
def time_to_accuracy(raw_metrics, tag, threshold):
"""Calculate the amount of time for accuracy to cross a given threshold.
Args:
raw_metrics: dict mapping TensorBoard tags to list of MetricPoint.
tag: string name of accuracy metric.
threshold: the desired model accuracy.
Returns:
float, amount of time in seconds to reach the desired accuracy.
"""
values = raw_metrics.get(tag)
if not values:
raise ValueError('No values found for time to accuracy tag: {}. '
'Possible tags were: {}'.format(tag, raw_metrics.keys()))
# MetricPoints should be sorted by timestamp with earlier events first.
start_wall_time = values[0].wall_time
try:
end_wall_time = next(
v.wall_time for v in values
if v.metric_value >= threshold)
return MetricPoint(end_wall_time - start_wall_time, end_wall_time)
except StopIteration:
max_accuracy = max(v.metric_value for v in values)
    raise ValueError(
        'Accuracy metric `{}` was never high enough to satisfy the '
        '`time_to_accuracy` settings from the config. Max accuracy: {}. '
        'Target accuracy: {}.'.format(tag, max_accuracy, threshold))
import numpy as np


def create_stencil(image_shape, smooth):
"""The stencil is a mask that will enable a smooth transition between blocks. blocks will be multiplied
by the stencil so that when they are blitted to the image, transition between them are smoothed out.
image 1: 1 1 1 1 1 1 1 , image 2: 2 2 2 2 2 2 2, stencil: .25 .75 1 1 1 .75 .25
image 1 * stencil: .25 .75 1 1 1 .75 .25
image 2 * stencil: .5 1.5 2 2 2 1.5 .5
adding them: .25 .75 1 1 1 1.25 1.75 2 2 2 1.5 .5
"""
stencil = np.ones(image_shape, dtype=np.float32)
# 2 * smooth because we need to blend the inside of the block with the outside of the other block
# for smooth = 4, i1; inside image 1, o1: outside image 1
# o1 o1 o1 o1 | i1 i1 i1 i1
# i1 i1 i1 i1 | o1 o1 o1 o1
factors = np.linspace(0, 1, 2*smooth+1, endpoint=False)[1:]
for i, f in enumerate(factors):
stencil[i, :, :] *= f
stencil[:, i, :] *= f
for i, f in enumerate(factors):
stencil[image_shape[0] - i - 1, :, :] *= f
stencil[:, image_shape[1] - i - 1, :] *= f
return stencil | 49aca2fb63ea6bef134c0872520fd203ce21bfef | 17,373 |
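A tiny check of the ramp described in the docstring above; the shape and smooth value are arbitrary choices for illustration:

stencil = create_stencil((12, 12, 1), smooth=2)
# Along an interior column the weights climb over 2*smooth rows, stay at 1, then fall off again.
print(stencil[:, 5, 0])  # approximately [0.2 0.4 0.6 0.8 1. 1. 1. 1. 0.8 0.6 0.4 0.2]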
def a_m_to_P(a, m):
"""Compute the orbital period given the semi-major axis and total mass.
Parameters
----------
{a}
{m}
"""
return 2*np.pi * np.sqrt(a**3 / (G * m)) | 734332ff83c06830388ceeecd64315ee738756f1 | 17,374 |
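The return expression is Kepler's third law, P = 2*pi*sqrt(a^3 / (G*m)). A worked check of the same formula, assuming astropy supplies the units and the gravitational constant (the module itself is expected to provide `np` and `G`):

import numpy as np
import astropy.units as u
from astropy.constants import G

# One solar mass at one astronomical unit gives an orbital period of ~1 year.
P = 2 * np.pi * np.sqrt((1 * u.au) ** 3 / (G * (1 * u.Msun)))
print(P.to(u.yr))  # ≈ 1.0 yr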
def _async_attr_mapper(attr_name, val):
"""The `async` attribute works slightly different than the other bool
attributes. It can be set explicitly to `false` with no surrounding quotes
according to the spec."""
if val in [False, 'False']:
return ' {}=false'.format(attr_name)
elif val:
return ' {}'.format(attr_name)
else:
return '' | 79e72067b244d705df9aa09a78db656f0847938c | 17,375 |
from inspect import isgenerator
from typing import Any
from typing import Type

import numpy as np

def wrap(val: Any) -> Value:
"""Wraps the given native `val` as Protobuf `Value` message.
Supports converting collection/array of primitives types to `Value` message:
* numpy array of primitives.
* list of primitives.
* generator of finite no. of primitives.
Generally, wrapping only supports wrapping of collection of primitives
if all primitives share the same native primitive types. However, some
native type mixing is allowed as supported by `np.asarray()`, although
doing so is not recommended.
If the given `val` is already a Protobuf `Value` message, returns `val` as is.
Args:
val: The native value to wrap as a protobuf message. The value should
be native primitive, array of primitives.
Returns:
Wrapped `Value` protobuf message.
Throws:
TypeError: If the given native value is not of a supported type.
"""
# return as is if val is already value protobuf
if isinstance(val, Value):
return val
# try to wrap value as primitive
try:
return wrap_primitive(val)
except TypeError:
pass
# check that we are not trying to convert None
if val is None:
raise TypeError("Wrapping None is Value proto is not supported")
    # materialize the values if given a generator
if isgenerator(val):
val = list(val)
    # extract a flattened list of primitive protos from the collection of primitives
val_arr = np.asarray(val)
primitives = [wrap_primitive(v) for v in val_arr.flatten()]
# resolve element data type and build value proto
element_type = primitives[0].data_type.primitive
return Value(
data_type=Type(
array=Type.Array(
dimensions=val_arr.shape,
element_type=element_type,
)
),
array=Value.Array(values=[p.primitive for p in primitives]),
) | 9208a2afd7b256ec791044531b13fe8c8b9fa2c8 | 17,376 |
def to_transform_msg(transform):
"""Convert a `Transform` object to a Transform message."""
msg = geometry_msgs.msg.Transform()
msg.translation = to_vector3_msg(transform.translation)
msg.rotation = to_quat_msg(transform.rotation)
return msg | c471ec8dfed03caa9f7096ab3294589477cf6d39 | 17,377 |
def print_pos_neg(num):
"""Print if positive or negative in polarity level
>>> print_pos_neg(0.8)
'positive'
>>> print_pos_neg(-0.5)
'negative'
"""
if num > 0:
return "positive"
elif num == 0:
return "neutral"
else:
return "negative" | 414aa98f54a2f01af24d591ae47ec4f394adf682 | 17,378 |
def delete_volume_op(name: str, namespace: str):
"""
Creates a kfp.dsl.ContainerOp that deletes a volume (Kubernetes Resource).
Parameters
----------
name : str
namespace : str
Returns
-------
kfp.dsl.ContainerOp
"""
kind = "PersistentVolumeClaim"
return kubernetes_resource_delete_op(
name=f"vol-{name}",
kind=kind,
namespace=namespace,
) | d947905e01de29061895512fbfd1fbefb024110d | 17,379 |
def distal(combo):
""" Returns the distal subspecies from a combo
:param combo: int representation of origin combination
:return: int representation of the distal origin
>>> distal(combine(CAS, DOM)) == DOM
True
"""
return combo & _DISTAL_MASK | 163875c1b4b081027344a3bc1f05bd0cb60a58d8 | 17,380 |
def get_eval_dataset(files, ftDict, axes = [2], splits = None, one_hot = None, moments = None, **kwargs):
"""
Get the preprocessed evaluation dataset
Args:
files (list): list of tfrecords to be used for evaluation
Returns:
A tf.data.Dataset of evaluation data.
"""
dataset = get_dataset(files, ftDict, axes, splits, one_hot, moments, **kwargs)
dataset = dataset.batch(1)
return dataset | 73476bf1273923e77bf5f4e6d415191cf83023cc | 17,381 |
def getTopApSignals(slot_to_io):
""" HLS simulator requires that there is an ap_done at the top level """
# find which slot has the s_axi_control
for slot, io_list in slot_to_io.items():
if any('s_axi' in io[-1] for io in io_list):
# note the naming convention
ap_done_source = [f'{io[-1]}_in' for io in io_list if 'ap_done' in io[-1]]
ap_start_source = [f'{io[-1]}_out' for io in io_list if 'ap_start' in io[-1]]
top_ap_signals = []
top_ap_signals.append(f'wire ap_done = ' + ' & '.join(ap_done_source) + ';')
top_ap_signals.append('wire ap_idle = ap_done;')
top_ap_signals.append('wire ap_ready = ap_done;')
top_ap_signals.append(f'wire ap_start = {ap_start_source[0]};') # only need 1 ap_start
return top_ap_signals
assert False | e40a8fb7797653ee7414c0120ceb29e49e9dfd84 | 17,382 |
def get_line_style(image: Image = None) -> int:
"""
Get line style of the specified image.
The line style will be used when drawing lines or shape outlines.
:param image: the target image whose line style is to be gotten. None means it is the target image
(see set_target() and get_target())
:return: line style used by the specified image
"""
image = _get_target_image(image)
return image.get_line_style() | cc1b9285fbd3b168f40e66969e0a4b1ae9ee234a | 17,383 |
from shapely.geometry import Polygon


def make_polygon_for_earth(lat_bottom_left, lon_bottom_left, lat_top_right, lon_top_right):
"""
Divides the region into two separate regions (if needed) so as to handle the cases where the regions
    cross the international date line
:param lat_bottom_left: float (-90 to 90)
:param lon_bottom_left: float (-180 to 180)
:param lat_top_right: float (-90 to 90)
:param lon_top_right: float (-180 to 180)
:return:
------------ <-----(lon top right, lat top right)
| |
| |
| |
| |
------------
^
|
---- (lon bottom left, lat bottom left)
"""
focus_regions = []
# case where region starts around 180 longitude and then wraps around to -180 longitude (complete cylinder)
# international date line crossed
    if lon_bottom_left > lon_top_right:  # region wraps around in longitude
# we need two polygons.
focus_region1 = Polygon([
[lon_bottom_left, lat_bottom_left],
[lon_bottom_left, lat_top_right],
[180, lat_top_right],
[180, lat_bottom_left]])
focus_region2 = Polygon([
[-180, lat_bottom_left],
[-180, lat_top_right],
[lon_top_right, lat_top_right],
[lon_top_right, lat_bottom_left]])
focus_regions = [focus_region1, focus_region2]
else: # international dateline not crossed
focus_region1 = Polygon([
[lon_bottom_left, lat_bottom_left],
[lon_bottom_left, lat_top_right],
[lon_top_right, lat_top_right],
[lon_top_right, lat_bottom_left]])
focus_regions = [focus_region1]
return focus_regions | 6f73cc35c11cd16eea0c80aa7921ff1680ee75b6 | 17,384 |
import numpy as np


def first_nonzero_coordinate(data, start_point, end_point):
"""Coordinate of the first nonzero element between start and end points.
Parameters
----------
data : nD array, shape (N1, N2, ..., ND)
A data volume.
start_point : array, shape (D,)
The start coordinate to check.
end_point : array, shape (D,)
The end coordinate to check.
Returns
-------
coordinates : array of int, shape (D,)
The coordinates of the first nonzero element along the ray, or None.
"""
shape = np.asarray(data.shape)
length = np.linalg.norm(end_point - start_point)
length_int = np.round(length).astype(int)
coords = np.linspace(start_point, end_point, length_int + 1, endpoint=True)
clipped_coords = np.clip(np.round(coords), 0, shape - 1).astype(int)
nonzero = np.flatnonzero(data[tuple(clipped_coords.T)])
if len(nonzero) == 0:
return None
else:
return clipped_coords[nonzero[0]] | 5db67cf49c3638a80695fd76a1a16eeec992d725 | 17,386 |
import numpy as np


def l1_distance(prediction, ground_truth):
"""L1 distance difference between two vectors."""
if prediction.shape != ground_truth.shape:
prediction, ground_truth = np.squeeze(prediction), np.squeeze(ground_truth)
min_length = min(prediction.size, ground_truth.size)
return np.abs(prediction[:min_length] - ground_truth[:min_length]) | aaf79b386efa5f1b8726adda8d8e7dc66a502e87 | 17,387 |
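A minimal check of `l1_distance` showing the squeeze-and-truncate behaviour when the shapes disagree; the numbers are made up:

import numpy as np

pred = np.array([[1.0, 2.0, 3.0, 4.0]])  # shape (1, 4) is squeezed to (4,)
truth = np.array([1.5, 2.0, 2.0])        # the shorter vector truncates the comparison
print(l1_distance(pred, truth))          # [0.5 0.  1. ]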
from typing import Match
import base64
def decode(match_id: str) -> Match:
"""Decode a match ID and return a Match.
>>> decode("QYkqASAAIAAA")
Match(cube_value=2, cube_holder=<Player.ZERO: 0>, player=<Player.ONE: 1>, crawford=False, game_state=<GameState.PLAYING: 1>, turn=<Player.ONE: 1>, double=False, resign=<Resign.NONE: 0>, dice=(5, 2), length=9, player_0_score=2, player_1_score=4)
"""
match_bytes: bytes = base64.b64decode(match_id)
match_key: str = "".join([format(b, "08b")[::-1] for b in match_bytes])
return Match(
cube_value=2 ** int(match_key[0:4][::-1], 2),
cube_holder=Player(int(match_key[4:6][::-1], 2)),
player=Player(int(match_key[6])),
crawford=bool(int(match_key[7])),
game_state=GameState(int(match_key[8:11][::-1], 2)),
turn=Player(int(match_key[11])),
double=bool(int(match_key[12])),
resign=Resign(int(match_key[13:15][::-1], 2)),
dice=(int(match_key[15:18][::-1], 2), int(match_key[18:21][::-1], 2)),
length=int(match_key[21:36][::-1], 2),
player_0_score=int(match_key[36:51][::-1], 2),
player_1_score=int(match_key[51:66][::-1], 2),
) | a48fae652650d03259fd003af16add381f2729f3 | 17,388 |
def _valid_proto_paths(transitive_proto_path):
"""Build a list of valid paths to build the --proto_path arguments for the ScalaPB protobuf compiler
In particular, the '.' path needs to be stripped out. This mirrors a fix in the java proto rules:
https://github.com/bazelbuild/bazel/commit/af3605862047f7b553b7d2c19fa645714ea19bcf
This is explained in this issue: https://github.com/bazelbuild/rules_scala/issues/687
"""
return depset([path for path in transitive_proto_path if path != "."]) | cb834a58fa091249f16d5cdfccf536229dacd3d0 | 17,389 |
def update_stats_objecness(obj_stats, gt_bboxes, gt_labels, pred_bboxes, pred_labels, pred_scores, mask_eval=False,
affordance_stats=None, gt_masks=None, pred_masks=None, img_height=None, img_width=None, iou_thres=0.3):
"""
Updates statistics for object classification and affordance detection.
:param obj_stats: accumulated statistics for object classification
:param gt_bboxes: ground truth normalized bounding boxes (batch_size, num_gt_bboxes, 4)
:param gt_labels: ground truth labels for gt_boxes (batch_size, num_gt_bboxes)
:param pred_bboxes: predicted normalized bounding boxes (batch_size, num_pred_bboxes, 4)
:param pred_labels: predicted labels for pred_bboxes (batch_size, num_pred_bboxes)
:param pred_scores: predicted scores for pred_bboxes (batch_size, num_pred_bboxes)
:param mask_eval: True if there are predicted masks, False otherwise
:param affordance_stats: accumulated statistics for affordance evaluation
:param gt_masks: ground truth masks (batch_size, num_gt_bboxes, orig_mask_height, orig_mask_width)
:param pred_masks: predicted masks with prob for each pixel for each class (batch_size, num_pred_bboxes, train_mask_size, train_mask_size, num_affordance_classes)
:param img_height: image height
:param img_width: image width
:returns: jsons with updated statistics for object classification and affordance detection
"""
# create empty mask to accumulate masks for all bboxes in one single mask
final_gt_mask = np.zeros((img_height, img_width))
final_pred_mask = np.zeros((img_height, img_width))
# iou for each pred_bbox wrt each gt_box
iou_map, zero_iou = bbox_utils.generate_iou_map(pred_bboxes, gt_bboxes)
# update stats only if there are some iou that are not 0
if not zero_iou:
# take max iou for each pred_bbox and its corresponding gt_box indices
merged_iou_map = tf.reduce_max(iou_map, axis=-1)
max_indices_each_gt = tf.argmax(iou_map, axis=-1, output_type=tf.int32)
sorted_ids = tf.argsort(merged_iou_map, direction="DESCENDING")
# Add total of true labels for each class to stats
count_holder = tf.unique_with_counts(tf.reshape(gt_labels, (-1,)))
for i, gt_label in enumerate(count_holder[0]):
if gt_label == -1:
continue
# gt_label = int(gt_label)
if int(gt_label) > 0:
gt_label = 1
obj_stats[gt_label]["total"] += int(count_holder[2][i])
for batch_id, m in enumerate(merged_iou_map):
true_labels = []
for i, sorted_id in enumerate(sorted_ids[batch_id]):
pred_label = pred_labels[batch_id, sorted_id]
if pred_label == 0:
continue
iou = merged_iou_map[batch_id, sorted_id]
gt_id = max_indices_each_gt[batch_id, sorted_id]
gt_label = int(gt_labels[batch_id, gt_id])
pred_label = int(pred_label)
score = pred_scores[batch_id, sorted_id]
obj_stats[pred_label]["scores"].append(score)
obj_stats[pred_label]["tp"].append(0)
obj_stats[pred_label]["fp"].append(0)
if int(gt_label) > 0:
gt_label = 1
# correct detection
if iou >= iou_thres and pred_label == gt_label and gt_id not in true_labels:
obj_stats[pred_label]["tp"][-1] = 1
true_labels.append(gt_id)
if mask_eval:
final_gt_mask, final_pred_mask = update_final_masks(final_gt_mask, final_pred_mask, gt_bboxes[batch_id, gt_id],
gt_masks[batch_id, gt_id].numpy(), pred_masks[batch_id, sorted_id],
img_height, img_width)
else:
obj_stats[pred_label]["fp"][-1] = 1
if mask_eval:
affordance_stats = update_stats_affordances(affordance_stats, final_gt_mask, final_pred_mask)
return obj_stats, affordance_stats | c07d57921a6f3f3d2d97c9d84afb5dcbcb885ea6 | 17,390 |
from typing import Dict
from pathlib import Path
import inspect
import json
def load_schema(rel_path: str) -> Dict:
"""
Loads a schema from a relative path of the caller of this function.
:param rel_path: Relative path from the caller. e.g. ../schemas/schema.json
:return: Loaded schema as a `dict`.
"""
caller_path = Path((inspect.stack()[1])[1]).parent
fp = (caller_path / rel_path).resolve()
with open(fp, "r") as fh:
data = json.loads(fh.read())
return data | 297e0e01dd2f4af071ab99ebaf203ddb64525c89 | 17,391 |
def bquantize(x, nsd=3, abstol=eps, reltol=10 * eps):
"""Bidirectionally quantize a 1D vector ``x`` to ``nsd`` signed digits.
This method will terminate early if the error is less than the specified
tolerances.
The quantizer details are repeated here for the user's convenience:
The quantizer is ideal, producing integer outputs centered about zero.
Quantizers with an even number of levels are of the mid-rise type and
produce outputs which are odd integers. Quantizers with an odd number
of levels are of the mid-tread type and produce outputs which are even
integers.
.. image:: ../doc/_static/quantizer_model.png
:align: center
:alt: Quantizer model
**Parameters:**
x : array_like or sequence
the data to be quantized.
nsd : int, optional
The number of signed digits.
abstol and reltol : floats, optional
If not supplied, the absolute tolerance and the relative
tolerance default to ``eps`` and ``10*eps``, resp.
**Returns:**
y : list
List of objects described below.
``y`` is a list of instances with the same length as ``x`` and the
following attributes:
* ``y[i].val`` is the quantized value in floating-point form,
* ``y[i].csd`` is a 2-by-nsd (or less) matrix containing
the powers of two (first row) and their signs (second row).
.. seealso::
:func:`bunquantize`, :func:`ds_quantize`
"""
n = x.shape[0] if isinstance(x, np.ndarray) else len(x)
#q = np.zeros((2*n, nsd)) in the original source #rep?
y = [empty() for i in range(n)]
offset = -np.log2(0.75)
for i in range(n):
xp = x[i]
y[i].val = 0.
y[i].csd = np.zeros((2, 0), dtype='int16')
for _ in range(nsd):
error = np.abs(y[i].val - x[i])
if error <= abstol and error <= np.abs(x[i]) * reltol: # rep? in the orig: or
break
p = mfloor(np.log2(np.abs(xp)) + offset)
p2 = 2 ** p
sx = np.sign(xp)
xp = xp - sx * p2
y[i].val = y[i].val + sx * p2
addme = np.array((p, sx)).reshape((2, 1))
y[i].csd = np.concatenate((y[i].csd, addme), axis=1)
return y | 2a2e5fb71f3198099a07d84e9ad83ba6849b38d0 | 17,392 |
def seg_to_bdry(seg, connectivity=1):
"""Given a borderless segmentation, return the boundary map."""
strel = generate_binary_structure(seg.ndim, connectivity)
return maximum_filter(seg, footprint=strel) != \
minimum_filter(seg, footprint=strel) | dc4e66a7e6f86d2984a23a2e7a7297403502b51d | 17,394 |
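# A hedged usage sketch for seg_to_bdry above. It assumes the filter helpers come
# from scipy.ndimage (generate_binary_structure, maximum_filter, minimum_filter),
# which provides these names with the signatures used in the function.
import numpy as np
from scipy.ndimage import generate_binary_structure, maximum_filter, minimum_filter

seg = np.array([[1, 1, 2],
                [1, 1, 2],
                [3, 3, 2]])
# True wherever a pixel touches a different label in its 4-connected neighbourhood,
# i.e. along the 1/2, 1/3 and 2/3 boundaries.
bdry = seg_to_bdry(seg, connectivity=1)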
def depthwise_conv2d(x, filters, strides, padding, data_format="NHWC", dilations=1):
"""Computes a 2-D depthwise convolution given 4-D input x and filters arrays.
Parameters
----------
x
Input image *[batch_size,h,w,d]*.
filters
Convolution filters *[fh,fw,d]*.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or list indicating the per-dimension
paddings.
data_format
NHWC" or "NCHW". Defaults to "NHWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
Returns
-------
ret
The result of the convolution operation.
"""
return _cur_framework(x).depthwise_conv2d(
x, filters, strides, padding, data_format, dilations
) | cc09b910d06b8fd9d1b5b00a80c6d376cf7f6005 | 17,395 |
def OUTA():
"""
The OUTA Operation
"""
control_signal = gen_control_signal_dict()
opcode_addr = gen_opcode_addr_component_dict()
mc_step_addr = gen_microcode_step_addr_component_dict()
input_sig_addr = gen_input_signal_addr_component_dict()
templates = []
# Step 2 - A -> OUT
addresses = rom_programmer.combine_address_components([
mc_step_addr[2],
opcode_addr["OUTA"]
])
data = rom_programmer.combine_data_components([
control_signal["A_OUT"],
control_signal["OUT_IN"]
])
templates.append(rom_programmer.DataTemplate(addresses, data))
# Step 3: Reset microcode step
addresses = rom_programmer.combine_address_components([
mc_step_addr[3],
opcode_addr["OUTA"]
])
data = rom_programmer.combine_data_components([
control_signal["STEP_COUNTER_RESET"]
])
templates.append(rom_programmer.DataTemplate(addresses, data))
return templates | 3ebd5e74005316d3925eaa553c112df8a61eaf90 | 17,396 |
def incidence_matrices(G, V, E, faces, edge_to_idx):
"""
Returns incidence matrices B1 and B2
:param G: NetworkX DiGraph
:param V: list of nodes
:param E: list of edges
:param faces: list of faces in G
Returns B1 (|V| x |E|) and B2 (|E| x |faces|)
B1[i][j]: -1 if node i is tail of edge j, 1 if node i is head of edge j, else 0 (tail -> head) (smaller -> larger)
B2[i][j]: 1 if edge i appears sorted in face j, -1 if edge i appears reversed in face j, else 0; given faces with sorted node order
"""
B1 = np.array(nx.incidence_matrix(G, nodelist=V, edgelist=E, oriented=True).todense())
B2 = np.zeros([len(E),len(faces)])
for f_idx, face in enumerate(faces): # face is sorted
edges = [face[:-1], face[1:], [face[0], face[2]]]
e_idxs = [edge_to_idx[tuple(e)] for e in edges]
B2[e_idxs[:-1], f_idx] = 1
B2[e_idxs[-1], f_idx] = -1
return B1, B2 | 90a82132100bb6d2e867ee7460ad55c6891b9082 | 17,397 |
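# A small hedged sketch for incidence_matrices above, using a single triangle;
# the graph, edge ordering and face list here are illustrative assumptions.
import numpy as np
import networkx as nx

V = [0, 1, 2]
E = [(0, 1), (0, 2), (1, 2)]          # edges oriented smaller node -> larger node
G = nx.DiGraph(E)
faces = [[0, 1, 2]]                   # one triangular face, nodes sorted
edge_to_idx = {e: i for i, e in enumerate(E)}
B1, B2 = incidence_matrices(G, V, E, faces, edge_to_idx)
# B1 is 3x3 (nodes x edges); B2 is 3x1 with +1 for (0,1) and (1,2) and -1 for (0,2),
# because (0,2) appears reversed when walking the face 0 -> 1 -> 2 -> 0.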
def get_hosts_ram_total(nova, hosts):
"""Get total RAM (free+used) of hosts.
:param nova: A Nova client
:type nova: *
:param hosts: A set of hosts
:type hosts: list(str)
:return: A dictionary of (host, total_ram)
:rtype: dict(str: *)
"""
hosts_ram_total = dict() #dict of (host, total_ram)
for host in hosts:
data = nova.hosts.get(host)
hosts_ram_total[host] = data[0].memory_mb
return hosts_ram_total | b913f9274339ab3ab976a17a8d07e5fe130b447d | 17,398 |
import re
import unicodedata
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-ascii characters,
and converts spaces to hyphens. For use in urls and filenames
From Django's "django/template/defaultfilters.py".
"""
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
    if not isinstance(value, str):
        value = str(value)
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    value = _slugify_strip_re.sub('', value).strip().lower()
return _slugify_hyphenate_re.sub('-', value) | 471a3205c84baa55573b780375999a7658031b89 | 17,399 |
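# Quick usage sketch for slugify above (Python 3 form of the helper):
print(slugify("Hello, World!"))            # -> hello-world
print(slugify("Crème brûlée recipe.txt"))  # -> creme-brulee-recipetxt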
def wpr(c_close, c_high, c_low, period):
"""
William %R
:type c_close: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type period: int
:rtype: (np.ndarray, np.ndarray)
"""
size = len(c_close)
out = np.array([np.nan] * size)
for i in range(period - 1, size):
e = i + 1
s = e - period
mh = np.max(c_high[s:e])
out[i] = ((mh - c_close[i]) / (mh - np.min(c_low[s:e]))) * -100
return out | 0f1d8d46464be81daa6308df97a7a8d12a90274b | 17,400 |
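# Hedged usage sketch for wpr above: Williams %R over a 3-bar window on toy prices.
import numpy as np

c_close = np.array([10.0, 11.0, 12.0, 11.0, 10.0])
c_high = np.array([11.0, 12.0, 13.0, 12.0, 11.0])
c_low = np.array([9.0, 10.0, 11.0, 10.0, 9.0])
out = wpr(c_close, c_high, c_low, period=3)
# out[:2] are NaN (not enough history); out[2] == -25.0 because
# ((13 - 12) / (13 - 9)) * -100 = -25.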
def delete_action_log(request, log_id):
"""
View for delete the action log.
This view can only access by superuser and staff.
"""
action = get_object_or_404(ActionLog, id=log_id)
if action.status == 0 or action.status == 1:
messages.error(request, "Cannot delete the Action log that is running or in idle state!")
return redirect('actions')
action.delete()
messages.success(request, "Delete action log successfully!")
return redirect('actions') | 8560e5280a57ddc8158b811fac29763bbaa8ef37 | 17,401 |
def hsla_to_rgba(h, s, l, a):
""" 0 <= H < 360, 0 <= s,l,a < 1
"""
h = h % 360
s = max(0, min(1, s))
l = max(0, min(1, l))
a = max(0, min(1, a))
c = (1 - abs(2*l - 1)) * s
x = c * (1 - abs(h/60%2 - 1))
m = l - c/2
if h<60:
r, g, b = c, x, 0
elif h<120:
r, g, b = x, c, 0
elif h<180:
r, g, b = 0, c, x
elif h<240:
r, g, b = 0, x, c
elif h<300:
r, g, b = x, 0, c
else:
r, g, b = c, 0, x
return (int((r+m)*255), int((g+m)*255), int((b+m)*255), int(a*255)) | 55e546756d4dd2a49581a5f950beb286dd73f3f9 | 17,402 |
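# Usage sketch for hsla_to_rgba above: hue 0 is red, 120 is green, 240 is blue.
print(hsla_to_rgba(0, 1, 0.5, 1))    # (255, 0, 0, 255)
print(hsla_to_rgba(120, 1, 0.5, 1))  # (0, 255, 0, 255)
print(hsla_to_rgba(240, 1, 0.5, 0))  # (0, 0, 255, 0) -- fully transparent blue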
from typing import Dict
from pathlib import Path
from typing import Optional
def prioritize(paths: Dict[int, Path], purpose: str) -> Optional[Path]:
"""Returns highest-priority and existing filepath from ``paths``.
Finds existing configuration or data file in ``paths`` with highest
priority and returns it, otherwise returns ``None``.
"""
for key in sorted(paths.keys(), reverse=True):
if purpose == "config":
if paths[key].exists():
return paths[key]
if purpose == "data":
return paths[key] | 2c00d0bfe696040c2c19dc1d8b3393b7be124e11 | 17,403 |
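# Hedged usage sketch for prioritize above; the paths are made-up examples and a
# higher dict key means higher priority.
from pathlib import Path

paths = {1: Path("/etc/myapp/config.toml"),
         2: Path.home() / ".config/myapp/config.toml"}
prioritize(paths, "data")    # always the key-2 path; existence is not checked
prioritize(paths, "config")  # first of the key-2 / key-1 paths that exists, else None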
def traverse(d, path):
"""Return the value at the given path from the given nested dict/list"""
for k in path.split('.'):
if k.isdigit():
k = int(k)
d = d[k]
return d | ba832a008073da5d97ba0a237a8e0ded17e4694e | 17,404 |
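# Usage sketch for traverse above: digit path segments index lists, the rest index dicts.
doc = {"users": [{"name": "ada"}, {"name": "grace"}]}
print(traverse(doc, "users.1.name"))  # grace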
def _bundle_name_with_extension(ctx):
"""Returns the name of the bundle with its extension.
Args:
ctx: The Skylark context.
Returns:
The bundle name with its extension.
"""
return _bundle_name(ctx) + _bundle_extension(ctx) | 51f9c84fa2dd0ef9c5736a59ca2cd3c2d76aa108 | 17,405 |
def cvt_axisang_t_o2i(axisang, trans):
"""-correction: t_r, R_rt_r. outer to inner"""
trans -= get_offset(axisang)
return axisang, trans | ef263052e91ecc2fb8e668bca89a9d5622b75ff2 | 17,407 |
import pytz
import numpy
import dateutil.parser
def processData(dict, valuename, timename='Aika', multiplier=1.0):
"""Process "raw" OData dict and strip only the time and value.
Also convert time to UTC and hydrodynamics model (COHERENS) format.
Parameters
----------
dict: dictionary
Data dictionary as received from OData fetcher
valuename: string
Value nameto process
timename: string
Time field name
multiplier: float
Multiply value with this number. Useful in e.g. unit conversions.
Returns dictionary with processed data.
"""
# Gets valuename field from dict of sites along with timefield and multiplies values by multiplier
# Returns dict of sites with list of values: time, coherenstime, value
tz = pytz.timezone('Europe/Helsinki') # Default data timezone in case it doesn't exist
if numpy.isnan(multiplier):
print("Warning: multiplier ignored (NaN)")
multiplier = 1.0
newdict = {}
for site in dict:
newdata = []
for meas in dict[site]:
time = dateutil.parser.parse(meas[timename])
# If timezone not present, assume local (Finland) timezone
if time.tzinfo is None or time.tzinfo.utcoffset(time) is None:
time = tz.localize(time)
# If timezone is not UTC, convert time to UTC
if time.tzname() != 'UTC':
time = time.astimezone(pytz.utc)
# Convert time from datetime object to COHERENS ASCII format
coherenstime = time.strftime("%Y/%m/%d;%H:%M:%S,000")
value = float(meas[valuename])*multiplier
newdata.append([time, coherenstime, value])
newdict[site] = newdata
return newdict | df452a703a3afface12dc76abb647a5e38b808c3 | 17,408 |
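# Hedged usage sketch for processData above; the site name, timestamp and the
# "Arvo" value field are invented stand-ins for the OData payload it expects.
raw = {"Site-1": [{"Aika": "2021-06-01T12:00:00", "Arvo": "18.5"}]}
clean = processData(raw, "Arvo")
# clean["Site-1"][0] == [2021-06-01 09:00 UTC (datetime), "2021/06/01;09:00:00,000", 18.5]
# (12:00 Helsinki summer time is 09:00 UTC)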
from datetime import date
def get_current_year():
    """Returns current year
    """
    return str(date.today().year) | f019e7f2462a4d8db0db294fade6ca737e87a24c | 17,409
def parse_ans(text):
"""
Parses the given text as an answer set, i.e., a sequence of predicate
statements. Returns a (possibly empty) tuple of Predicate objects.
"""
return parser.parse_completely(
text,
parser.Rep(PredicateStatement),
devour=devour_asp
) | 44479668629b142115c27476242cbdf23b6657cc | 17,410 |
def get_xyz(data):
"""
:param data: 3D data
:return: 3D data coordinates
    The indices increase along dimensions 1, 2 and 3.
"""
nim = data.ndim
if nim == 3:
size_x, size_y, size_z = data.shape
x_arange = np.arange(1, size_x+1)
y_arange = np.arange(1, size_y+1)
z_arange = np.arange(1, size_z+1)
[xx, yy, zz] = np.meshgrid(x_arange, y_arange, z_arange, indexing='ij')
xyz = np.column_stack([zz.flatten(), yy.flatten(), xx.flatten()])
else:
"""
:param data: 2D data
:return: 2D data coordinates
        The indices increase along dimensions 1 and 2.
"""
size_x, size_y = data.shape
x_arange = np.arange(1, size_x + 1)
y_arange = np.arange(1, size_y + 1)
[xx, yy] = np.meshgrid(x_arange, y_arange, indexing='ij')
xyz = np.column_stack([yy.flatten(), xx.flatten()])
return xyz | b1bd78fee6ca4a8fc2a33430c4ea5e922d696381 | 17,411 |
def wc_proximal_gradient(L, mu, gamma, n, verbose=1):
"""
Consider the composite convex minimization problem
.. math:: F_\\star \\triangleq \\min_x \\{F(x) \\equiv f_1(x) + f_2(x)\\},
where :math:`f_1` is :math:`L`-smooth and :math:`\\mu`-strongly convex,
and where :math:`f_2` is closed convex and proper.
This code computes a worst-case guarantee for the **proximal gradient** method (PGM).
That is, it computes the smallest possible :math:`\\tau(n, L, \\mu)` such that the guarantee
.. math :: \\|x_n - x_\\star\\|^2 \\leqslant \\tau(n, L, \\mu) \\|x_0 - x_\\star\\|^2,
is valid, where :math:`x_n` is the output of the **proximal gradient**,
and where :math:`x_\\star` is a minimizer of :math:`F`.
In short, for given values of :math:`n`, :math:`L` and :math:`\\mu`,
:math:`\\tau(n, L, \\mu)` is computed as the worst-case value of
:math:`\\|x_n - x_\\star\\|^2` when :math:`\\|x_0 - x_\\star\\|^2 \\leqslant 1`.
**Algorithm**: Proximal gradient is described by
.. math::
\\begin{eqnarray}
y_t & = & x_t - \\gamma \\nabla f_1(x_t), \\\\
        x_{t+1} & = & \\arg\\min_x \\left\\{f_2(x)+\\frac{1}{2\\gamma}\\|x-y_t\\|^2 \\right\\},
        \\end{eqnarray}
    for :math:`t \\in \\{ 0, \\dots, n-1\\}` and where :math:`\\gamma` is a step-size.
**Theoretical guarantee**: It is well known that a **tight** guarantee for PGM is provided by
.. math :: \\|x_n - x_\\star\\|^2 \\leqslant \\max\\{(1-L\\gamma)^2,(1-\\mu\\gamma)^2\\}^n \\|x_0 - x_\\star\\|^2,
which can be found in, e.g., [1, Theorem 3.1]. It is a folk knowledge and the result can be found in many references
for gradient descent; see, e.g.,[2, Section 1.4: Theorem 3], [3, Section 5.1] and [4, Section 4.4].
**References**:
`[1] A. Taylor, J. Hendrickx, F. Glineur (2018). Exact worst-case convergence rates of the proximal gradient
method for composite convex minimization. Journal of Optimization Theory and Applications, 178(2), 455-476.
<https://arxiv.org/pdf/1705.04398.pdf>`_
[2] B. Polyak (1987). Introduction to Optimization. Optimization Software New York.
    `[3] E. Ryu, S. Boyd (2016). A primer on monotone operator methods.
Applied and Computational Mathematics 15(1), 3-43.
<https://web.stanford.edu/~boyd/papers/pdf/monotone_primer.pdf>`_
`[4] L. Lessard, B. Recht, A. Packard (2016). Analysis and design of optimization algorithms via
integral quadratic constraints. SIAM Journal on Optimization 26(1), 57–95.
<https://arxiv.org/pdf/1408.3595.pdf>`_
Args:
L (float): the smoothness parameter.
mu (float): the strong convexity parameter.
gamma (float): proximal step-size.
n (int): number of iterations.
verbose (int): Level of information details to print.
            - -1: No verbose at all.
- 0: This example's output.
- 1: This example's output + PEPit information.
- 2: This example's output + PEPit information + CVXPY details.
Returns:
pepit_tau (float): worst-case value.
theoretical_tau (float): theoretical value.
Example:
>>> pepit_tau, theoretical_tau = wc_proximal_gradient(L=1, mu=.1, gamma=1, n=2, verbose=1)
(PEPit) Setting up the problem: size of the main PSD matrix: 7x7
(PEPit) Setting up the problem: performance measure is minimum of 1 element(s)
(PEPit) Setting up the problem: initial conditions (1 constraint(s) added)
(PEPit) Setting up the problem: interpolation conditions for 2 function(s)
function 1 : 6 constraint(s) added
function 2 : 6 constraint(s) added
(PEPit) Compiling SDP
(PEPit) Calling SDP solver
(PEPit) Solver status: optimal (solver: SCS); optimal value: 0.6560999999942829
*** Example file: worst-case performance of the Proximal Gradient Method in function values***
PEPit guarantee: ||x_n - x_*||^2 <= 0.6561 ||x0 - xs||^2
Theoretical guarantee: ||x_n - x_*||^2 <= 0.6561 ||x0 - xs||^2
"""
# Instantiate PEP
problem = PEP()
# Declare a strongly convex smooth function and a closed convex proper function
f1 = problem.declare_function(SmoothStronglyConvexFunction, mu=mu, L=L)
f2 = problem.declare_function(ConvexFunction)
func = f1 + f2
# Start by defining its unique optimal point xs = x_*
xs = func.stationary_point()
# Then define the starting point x0 of the algorithm
x0 = problem.set_initial_point()
# Set the initial constraint that is the distance between x0 and x^*
problem.set_initial_condition((x0 - xs) ** 2 <= 1)
# Run the proximal gradient method starting from x0
x = x0
for _ in range(n):
y = x - gamma * f1.gradient(x)
x, _, _ = proximal_step(y, f2, gamma)
# Set the performance metric to the distance between x and xs
problem.set_performance_metric((x - xs) ** 2)
# Solve the PEP
pepit_verbose = max(verbose, 0)
pepit_tau = problem.solve(verbose=pepit_verbose)
# Compute theoretical guarantee (for comparison)
theoretical_tau = max((1 - mu*gamma)**2, (1 - L*gamma)**2)**n
# Print conclusion if required
if verbose != -1:
print('*** Example file: worst-case performance of the Proximal Gradient Method in function values***')
print('\tPEPit guarantee:\t ||x_n - x_*||^2 <= {:.6} ||x0 - xs||^2'.format(pepit_tau))
print('\tTheoretical guarantee:\t ||x_n - x_*||^2 <= {:.6} ||x0 - xs||^2 '.format(theoretical_tau))
# Return the worst-case guarantee of the evaluated method ( and the reference theoretical value)
return pepit_tau, theoretical_tau | ff8b67b963a2301e9b49870ffa9b6736a23420a4 | 17,412 |
def all_asset_types_for_shot(shot, client=default):
"""
Args:
shot (str / dict): The shot dict or the shot ID.
Returns:
list: Asset types from assets casted in given shot.
"""
path = "shots/%s/asset-types" % shot["id"]
return sort_by_name(raw.fetch_all(path, client=client)) | a7d06e49d564dbd294636e29f488703f5027026e | 17,414 |
from typing import Iterable
def train(x_mat: ndarray, k: int, *, max_iters: int = 10, initial_centroids: Iterable = None, history: bool = False):
"""
    Run k-means clustering.
    :param x_mat: feature matrix; the m rows are samples and the n columns are features
    :param k: number of clusters
    :param max_iters: maximum number of iterations
    :param initial_centroids: initial centroids; if omitted, they are picked at random from the samples
    :param history: whether to also return the per-iteration history
    :return: the final centroids; a vector with the centroid index assigned to each sample;
        and, when history is True, the list of centroids from every iteration
"""
x_mat = __t.r2m(x_mat)
m, n = x_mat.shape
if initial_centroids is None:
rand_indices = np.arange(0, m)
np.random.shuffle(rand_indices)
initial_centroids = x_mat[rand_indices[:k], :]
if not isinstance(initial_centroids, ndarray):
initial_centroids = np.asarray(initial_centroids)
idx = None
centroids_history = None
if history:
centroids_history = [initial_centroids]
for i in range(max_iters):
idx = find_closest(x_mat, initial_centroids)
initial_centroids = compute_centroids(x_mat, idx)
if history:
centroids_history.append(initial_centroids)
if history:
return initial_centroids, idx, centroids_history
else:
return initial_centroids, idx | 3a27cb709d6b267c8da19312f634f6003e2ba9a3 | 17,415 |
import urllib.request
import urllib.error
def download4(url, user_agent='wswp', num_retries=2):
    """Download function that includes user agent support"""
    # wswp: web scraping with python
    print('Downloading:', url)
    headers = {'User-agent': user_agent}
    request = urllib.request.Request(url, headers=headers)
    try:
        html = urllib.request.urlopen(request).read()
    except urllib.error.URLError as e:
        print('Download error:', e.reason)
        html = None
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # retry 5XX HTTP errors
                html = download4(url, user_agent, num_retries-1)
return html | 1381e64e93b373e68a1a07eaab1688462f905374 | 17,416 |
import unittest
def testv1():
"""Runs the unit tests without test coverage."""
tests = unittest.TestLoader().discover('./tests/api/v1', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1 | 519835d59ce7d370e8099e94a48b1e7309274d99 | 17,417 |
def get_third_order_displacements(cell,
symmetry,
is_plusminus='auto',
is_diagonal=False):
"""Create dispalcement dataset
Note
----
Atoms 1, 2, and 3 are defined as follows:
Atom 1: The first displaced atom. Third order force constant
between Atoms 1, 2, and 3 is calculated.
Atom 2: The second displaced atom. Second order force constant
between Atoms 2 and 3 is calculated.
        Atom 3: Force is measured on this atom.
Parameters
----------
cell : PhonopyAtoms
Supercell
symmetry : Symmetry
Symmetry of supercell
is_plusminus : str or bool, optional
Type of displacements, plus only (False), always plus and minus (True),
and plus and minus depending on site symmetry ('auto').
is_diagonal : bool, optional
Whether allow diagonal displacements of Atom 2 or not
Returns
-------
dict
Data structure is like:
{'natom': 64,
'cutoff_distance': 4.000000,
'first_atoms':
[{'number': atom1,
'displacement': [0.03, 0., 0.],
'second_atoms': [ {'number': atom2,
'displacement': [0., -0.03, 0.],
'distance': 2.353},
{'number': ... }, ... ] },
{'number': atom1, ... } ]}
"""
positions = cell.get_scaled_positions()
lattice = cell.get_cell().T
# Least displacements of first atoms (Atom 1) are searched by
# using respective site symmetries of the original crystal.
# 'is_diagonal=False' below is made intentionally to expect
# better accuracy.
disps_first = get_least_displacements(symmetry,
is_plusminus=is_plusminus,
is_diagonal=False)
symprec = symmetry.get_symmetry_tolerance()
dds = []
for disp in disps_first:
atom1 = disp[0]
disp1 = disp[1:4]
site_sym = symmetry.get_site_symmetry(atom1)
dds_atom1 = {'number': atom1,
'direction': disp1,
'second_atoms': []}
# Reduced site symmetry at the first atom with respect to
# the displacement of the first atoms.
reduced_site_sym = get_reduced_site_symmetry(site_sym, disp1, symprec)
# Searching orbits (second atoms) with respect to
# the first atom and its reduced site symmetry.
second_atoms = get_least_orbits(atom1,
cell,
reduced_site_sym,
symprec)
for atom2 in second_atoms:
dds_atom2 = get_next_displacements(atom1,
atom2,
reduced_site_sym,
lattice,
positions,
symprec,
is_diagonal)
min_vec = get_equivalent_smallest_vectors(atom1,
atom2,
cell,
symprec)[0]
min_distance = np.linalg.norm(np.dot(lattice, min_vec))
dds_atom2['distance'] = min_distance
dds_atom1['second_atoms'].append(dds_atom2)
dds.append(dds_atom1)
return dds | 6ae03dbd10ffec75bc274b4a4115b81d28cefc40 | 17,418 |
def get_result_type(action):
"""Gets the corresponding ROS action result type.
Args:
action: ROS action name.
Returns:
Result message type. None if not found.
"""
msg_type = rostopic.get_topic_type("{}/result".format(action))[0]
# Replace 'ActionResult' with 'Result'.
return msg_type[:-12] + "Result" | f5612ac116357000106f7ff24f0d2bebb6789547 | 17,419 |
import statistics
import math
def get_turbulence(sequence):
"""
Computes turbulence for a given sequence, based on `Elzinga & Liefbroer's 2007 definition <https://www.researchgate.net/publication/225402919_De-standardization_of_Family-Life_Trajectories_of_Young_Adults_A_Cross-National_Comparison_Using_Sequence_Analysis>`_ which is also implemented in the `TraMineR <http://traminer.unige.ch/doc/seqST.html>`_ sequence analysis library.
Example
--------
>>> sequence = [1,1,2,2,3]
>>> ps.get_turbulence(sequence)
5.228...
"""
phi = get_ndistinct_subsequences(sequence)
#print('phi', phi)
state_durations = [value for key, value in get_spells(sequence)]
#print('durations', state_durations)
#print('mean duration', statistics.mean(state_durations))
variance_of_state_durations = statistics.variance(state_durations)
#print('variance', variance_of_state_durations)
tbar = statistics.mean(state_durations)
maximum_state_duration_variance = (len(sequence) - 1) * (1 - tbar) ** 2
#print('smax', maximum_state_duration_variance)
top_right = maximum_state_duration_variance + 1
bot_right = variance_of_state_durations + 1
turbulence = math.log2(phi * (top_right / bot_right))
#print('turbulence', turbulence)
return turbulence | 9900d377240b609de1cb7a5284752457947ef6c3 | 17,420 |
def reflect(array, holder=1):
"""
Reflects a np array across the y-axis
Args:
array: array to be reflected
holder: a holder variable so the function can be used in optimization algorithms. If <0.5, does not reflect.
Returns:
Reflected array
"""
c = array.copy()
if holder > 0.5:
c[:, 0] = -c[:, 0]
return c | c39cbf0bb3a949254e4f0c35b20bdf84766d2084 | 17,421 |
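# Usage sketch for reflect above: negates the first column, i.e. mirrors points
# across the y-axis, and leaves the input untouched because it works on a copy.
import numpy as np

pts = np.array([[1.0, 2.0], [3.0, 4.0]])
print(reflect(pts))               # [[-1.  2.] [-3.  4.]]
print(reflect(pts, holder=0.2))   # unchanged copy: [[1. 2.] [3. 4.]]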
def absolute_name_scope(scope, reuse=tf.AUTO_REUSE):
"""Builds an absolute tf.name_scope relative to the current_scope.
This is helpful to reuse nested name scopes.
E.g. The following will happen when using regular tf.name_scope:
with tf.name_scope('outer'):
with tf.name_scope('inner'):
print(tf.constant(1)) # Will print outer/inner/Const:0
with tf.name_scope('outer'):
with tf.name_scope('inner'):
print(tf.constant(1)) # Will print outer/inner_1/Const:0
With absolute_name_scope:
with absolute_name_scope('outer'):
with absolute_name_scope('inner'):
print(tf.constant(1)) # Will print outer/inner/Const:0
with absolute_name_scope('outer'):
with absolute_name_scope('inner'):
print(tf.constant(1)) # Will print outer/inner/Const_1:0
"""
current_scope = tf.get_default_graph().get_name_scope()
if not current_scope:
if scope.endswith('/'):
scope = tf.variable_scope(scope, reuse=reuse)
else:
scope = tf.variable_scope('{}/'.format(scope), reuse=reuse)
else:
scope = tf.variable_scope('{}/{}/'.format(current_scope, scope), reuse=reuse)
return scope | b9bd9e801603472c3e7e1db7a8768387b9942f3c | 17,425 |
def regina_edge_orientation_agrees(tet, vert_pair):
"""
Given tet and an ordered pair of (regina) vert nums of that tet, does this ordering
agree with regina's ordering of the verts of that edge of the triangulation
"""
edge_num = vert_pair_to_edge_num[tuple(vert_pair)]
mapping = tet.faceMapping(1, edge_num)
map_order = [mapping[0], mapping[1]]
assert set(map_order) == set(vert_pair)
return map_order == vert_pair | a70f08f56754eee24b9c4c71d5b6b537388a4ca4 | 17,426 |
from typing import Optional
from datetime import datetime, timedelta
from uuid import UUID, uuid4
async def populate_challenge(
challenge_status: str = "process",
is_public: bool = True,
user_id: Optional[UUID] = USER_UUID,
challenge_id: UUID = POPULATE_CHALLENGE_ID,
) -> Challenge:
"""Populate challenge for routes testings."""
if not user_id:
user_id = uuid4()
user: User = await populate_user(user_id=user_id)
track, _ = await Track.get_or_create(test_track_info)
await populate_playlist()
challenge_end = datetime.utcnow() + timedelta(days=1)
vote_end = datetime.utcnow() + timedelta(days=2)
if challenge_status == "vote":
challenge_end = datetime.utcnow() - timedelta(days=1)
vote_end = datetime.utcnow() + timedelta(days=2)
if challenge_status == "end":
challenge_end = datetime.utcnow() - timedelta(days=2)
vote_end = datetime.utcnow() - timedelta(days=1)
challenge, _ = await Challenge.get_or_create(
id=challenge_id,
name="test",
challenge_end=challenge_end,
vote_end=vote_end,
is_public=is_public,
owner=user,
track=track,
)
await challenge.participants.add(user)
return challenge | fa47e65c7615af8dfebed4dd66fd92141d50e130 | 17,427 |
from typing import List
def is_common_prefix(words: List[str], length: int) -> bool:
"""Binary Search"""
word: str = words[0][:length]
for next_word in words[1:]:
if not next_word.startswith(word):
return False
return True | f57c7309f725baba0b65c92181a6f1ab2827558a | 17,428 |
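# Usage sketch for is_common_prefix above, as used by the binary-search
# longest-common-prefix routine it supports.
words = ["flower", "flow", "flight"]
print(is_common_prefix(words, 2))  # True  -- "fl" prefixes every word
print(is_common_prefix(words, 3))  # False -- "flo" does not prefix "flight"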
def freq_upsample(s, upsample):
""" padding in frequency domain, should be used with ifft so that
signal is upsampled in time-domain.
Args:
s : frequency domain signal
upsample : an integer indicating factor of upsampling.
Returns:
padded signal
"""
if upsample == 1:
return s
assert isinstance(upsample, int) and upsample > 1
l = len(s)
if l % 2 == 0:
        h = l // 2
return upsample * np.concatenate(
(s[:h], np.array([s[h] / 2.0]),
np.zeros(l * (upsample - 1) - 1),
np.array([s[h] / 2.0]), s[h+1:]))
else:
        h = l // 2 + 1
return upsample * np.concatenate(
(s[:h], np.zeros(l * (upsample - 1)), s[h:])) | 78377f6c552fe4f6d764a33b3e6ee555b4aabe71 | 17,429 |
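# Hedged usage sketch for freq_upsample above: pad a 4-point spectrum to 8 points
# so that the inverse FFT interpolates the signal while keeping the original samples.
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
up = freq_upsample(np.fft.fft(x), 2)
assert up.shape == (8,)
assert np.allclose(np.fft.ifft(up).real[::2], x)  # original samples preserved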
def streaming_parsing_covering(groundtruth_categories,
groundtruth_instances,
predicted_categories,
predicted_instances,
num_classes,
max_instances_per_category,
ignored_label,
offset,
normalize_by_image_size=True,
name=None):
"""Aggregates the covering across calls with different input tensors.
See tf.metrics.* functions for comparable functionality and usage.
Args:
groundtruth_categories: A 2D uint16 tensor of groundtruth category labels.
groundtruth_instances: A 2D uint16 tensor of groundtruth instance labels.
predicted_categories: A 2D uint16 tensor of predicted category labels.
predicted_instances: A 2D uint16 tensor of predicted instance labels.
num_classes: Number of classes in the dataset as an integer.
max_instances_per_category: The maximum number of instances for each class
as an integer or integer tensor.
ignored_label: The class id to be ignored in evaluation as an integer or
integer tensor.
offset: The maximum number of unique labels as an integer or integer tensor.
normalize_by_image_size: Whether to normalize groundtruth region areas by
image size. If True, groundtruth instance areas and weighted IoUs will be
divided by the size of the corresponding image before accumulated across
the dataset.
name: An optional variable_scope name.
Returns:
coverings: A tensor of shape `[3, num_classes]`, where (1) per class
coverings, (2) per class sum of weighted IoUs, and (3) per class sum of
groundtruth region areas are saved in the perspective rows.
update_ops: List of operations that update the running overall parsing
covering.
Raises:
RuntimeError: If eager execution is enabled.
"""
if tf.executing_eagerly():
raise RuntimeError('Cannot aggregate when eager execution is enabled.')
input_args = [
tf.convert_to_tensor(groundtruth_categories, tf.uint16),
tf.convert_to_tensor(groundtruth_instances, tf.uint16),
tf.convert_to_tensor(predicted_categories, tf.uint16),
tf.convert_to_tensor(predicted_instances, tf.uint16),
tf.convert_to_tensor(num_classes, tf.int32),
tf.convert_to_tensor(max_instances_per_category, tf.int32),
tf.convert_to_tensor(ignored_label, tf.int32),
tf.convert_to_tensor(offset, tf.int32),
tf.convert_to_tensor(normalize_by_image_size, tf.bool),
]
return_types = [
tf.float64,
tf.float64,
]
with tf.variable_scope(name, 'streaming_parsing_covering', input_args):
covering_results = tf.py_func(
_parsing_covering_helper, input_args, return_types, stateful=False)
weighted_iou_per_class, gt_area_per_class = tuple(covering_results)
total_weighted_iou_per_class, updated_weighted_iou_per_class = (
_running_total(
weighted_iou_per_class, [num_classes],
name='weighted_iou_per_class_total'))
total_gt_area_per_class, updated_gt_area_per_class = _running_total(
gt_area_per_class, [num_classes], name='gt_area_per_class_total')
covering_per_class = _realdiv_maybe_zero(total_weighted_iou_per_class,
total_gt_area_per_class)
coverings = tf.stack([
covering_per_class,
total_weighted_iou_per_class,
total_gt_area_per_class,
],
axis=0)
update_ops = [updated_weighted_iou_per_class, updated_gt_area_per_class]
return coverings, update_ops | 7c41f5b0c1111287759cc03cdc2a0c8a932aba11 | 17,430 |
from typing import Union
def get_rxn_lookup(medObj:Union[m.Medication, m.LocalMed, m.NDC]):
"""
DEPRECATED
Lookup RxCUI for codes from a different source
:param medObj:
:return:
"""
if isinstance(medObj, m.RxCUI):
smores_error('TBD')
return 0, []
success_count, errors = 0, []
non_rxc_dict = medObj.get_cui_all(omit=['PARENT', 'RXNORM'], inc_obj=True)
_e = {}
if len(non_rxc_dict) > 0:
for src in non_rxc_dict:
_src_e = []
_src_s = 0
for medC, medO in non_rxc_dict[src].items():
rxc_l = medO.get_linked_cui('RXNORM')
for _o in rxc_l:
if _o is None:
_src_e.append(medC)
else:
_src_s += 1
medObj.add_cui(_o)
success_count += 1 if _src_s > 0 else 0
if len(_src_e) > 0:
_e[src] = _src_e
if len(_e) > 0:
errors = _e
return success_count, errors | bba03aa380666b13db89497c720a62570a2918d0 | 17,431 |
def identity(dims):
"""
Create an identity linear operator
:param dims: array of dimensions
"""
dims = expand_dims(dims)
return identity_create(dims) | 4d57c5a0da628c8f24de4f621728176714a4ab54 | 17,432 |
def _gen_samples_2d(enn_sampler: testbed_base.EpistemicSampler,
x: chex.Array,
num_samples: int,
categorical: bool = False) -> pd.DataFrame:
"""Generate posterior samples at x (not implemented for all posterior)."""
# Generate the samples
data = []
rng = hk.PRNGSequence(jax.random.PRNGKey(seed=0))
for seed in range(num_samples):
net_out = enn_sampler(x, next(rng))
y = jax.nn.softmax(net_out)[:, 1] if categorical else net_out[:, 0]
df = pd.DataFrame({'x0': x[:, 0], 'x1': x[:, 1], 'y': y, 'seed': seed})
data.append(df)
return pd.concat(data) | 19fc06700ae42b015694fc9389c05ad0caebf54d | 17,433 |
def rules(r_index, c_index, lives, some_board, duplicate_board):
"""Apply Conway's Rules to a board
Args:
r_index (int): Current row index
c_index (int): Current column index
lives (int): Number of ALIVE cells around current position
some_board (List of lists of strings): Board used to determine rule
duplicate_board (List of lists of strings): Board used to apply rule
Returns:
[List of lists of strings]: Board used to apply rule (modified board)
"""
if some_board[r_index][c_index] == ALIVE:
if lives < 2 or lives > 3:
duplicate_board[r_index][c_index] = DEAD
else:
if lives == 3:
duplicate_board[r_index][c_index] = ALIVE
return duplicate_board | f654a134be3eccad122720cd58f577a2d7e580d8 | 17,434 |
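# Hedged usage sketch for rules above; ALIVE and DEAD are assumed module-level
# markers, chosen here as 'X' and '.'.
ALIVE, DEAD = 'X', '.'
board = [['.', 'X', '.'],
         ['.', 'X', '.'],
         ['.', 'X', '.']]
nxt = [row[:] for row in board]
# Cell (1, 0) is dead with exactly 3 live neighbours, so the rule brings it to life.
nxt = rules(1, 0, 3, board, nxt)
print(nxt[1][0])  # X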
def get_describe_tasks(cluster_name, tasks_arns):
"""Get information about a list of tasks."""
return (
ecs_client()
.describe_tasks(cluster=cluster_name, tasks=tasks_arns)
.get("tasks", [])
) | 663cc2d2241aa3d75c8f2de35780ebe9a5d4ae31 | 17,435 |
def make_bcc110(latconst=1.0):
"""
Make a cell of bcc structure with z along [110].
"""
s= NAPSystem(specorder=_default_specorder)
#...lattice
a1= np.array([ 1.0, 0.0, 0.0 ])
a2= np.array([ 0.0, 1.414, 0.0 ])
a3= np.array([ 0.0, 0.0, 1.414 ])
s.set_lattice(latconst,a1,a2,a3)
symbol = _default_specorder[0]
symbols = [ symbol, symbol, symbol, symbol]
poss = [[0.00, 0.00, 0.00],
[0.00, 0.50, 0.50],
[0.50, 0.50, 0.00],
[0.50, 0.00, 0.50]]
vels = [ [0., 0., 0.] for i in range(4) ]
frcs = [ [0., 0., 0.] for i in range(4) ]
s.add_atoms(symbols, poss, vels, frcs)
return s | 187556e30b4e89718d4c8d1179579ba498062d26 | 17,436 |
def mode_mods_to_int(mode: str) -> int:
"""Converts mode_mods (str) to mode_mods (int)."""
# NOTE: This is a temporary function to convert the leaderboard mode to an int.
# It will be removed when the site is fully converted to use the new
# stats table.
for mode_num, mode_str in enumerate((
'vn_std', 'vn_taiko', 'vn_catch', 'vn_mania',
'rx_std', 'rx_taiko', 'rx_catch',
'ap_std'
)):
if mode == mode_str:
return mode_num
else:
return 0 | 0bfaa8cf04bcee9395dff719067be9753be075c4 | 17,437 |
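# Usage sketch for mode_mods_to_int above: index into the (vn, rx, ap) mode tuple.
print(mode_mods_to_int('vn_std'))   # 0
print(mode_mods_to_int('rx_std'))   # 4
print(mode_mods_to_int('unknown'))  # 0 (fallback from the for/else)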