Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, range 0 to 3.66M)
def kineticEnergyCOM(robot: object, symbolic=False):
    """This function calculates the total kinetic energy, with respect to each center of mass, given linear and angular velocities

    Args:
        robot (object): serial robot (this won't work with other types of robots)
        symbolic (bool, optional): used to calculate symbolic equations. Defaults to False.

    Returns:
        K (SymPy Matrix): kinetic matrix (symbolical)
    """
    # Kinetic Matrix calculation
    D = inertiaMatrixCOM(robot, symbolic)
    return 0.5 * (robot.qdSymbolic.T * D * robot.qdSymbolic) if symbolic else 0.5 * (robot.jointsVelocities.T.dot(D).dot(robot.jointsVelocities))
5f0559e55a389741ad0591b5ac3f220ffdb76a2c
21,920
def get_input(request) -> str:
    """Get the input song from the request form."""
    return request.form.get('input')
de237dc0ad3ce2fa6312dc6ba0ea9fe1c2bdbeb3
21,921
from typing import Hashable
import math


def _unit_circle_positions(item_counts: dict[Hashable, tuple[int, int]], radius=0.45,
                           center_x=0.5, center_y=0.5) -> dict[Hashable, tuple[float, float]]:
    """
    computes equally spaced points on a circle based on the radius and center positions

    :param item_counts: item dict LinkedNetwork.get_item_link_count_dict()
    :param radius: radius of the circle
    :param center_x: x center position
    :param center_y: y center position
    :return: dict of items and their corresponding positions
    """
    r = radius
    cx, cy = center_x, center_y
    a = math.radians(360) / len(item_counts)
    points = {}
    i = 0
    for key, _ in item_counts.items():
        points[key] = (math.cos(a * i) * r + cx, math.sin(a * i) * r + cy)
        i += 1
    return points
66f60f5b90f7825f2abfdd2484375c9558786250
21,922
import re


def rate_table_download(request, table_id):
    """
    Download a calcification rate table as CSV.
    """
    def render_permission_error(request, message):
        return render(request, 'permission_denied.html', dict(error=message))

    table_permission_error_message = \
        f"You don't have permission to download table of ID {table_id}."

    try:
        rate_table = CalcifyRateTable.objects.get(pk=table_id)
    except CalcifyRateTable.DoesNotExist:
        # Technically the error message isn't accurate here, since it
        # implies the table ID exists. But users don't really have any
        # business knowing which table IDs exist or not outside their source.
        # So this obfuscation makes sense.
        return render_permission_error(request, table_permission_error_message)

    if rate_table.source:
        if not rate_table.source.visible_to_user(request.user):
            # Table belongs to a source, and the user doesn't have access to
            # that source.
            return render_permission_error(
                request, table_permission_error_message)

    # The source_id parameter tells us to limit the downloaded CSV to the
    # entries in the specified source's labelset, rather than including all
    # the rows of the rate table. This is particularly useful when downloading
    # a default rate table.
    if 'source_id' in request.GET:
        source_id = request.GET['source_id']
        source_permission_error_message = \
            f"You don't have permission to access source of ID {source_id}."
        try:
            source = Source.objects.get(pk=source_id)
        except Source.DoesNotExist:
            return render_permission_error(
                request, source_permission_error_message)
        if not source.visible_to_user(request.user):
            return render_permission_error(
                request, source_permission_error_message)
    else:
        source = None

    # At this point we do have permission, so proceed.

    # Convert the rate table's name to a valid filename in Windows and
    # Linux/Mac (or at least make a reasonable effort to).
    # Convert chars that are problematic in either OS to underscores.
    #
    # Linux only disallows / (and the null char, but we'll ignore that case).
    # Windows:
    # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#naming-conventions
    non_filename_chars_regex = re.compile(r'[<>:"/\\|?*]')
    csv_filename = non_filename_chars_regex.sub('_', rate_table.name)

    # Make a CSV stream response and write the data to it.
    response = create_csv_stream_response('{}.csv'.format(csv_filename))
    rate_table_json_to_csv(response, rate_table, source=source)

    return response
d62eb09d11c0e91cca3eb36388f1165e2a4433ee
21,923
def rgb_to_RGB255(rgb: RGBTuple) -> RGB255Tuple:
    """
    Convert from Color.rgb's 0-1 range to ANSI RGB (0-255) range.

    >>> rgb_to_RGB255((1, 0.5, 0))
    (255, 128, 0)
    """
    return tuple(int(round(map_interval(0, 1, 0, 255, c))) for c in rgb)
1fe460a4716244efbbacbc6d44f10a3fc6ba8d3f
21,925
import requests
import re


def check_rest_version(host="http://www.compbio.dundee.ac.uk/jpred4/cgi-bin/rest",
                       suffix="version", silent=False):
    """Check version of JPred REST interface.

    :param str host: JPred host address.
    :param str suffix: Host address suffix.
    :param silent: Should the work be done silently?
    :type silent: :py:obj:`True` or :py:obj:`False`
    :return: Version of JPred REST API.
    :rtype: :py:class:`str`
    """
    version_url = "{}/{}".format(host, suffix)
    response = requests.get(version_url)
    version = re.search(r"VERSION=(v\.[0-9]*.[0-9]*)", response.text).group(1)
    if not silent:
        print(version)
    return version
f9c5e858e4a4681d8b5045e8ed08738f1c32016a
21,926
def analyzer_options(*args):
    """
    analyzer_options()

    Allow the user to set analyzer options. (show a dialog box)
    ( 'ui_analyzer_options' )
    """
    return _ida_kernwin.analyzer_options(*args)
e0b78523d32cce303fe8048012ae1e57c7c422bd
21,927
import torch


def vector_vector_feature(v_a, v_b, weight, p_idx, frames, symmetric):
    """
    Taking outer product, create matrix feature per pair, average, express in SO2 feature.

    :param v_a: [E, 3]
    :param v_b: [E, 3]
    :param weight: [E]
    :param p_idx: [E] index [0, V)
    :param frames: [V, 3, 3] per vertex, rows are (X, Y, normal) vectors.
    :param symmetric: bool
    :return: [V, 2/3, 5] (2 channels if symmetric)
    """
    m_pair = torch.einsum("ni,nj,n->nij", v_a, v_b, weight)
    m_p = scatter_sum(m_pair, p_idx, dim=0) / scatter_sum(weight, p_idx)[:, None, None]
    m_p_gauge = frames @ m_p @ frames.transpose(1, 2)
    return (three_sym_matrix_to_so2_features if symmetric else three_matrix_to_so2_features)(
        m_p_gauge
    )
6b583cc834d79d463fc5e6b68b98cd027c4969ec
21,928
from collections import defaultdict


def parse_steps(filename):
    """
    Read each line of FILENAME and return a dict where the key is the step
    and the value is a list of prerequisite steps.
    """
    steps = defaultdict(list)
    all_steps = set()
    with open(filename) as f:
        for line in f:
            words = line.split(' ')
            steps[words[7]].append(words[1])
            all_steps.add(words[1])
    # Add steps with no prerequisites.
    for step in all_steps:
        if step not in steps:
            steps[step] = []
    return steps
450d93cb72cf92c186cbcecc1992c6e4391ca428
21,929
import glob
import json


def sitestructure(config, path, extra):
    """Read all markdown files and make a site structure file"""
    # no error handling here, because compile_page has it
    entire_site = list()
    for page in glob.iglob(path + '**/*.md', recursive=True):
        merged = compile_page(None, config, page, extra)
        if 'tags' in merged:
            merged['tags'] = [x.strip() for x in merged['tags'].split(',')]
        if 'content_raw' in merged:
            merged['snippet'] = merged['content_raw'][:200] + "..."
        # remove certain elements
        if 'content' in merged:
            del merged['content']
        if 'content_raw' in merged:
            del merged['content_raw']
        if 'templates' in merged:
            del merged['templates']
        entire_site.append(merged)
    return json.dumps(entire_site)
6c7d21bce30ebb418a8146f302ce62bbe0386bbf
21,930
import uuid


def _create_component(tag_name, allow_children=True, callbacks=[]):
    """
    Create a component for an HTML Tag

    Examples:
        >>> marquee = _create_component('marquee')
        >>> marquee('woohoo')
        <marquee>woohoo</marquee>
    """
    def _component(*children, **kwargs):
        if 'children' in kwargs:
            children = kwargs.pop('children')
        else:
            # Flatten children under specific circumstances
            # This supports the use case of div([a, b, c])
            # And allows users to skip the * operator
            if len(children) == 1 and isinstance(children[0], list):
                # We want children to be tuples and not lists, so
                # they can be immutable
                children = tuple(children[0])
        if 'style' in kwargs:
            style = kwargs.pop('style')
        else:
            style = None
        if 'attributes' in kwargs:
            attributes = kwargs['attributes']
        else:
            attributes = dict(**kwargs)
        if (tag_name == 'a') and ('href' not in attributes):
            attributes['href'] = '#'
        if not allow_children and children:
            # We don't allow children, but some were passed in
            raise ValueError(
                '<{tag_name} /> cannot have children'.format(tag_name=tag_name))
        for cb in callbacks:
            cbname = cb['name']
            if cbname in attributes:
                if attributes[cbname] is not None:
                    # from google.colab import output as colab_output
                    callback_id = cbname + 'callback-' + str(uuid.uuid4())
                    register_callback(callback_id, attributes[cbname])
                    # js="google.colab.kernel.invokeFunction('{callback_id}', [], {kwargs})"
                    js = "window.vdomr_invokeFunction('{callback_id}', [], {kwargs})"
                    js = js.replace('{callback_id}', callback_id)
                    js = js.replace('{kwargs}', cb['kwargs'])
                    attributes[cbname] = js
                else:
                    attributes[cbname] = ''
        v = VDOM(tag_name, attributes, style, children)
        return v
    return _component
a11572de6d079b35ffe0492154939cceb953b199
21,931
def typeof(val, purpose=Purpose.argument):
    """
    Get the Numba type of a Python value for the given purpose.
    """
    # Note the behaviour for Purpose.argument must match _typeof.c.
    c = _TypeofContext(purpose)
    ty = typeof_impl(val, c)
    if ty is None:
        msg = _termcolor.errmsg(
            "cannot determine Numba type of %r") % (type(val),)
        raise ValueError(msg)
    return ty
83d8e84fca58ce78b15e9106a14ff95c86ccac68
21,932
import logging


def scale_site_by_jobslots(df, target_score, jobslot_col=Metric.JOBSLOT_COUNT.value,
                           count_col=Metric.NODE_COUNT.value):
    """
    Scale a resource environment (data frame with node type information) to the supplied share.
    This method uses the number of jobslots in each node as a target metric.
    """
    if df[jobslot_col].isnull().sum() > 0 or df[count_col].isnull().sum() > 0:
        logging.warning("Node description has null values for jobslots or node target scores!")
    slots_per_type = df[jobslot_col] * df[count_col]
    total_slots = slots_per_type.sum()
    share = target_score / total_slots
    return scale_dataframe(df, share, count_col, jobslot_col)
c46129ebf4761fbcb7c3e40165c83259d0eb24e0
21,933
import numpy as np


def primesfrom2to(n):
    """Input n >= 6. Returns an array of primes, 2 <= p < n."""
    sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)
    sieve[0] = False
    for i in range(int(n ** 0.5) // 3 + 1):
        if sieve[i]:
            k = 3 * i + 1 | 1
            sieve[k * k // 3::2 * k] = False
            sieve[(k * k + 4 * k - 2 * k * (i & 1)) // 3::2 * k] = False
    return np.r_[2, 3, ((3 * np.nonzero(sieve)[0] + 1) | 1)]
e66a8dd1bb23f1aab8786d4832e382d07e5973e0
21,934
import time


def TimeFromTicks(ticks):
    """Construct an object holding a time value from the given ticks value."""
    return Time(*time.localtime(ticks)[3:6])
96651ff5e0da5e88988640b5e3f544ad82dd90b2
21,936
from configparser import ConfigParser


def check_key_exists(file_location, section, key):
    """
    Searches an INI configuration file for the existence of a section & key

    :param file_location: The file to get a key value from
    :param section: The section to find the key value
    :param key: The key that can contain a value to retrieve
    :return: The boolean value of whether or not the key exists
    """
    config = ConfigParser()
    config.read(file_location)
    return config.has_option(section, key)
afdc8bad295cd2b0ed0576eab0ff633fb1f854b3
21,937
def replace_na(str_value: str, ch: str = "0") -> str:
    """Replaces "0" with "na"; specifically designed for category lists, may not work for other needs.

    Args:
        str_value (str): category list
        ch (str, optional): Replacement char. Defaults to "0".

    Returns:
        str: clean category name
    """
    if str_value is not None:
        len_str = len(str_value)
        if len_str > 0:
            if str_value == "0":
                return "na"
            all_indices = [i for i, ltr in enumerate(str_value) if ltr == ch]
            if all_indices:
                for i in all_indices:
                    if i == 0 and str_value[1].isalpha():
                        str_value = "na" + str_value[1:]
                    elif i == (len_str - 1) and (str_value[len_str - 2].isalpha() or str_value[len_str - 2] != "."):
                        str_value = str_value[:len_str] + "na"
                    elif str_value[len_str - 2] != ".":
                        str_value = str_value[:i] + "na" + str_value[(i + 1):]
    return str_value
d8e6dfe6806c7a008163ba92c62e7b2b18633538
21,938
def intent_requires():
    """
    This view encapsulates the method get_intent_requirement
    It requires an Intent.

    :return: A dict containing the different entities required for an Intent
    """
    data = request.get_json()
    if "intent" in data:
        return kg.get_intent_requirements(data["intent"])
    else:
        return {"message": "Must provide an intent name", "status": 404}
75901abc3d0833eba39c229cc35249c8cb3e6162
21,939
import numpy as np
import pandas as pd


def standardize_df_off_tr(df_tr: pd.DataFrame, df_te: pd.DataFrame):
    """Standardize dataframes from a training and testing frame, where the means
    and standard deviations are calculated from the training dataset.
    """
    for key in df_tr.keys():
        if key != 'target':
            # scale the testing data w/ the training means/stds
            ssd = df_tr[key].values.std()
            if np.abs(ssd) < .0001:
                ssd = .001
            df_te[key] = (df_te[key].values - df_tr[key].values.mean()) / ssd
            # scale the training data
            df_tr[key] = (df_tr[key].values - df_tr[key].values.mean()) / ssd
    return df_tr, df_te
04438b7f31efbe80129ef1cda488ea3c93bcf55e
21,940
import numpy as np
import pandas as pd


def filter_clusters(aoi_clusters, min_ratio, max_deviation, message, run=None):
    """
    min_ratio: Has to have more than x % of all dots in the corner within the cluster
    max_deviation: Should not deviate more than x % of the screen size from the respective AOI
    """
    aoi_clusters = aoi_clusters \
        .sort_values(by='quadrant') \
        .assign(n_ratio=aoi_clusters['n_cluster'] / aoi_clusters['n_total']) \
        .assign(x_deviation=aoi_clusters['x'] - pd.Series([0.25, 0.75, 0.25, 0.75])) \
        .assign(y_deviation=aoi_clusters['y'] - pd.Series([0.75, 0.75, 0.25, 0.25]))

    aoi_clusters['euclid_deviation'] = np.sqrt(
        aoi_clusters['x_deviation'] ** 2 + aoi_clusters['y_deviation'] ** 2)

    realistic_clusters = aoi_clusters[
        (aoi_clusters['n_ratio'] > min_ratio) &
        (aoi_clusters['euclid_deviation'] < max_deviation)]

    not_enough_gaze_points = len(
        aoi_clusters[aoi_clusters['n_ratio'] > min_ratio]) < 4
    too_far_away = len(
        aoi_clusters[aoi_clusters['euclid_deviation'] < max_deviation]) < 4

    if message:
        if not_enough_gaze_points | too_far_away:
            print(f"""\nRun {run} could not be clustered: """)
            if not_enough_gaze_points:
                print(f"""    <{min_ratio * 100}% gaze points within """
                      f"""the AOIs for each corner""")
            if too_far_away:
                print(f"""    >{max_deviation * 100}% from where the AOI """
                      f"""is supposed to be \n""")
        else:
            print(f"""\nRun {run} can be clustered: """)
            print(f"""{aoi_clusters[[
                'quadrant', 'n_cluster', 'cluster',
                'n_ratio', 'x_deviation', 'y_deviation']]} \n"""
                  f"""Notes: """)

    return realistic_clusters
2767afd05093e364cf6be22b98b4821c6811165e
21,941
def set_to_true():
    """matches v1, which assigns True to v1"""
    key = yield symbol
    res = Assign(key, True)
    return res
e1a8eb62be409252475ad39d9d72a087b0344f9f
21,942
def d1_to_q1(A, b, mapper, cnt, M):
    """
    Constraints for d1 to q1
    """
    for key in mapper['ck'].keys():
        for i in range(M):
            for j in range(i, M):
                # hermitian constraints
                if i != j:
                    A[cnt, mapper['ck'][key](i, j)] += 0.5
                    A[cnt, mapper['ck'][key](j, i)] += 0.5
                    A[cnt, mapper['kc'][key](j, i)] += 0.5
                    A[cnt, mapper['kc'][key](i, j)] += 0.5
                    b[cnt, 0] = 0.0
                else:
                    A[cnt, mapper['ck'][key](i, j)] += 1.0
                    A[cnt, mapper['kc'][key](j, i)] += 1.0
                    b[cnt, 0] = 1.0
                cnt += 1
    return A, b, cnt
1ee9ec17f4464ef280aa22780d6034309941954e
21,944
from collections import OrderedDict
from os import listdir
from os.path import join, splitext


def _get_dict_roi(directory=None):
    """Get all available images with ROI bounding box.

    Returns
    -------
    dict : {<image_id>: <ROI file path>}
    """
    d = OrderedDict()
    for f in listdir(directory or IJ_ROI_DIR):
        d[splitext(f)[0]] = join(directory or IJ_ROI_DIR, f)
    return d
56c2b6a8cb3cb296489050a5465e19e6829ee383
21,947
import numpy as np


def geol_units(img, lon_w, lat, legend=None):
    """Get geological units based on (lon, lat) coordinates.

    Parameters
    ----------
    img: 2d-array
        2D geol map image centered at 180°.
    lon_w: float or array
        Point west longitude(s).
    lat: float or array
        Point latitude(s).
    legend: dict, optional
        Table mapping geol units to values.

    Returns
    -------
    float, str or array
        Geological unit(s).
    """
    # `index` is assumed to be a module-level helper mapping (lon, lat)
    # to array indices (operator.index, which takes one argument, cannot
    # be the function called here).
    units = img[index(img, lon_w, lat)]
    if not isinstance(legend, dict):
        return units
    if np.ndim(units) == 0:
        return legend[units]
    geol = np.vectorize(legend.get)(units)
    if np.ma.is_masked(lon_w) or np.ma.is_masked(lat):
        mask = np.ma.getmask(lon_w) | np.ma.getmask(lat)
        return np.ma.array(geol, mask=mask)
    return geol
d564ee29139d6a8c5d2235da10acb11b24866d80
21,948
import numpy as np


def Water_Mask(shape_lsc, Reflect):
    """
    Calculates the water and cloud mask
    """
    mask = np.zeros((shape_lsc[1], shape_lsc[0]))
    mask[np.logical_and(Reflect[:, :, 3] < Reflect[:, :, 2],
                        Reflect[:, :, 4] < Reflect[:, :, 1])] = 1.0
    water_mask_temp = np.copy(mask)
    return water_mask_temp
6bcf7b4a96c4de9938c1520253d81460dd7a8025
21,949
def get_gs_distortion(dict_energies: dict):
    """Calculates the energy difference between the Unperturbed structure and the most favourable distortion.
    Returns the energy drop of the ground state relative to Unperturbed (in eV) and the BDM distortion that led to the ground state.

    Args:
        dict_energies (dict):
            Dictionary matching distortion to final energy, as produced by organize_data()

    Returns:
        (energy_difference, BDM_ground_state_distortion)
    """
    if len(dict_energies['distortions']) == 1:
        energy_diff = dict_energies['distortions']['rattled'] - dict_energies['Unperturbed']
        if energy_diff < 0:
            gs_distortion = 'rattled'  # just rattle (no BDM)
        else:
            gs_distortion = "Unperturbed"
    else:
        lowest_E_RBDM = min(dict_energies['distortions'].values())  # lowest energy obtained with RBDM
        energy_diff = lowest_E_RBDM - dict_energies['Unperturbed']
        if lowest_E_RBDM < dict_energies['Unperturbed']:  # if energy is lower than with Unperturbed
            # BDM distortion that led to the ground state
            gs_distortion = list(dict_energies['distortions'].keys())[
                list(dict_energies['distortions'].values()).index(lowest_E_RBDM)]
        else:
            gs_distortion = "Unperturbed"
    return energy_diff, gs_distortion
2f23103ccac8e801cb6c2c4aff1fb4fc08341e78
21,951
def parse_accept_language(data: str = None):
    """Parse HTTP header `Accept-Language`

    Returns a tuple like below:

    ```
    ((1.0, Locale('zh_Hant_TW')),
     (0.9, Locale('en')),
     (0.0, _fallback_ns))
    ```
    """
    langs = {(0.0, _fallback_ns)}
    if data is None:
        return tuple(langs)
    for s in data.split(","):
        tags = s.strip().split(";")
        loc_ins = Locale.parse(tags[0], sep="-")
        q = 1.0
        if len(tags) > 1:
            q = float(tags[1][2:])
        langs.add((q, loc_ins))
    return tuple(sorted(langs, reverse=True))
fd2d9aef4825dc0d7fd7a84b69391c69353e9f86
21,952
def stop_service():
    """
    Stopping the service
    """
    global __service_thread
    dbg("Trying to stop service thread")
    shutdown_service()
    __service_thread.join()
    __service_thread = None
    info("Server stopped")
    return True
97f7b9fb60a7a271f3c234be43b2b513c42ce77e
21,953
def get_constants_name_from_value(constant_dict, value):
    """
    @param constant_dict : constant dictionary to consider
    @param value : the value whose constant name should be retrieved
    @rtype : a string
    """
    try:
        return constant_dict[value]
    except KeyError:
        log.error("The constant name corresponding to the value '%s' cannot be found in the dictionary '%s'" % (value, constant_dict))
        return ERROR_CONSTANT_NAME_NOT_FOUND
3848e3e83946196250f3987a976b5a74da016a34
21,954
import numpy as np


def rotxyz(x_ang, y_ang, z_ang):
    """Creates a 3x3 numpy rotation matrix from three rotations done in the
    order of x, y, and z in the local coordinate frame as it rotates.

    The three columns represent the new basis vectors in the global coordinate
    system of a coordinate system rotated by this matrix.

    Args:
        x_ang: angle for rotation about the x axis in radians
        y_ang: angle for rotation about the y axis in radians
        z_ang: angle for rotation about the z axis in radians

    Returns:
        The 3D rotation matrix for an x, y, z rotation
    """
    # return rotx(x_ang) @ roty(y_ang) @ rotz(z_ang)
    return np.matmul(np.matmul(rotx(x_ang), roty(y_ang)), rotz(z_ang))
779c4ca37d5636ad7cff38d9200a9b50b3b0fffe
21,955
import numpy as np


def hpdi(proba, array):
    """
    Give the highest posterior density interval.

    For example, the 95% HPDI is a lower bound and upper bound such that:
    1. they contain 95% probability, and
    2. in total, have higher peaks than any other bound.

    Parameters:
        proba: float
            A value between 0 and 1, inclusive.
            For example, if proba is 0.95, then we'll get a 95% HPDI.
        array: np.array
            An array of samples.

    Returns:
        tuple(integer, integer)
            First item is the lower bound. Second item is the upper bound.
    """
    if proba < 0 or proba > 1:
        raise ValueError(
            f"Proba {proba} should be between 0 and 1, inclusive."
        )
    sorted_array = np.array(sorted(array))
    length = sorted_array.shape[0]
    normalizer = sorted_array.sum()
    minimum_width = normalizer
    start_index_to_return = None
    end_index_to_return = None
    limit = int((1 - proba) * length)
    for start_index in range(limit):
        end_index = length - limit + start_index
        diff = sorted_array[end_index] - sorted_array[start_index]
        if diff <= minimum_width:
            minimum_width = diff
            start_index_to_return = start_index
            end_index_to_return = end_index
    return (
        sorted_array[start_index_to_return],
        sorted_array[end_index_to_return]
    )
a417b6adba19ef6206326791250c880e3b2a28a1
21,956
import urllib


def _capabilities(repo, proto):
    """return a list of capabilities for a repo

    This function exists to allow extensions to easily wrap capabilities
    computation

    - returns a list: easy to alter
    - changes done here will be propagated to both `capabilities` and `hello`
      command without any other action needed.
    """
    # copy to prevent modification of the global list
    caps = list(wireprotocaps)
    if _allowstream(repo.ui):
        if repo.ui.configbool('server', 'preferuncompressed', False):
            caps.append('stream-preferred')
        requiredformats = repo.requirements & repo.supportedformats
        # if our local revlogs are just revlogv1, add 'stream' cap
        if not requiredformats - set(('revlogv1',)):
            caps.append('stream')
        # otherwise, add 'streamreqs' detailing our local revlog format
        else:
            caps.append('streamreqs=%s' % ','.join(requiredformats))
    if repo.ui.configbool('experimental', 'bundle2-exp', False):
        capsblob = bundle2.encodecaps(repo.bundle2caps)
        caps.append('bundle2-exp=' + urllib.quote(capsblob))
    caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
    caps.append('httpheader=1024')
    return caps
764c59bbbe525b8d825dfded871643bb88a1588f
21,958
import types
from typing import Dict
import collections


def dep(doclike: types.DocLike) -> Dict[str, int]:
    """
    Count the number of times each syntactic dependency relation appears
    as a token annotation in ``doclike``.

    Args:
        doclike

    Returns:
        Mapping of dependency relation to count of occurrence.
    """
    return dict(collections.Counter(tok.dep_ for tok in doclike))
e4fcb5a54578b2b001eda4255cd3a22b89a6d195
21,959
def fix_phonology_table(engine, phonology_table, phonologybackup_table, user_table):
    """Give each phonology UUID and modifier_id values; also give the phonology
    backups of existing phonologies UUID values.
    """
    print_('Fixing the phonology table ... ')
    msgs = []
    #engine.execute('set names latin1')
    engine.execute('set names utf8;')
    users = engine.execute(user_table.select()).fetchall()
    phonologybackups = engine.execute(phonologybackup_table.select()).fetchall()
    buffer1 = []
    buffer2 = []
    for row in engine.execute(phonology_table.select()):
        values = row2dict(row)
        values['UUID'] = str(uuid4())
        backups = sorted([pb for pb in phonologybackups if pb['phonology_id'] == values['id']],
                         key=lambda pb: pb['datetimeModified'])
        if backups:
            try:
                most_recent_backuper = json.loads(backups[-1]['backuper'])['id']
                if [u for u in users if u['id'] == most_recent_backuper]:
                    values['modifier_id'] = most_recent_backuper
                else:
                    values['modifier_id'] = values['enterer_id']
                    msgs.append('There is no user %d to serve as the most recent backuper for phonology %d' % (
                        most_recent_backuper, values['id']))
            except Exception:
                msgs.append('''WARNING: there are %d backups for phonology %d; however, it was
                    not possible to extract a backuper from the most recent one
                    (backuper value: %s)'''.replace('\n', ' ') % (
                    len(backups), values['id'], backups[-1]['backuper']))
                values['modifier_id'] = values['enterer_id']
        else:
            values['modifier_id'] = values['enterer_id']
        buffer1.append(values)
        for pb in backups:
            buffer2.append({'pb_id': pb['id'], 'UUID': values['UUID']})
    update = phonologybackup_table.update().where(phonologybackup_table.c.id == bindparam('pb_id')).\
        values(UUID=bindparam('UUID'))
    engine.execute(update, buffer2)
    if buffer1:
        engine.execute('set names utf8;')
        update = phonology_table.update().where(phonology_table.c.id == bindparam('id_')).\
            values(modifier_id=bindparam('modifier_id'), UUID=bindparam('UUID'))
        engine.execute(update, buffer1)
    print('done.')
    return msgs
746ca4a479b450f3320b4c04a3ce6013beb88ed4
21,960
def D(field, dynkin):
    """A derivative.

    Returns a new field with additional dotted and undotted indices.

    Example:
        >>> D(L, "01")
        DL(01001)(-1/2)

        >>> D(L, "21")
        DL(21001)(-1/2)
    """
    undotted_delta = int(dynkin[0]) - field.dynkin_ints[0]
    dotted_delta = int(dynkin[1]) - field.dynkin_ints[1]

    # derivative can only change one dotted and one undotted index
    assert abs(undotted_delta) == 1
    assert abs(dotted_delta) == 1

    # other info to construct field instance
    deriv_symbol = "D"
    symbol = deriv_symbol + field.label
    new_field_dynkin = dynkin + field.dynkin[2:]
    rest = {
        "charges": field.charges,
        "comm": field.comm,
        "is_conj": field.is_conj,
        "nf": field.nf,
        "stripped": field.stripped,
    }
    new_field = Field(symbol, dynkin=new_field_dynkin, **rest)
    new_field.latex = f"(D{strip_parens(field.get_latex())})"
    new_field.derivs = field.derivs + 1

    # only add this information for the first derivative
    if new_field.stripped is None:
        new_field.stripped = {
            "label": field.label,
            "dynkin": field.dynkin,
            "symmetry": field.symmetry,
            "charges": field.charges,
            "latex": field.latex,
        }
    return new_field
1ee408a8cc2923b141c5ffe1c41d919a501111ae
21,961
def _det(m, n):
    """Recursive calculation of matrix determinant utilizing cofactors."""
    sgn = 1
    det = 0
    if n == 1:
        return m[0][0]
    cofact = [n * [0] for i in range(n)]
    for i in range(n):
        _get_cofact(m, cofact, 0, i, n)
        det += sgn * m[0][i] * _det(cofact, n - 1)
        sgn = -sgn
    return det
9019dd9dc1054fc4c36e72a6e3c9a3c478afa4ad
21,962
from typing import List

import pyudev


def get_vcps() -> List[LinuxVCP]:
    """
    Interrogates I2C buses to determine if they are DDC-CI capable.

    Returns:
        List of all VCPs detected.
    """
    vcps = []
    # iterate I2C devices
    for device in pyudev.Context().list_devices(subsystem="i2c"):
        vcp = LinuxVCP(device.sys_number)
        try:
            with vcp:
                pass
        except (OSError, VCPIOError):
            pass
        else:
            vcps.append(vcp)
    return vcps
ce3766c695fe9ffb0a6ebcced4ac04808987f340
21,963
def toGoatLatin(S):
    """
    :type S: str
    :rtype: str
    """
    l_words = []
    for i, word in enumerate(S.split()):
        if not is_vowel(word[0]):
            word = word[1:] + word[0]
        aa = "a" * (i + 1)
        l_words.append(word + "ma" + aa)
    return " ".join(l_words)
5ed41084a0d35d69e65b2821b43c2373cf289d26
21,964
def list_startswith(_list, lstart):
    """
    Check if a list (_list) starts with all the items from another list (lstart)

    :param _list: list
    :param lstart: list
    :return: bool, True if _list starts with all the items of lstart.
    """
    if _list is None:
        return False
    lenlist = len(_list)
    lenstart = len(lstart)
    if lenlist >= lenstart:
        # if _list longer or as long as lstart, check 1st items:
        return _list[:lenstart] == lstart
    else:
        # _list smaller than lstart: always false
        return False
6f8952a80da81381464521fec55abaaee4a04881
21,965
def get_subscribers(subreddit_, *args):
    """Gets current sub count for one or more subreddits.

    Inputs
    -------
    str: Desired subreddit name(s)

    Returns
    -------
    int: sub count or dict:{subreddit: int(sub count)}
    """
    if len(args) > 0:
        subreddit = reddit.subreddit(subreddit_)
        subcount = {subreddit_: subreddit.subscribers}
        for page in args:
            subreddit = reddit.subreddit(page)
            subcount[page] = subreddit.subscribers
        return subcount
    else:
        subreddit = reddit.subreddit(subreddit_)
        return subreddit.subscribers
2648eb7db5fe0ebc9940f714fab8770947960463
21,967
def pattern_match(value, pattern, env=None):
    """
    Pattern match a value and a pattern.

    Args:
        value: the value to pattern-match on
        pattern: a pattern, consisting of literals and/or locally bound variables
        env: a dictionary of local variables bound while matching

    Returns:
        (True, env) if the match is successful, and (False, env) otherwise

    Raises:
        SyntaxError, if a variable name is used multiple times in the same pattern
    """
    env = {} if env is None else env
    if isinstance(pattern, PatternMatchBind):
        if pattern.name in env:
            raise SyntaxError("Conflicting definitions for %s" % pattern.name)
        env[pattern.name] = value
        return True, env
    elif isinstance(pattern, PatternMatchListBind):
        head, tail = list(value[:len(pattern.head)]), value[len(pattern.head):]
        matches, env = pattern_match(head, pattern.head, env)
        if matches:
            return pattern_match(tail, pattern.tail, env)
        return False, env
    elif type(value) == type(pattern):
        if isinstance(value, ADT):
            return pattern_match(nt_to_tuple(value), nt_to_tuple(pattern), env)
        elif hasattr(value, "__iter__"):
            matches = []
            if len(value) != len(pattern):
                return False, env
            for v, p in zip(value, pattern):
                match_status, env = pattern_match(v, p, env)
                matches.append(match_status)
            return all(matches), env
        elif value == pattern:
            return True, env
    return False, env
145ef26283f4e21f7ab763317174c5e6da043d84
21,968
import functools

import six


def wraps(wrapped):
    """A functools.wraps helper that handles partial objects on Python 2."""
    # https://github.com/google/pytype/issues/322
    if isinstance(wrapped, functools.partial):
        return six.wraps(wrapped, assigned=_PARTIAL_VALID_ASSIGNMENTS)  # pytype: disable=wrong-arg-types
    else:
        return six.wraps(wrapped)
8e3762c9d7f50c8e26df0f0de545de7991d59e92
21,971
def byte_to_bits(byte):
    """Convert a byte to a tuple of 8 bits for use in Merkle-Hellman.

    The first element of the returned tuple is the most significant bit.

    Usage::

        byte_to_bits(65)        # => (0, 1, 0, 0, 0, 0, 0, 1)
        byte_to_bits(b'ABC'[0])  # => (0, 1, 0, 0, 0, 0, 0, 1)
        byte_to_bits('A')       # => raises TypeError

    :param byte: The byte to convert.
    :type byte: int between 0 and 255, inclusive.
    :raises: BinaryConversionError if byte is not in [0, 255].
    :returns: An 8-tuple of bits representing this byte's value.
    """
    if not 0 <= byte <= 255:
        raise BinaryConversionError(byte)
    out = []
    for i in range(8):
        out.append(byte & 1)
        byte >>= 1
    return tuple(out[::-1])
231272c60a3d06de0a914b38fee4f50a0209bcd4
21,972
def KK_RC48_fit(params, w, t_values):
    """
    Kramers-Kronig Function: -RC-
    Kristian B. Knudsen ([email protected] / [email protected])
    """
    # Equivalent to unpacking Rs and R1..R48 individually and summing the
    # 48 RC terms explicitly.
    return params["Rs"] + sum(
        params["R%d" % (i + 1)] / (1 + w * 1j * t_values[i]) for i in range(48)
    )
1395f182880db7f42d43eba05605673eab83770b
21,973
def race(deer, seconds):
    """
    Use the reindeer's speed and rest times to find the timed distance
    """
    distance = 0
    stats = reindeer[deer]
    resting = False
    while True:
        if resting:
            if seconds <= stats[2]:
                break
            seconds -= stats[2]
        else:
            if seconds <= stats[1]:
                distance += seconds * stats[0]
                break
            seconds -= stats[1]
            distance += stats[1] * stats[0]
        resting = not resting
    return distance
ea7cb0577cdfa4aab558ca8ad6f4ddde2d79e996
21,974
def AsdlEqual(left, right):
    """Check if generated ASDL instances are equal.

    We don't use equality in the actual code, so this is relegated to test_lib.
    """
    if left is None and right is None:
        return True
    if isinstance(left, (int, str, bool, pybase.SimpleObj)):
        return left == right
    if isinstance(left, list):
        if len(left) != len(right):
            return False
        for a, b in zip(left, right):
            if not AsdlEqual(a, b):
                return False
        return True
    if isinstance(left, pybase.CompoundObj):
        if left.tag != right.tag:
            return False
        field_names = left.__slots__  # hack for now
        for name in field_names:
            # Special case: we are not testing locations right now.
            if name == 'span_id':
                continue
            a = getattr(left, name)
            b = getattr(right, name)
            if not AsdlEqual(a, b):
                return False
        return True
    raise AssertionError(left)
ac5752cd30ff31488ecc000426b6f2430acb1718
21,975
import torch


def attention_padding_mask(q, k, padding_index=0):
    """Generate mask tensor for padding value

    Args:
        q (Tensor): (B, T_q)
        k (Tensor): (B, T_k)
        padding_index (int): padding index. Default: 0

    Returns:
        (torch.BoolTensor): Mask with shape (B, T_q, T_k). A True element stands for a position requiring masking.

    Notes:
        Assume padding_index is 0:
        k.eq(0) -> BoolTensor (B, T_k)
        k.eq(0).unsqueeze(1) -> (B, 1, T_k)
        k.eq(0).unsqueeze(1).expand(-1, q.size(-1), -1) -> (B, T_q, T_k)
    """
    # We take the mean because we want to get rid of the last dim.
    # How we remove that dim doesn't matter, since we only end up with
    # True/False for the mask.
    q = torch.mean(q, 2)
    mask = k.eq(padding_index).unsqueeze(1).expand(-1, q.size(-1), -1)
    return mask
49d1dc8dd4e59284eb090711545cf70c9ba5fad4
21,977
import itertools


def process_params(request, standard_params=STANDARD_QUERY_PARAMS,
                   filter_fields=None, defaults=None):
    """Parse query params.

    Parses, validates, and converts query into a consistent format.

    :keyword request: the bottle request
    :keyword standard_params: query params that are present in most of our
        (opinionated) APIs (ex. limit, offset, sort, q, and facets)
    :keyword filter_fields: list of field names to allow filtering on
    :keyword defaults: dict of params and their default values
    :returns: dict of query params with supplied values (string or list)
    """
    if not filter_fields:
        filter_fields = []
    unfilterable = (set(request.query.keys()) - set(filter_fields) -
                    set(standard_params))
    if unfilterable:
        bottle.abort(400, "The following query params were invalid: %s. "
                     "Try one (or more) of %s." %
                     (", ".join(unfilterable), ", ".join(filter_fields)))
    query_fields = defaults or {}
    for key in request.query:
        if key in filter_fields:
            # turns ?netloc=this.com&netloc=that.com,what.net into
            # {'netloc': ['this.com', 'that.com', 'what.net']}
            matches = request.query.getall(key)
            matches = list(itertools.chain(*(k.split(',') for k in matches)))
            if len(matches) > 1:
                query_fields[key] = matches
            else:
                query_fields[key] = matches[0]
    if 'sort' in request.query:
        sort = request.query.getall('sort')
        sort = list(itertools.chain(*(
            comma_separated_strings(str(k)) for k in sort)))
        query_fields['sort'] = sort
    if 'q' in request.query:
        search = request.query.getall('q')
        search = list(itertools.chain(*(
            comma_separated_strings(k) for k in search if k)))
        query_fields['q'] = search
    return query_fields
06de4c5df0bdcfcc091aefa12cc8aa7fd4c06597
21,980
import inspect


def all_attributes(cls):
    """
    Each object will have the attributes declared directly on the object in
    the attrs dictionary. In addition there may be attributes declared by a
    particular object's parent classes. This function walks the class
    hierarchy to collect the attrs in the object's parent classes.

    For example, if Location.City is a subclass of Location and Location has
    the attribute GPS_COORDS, then this function would combine GPS_COORDS and
    the existing attributes on the Location.City object and return the
    combination.
    """
    attrs = cls.attrs.copy()
    # walk the class hierarchy
    for sub in inspect.getmro(cls):
        for name, prop in getattr(sub, 'attrs', {}).items():
            if name in attrs:
                continue
            attrs[name] = prop
    return attrs
3d1a1013fe36cef776b6a9842f774f5394aaeff5
21,981
import numpy as np


def _reshape_model_inputs(model_inputs: np.ndarray, num_trajectories: int,
                          trajectory_size: int) -> np.ndarray:
    """Reshapes the model inputs' matrix.

    Parameters
    ----------
    model_inputs: np.ndarray
        Matrix of model inputs
    num_trajectories: int
        Number of trajectories
    trajectory_size: int
        Number of points in a trajectory

    Returns
    -------
    input_matrix: np.ndarray
        Reshaped input matrix.
    """
    num_vars = model_inputs.shape[1]
    input_matrix = model_inputs.reshape(num_trajectories, trajectory_size, num_vars)
    return input_matrix
2562d143c5dbf7b6c1c018afe2f87df6297752da
21,982
def VIS(img, **normalization):
    """Unmixes according to the Vegetation-Impervious-Soil (VIS) approach.

    Args:
        img: the ee.Image to unmix.
        **normalization: keyword arguments to pass to fractionalCover(),
            like shade_normalize=True.

    Returns:
        unmixed: a 3-band image file in order of (soil-veg-impervious).
    """
    endmembers = [soil, pv, urban]
    endmember_names = ["soil", "pv", "impervious"]
    unmixed = fractionalCover(img, endmembers, endmember_names, **normalization)
    return unmixed
2c85aa894f6ccfae3da8650cb9c32cc125a19a45
21,984
from collections import defaultdict


def create_labels(mapfile, Nodes=None):
    """
    Mapping from the protein identifier to the group

    Format: ##protein start_position end_position orthologous_group protein_annotation

    :param Nodes: set -- create mapping only for this set of nodes
    :param mapfile: file that contains the mapping for the organism
    :return:
    """
    f = open(mapfile)
    labels = defaultdict(str)
    while True:
        line = f.readline().strip()
        if not line:
            break
        sp = line.split("\t")
        if not Nodes:
            labels[sp[0]] = sp[3]
        elif sp[0] in Nodes:
            labels[sp[0]] = sp[3]
    return labels
634eefc5a837e484059278939ba34fd2482846bf
21,985
def sortKSUID(ksuidList):
    """Sorts a list of ksuids by their date (most recent first)."""
    return sorted(ksuidList, key=lambda x: x.getTimestamp(), reverse=True)
0476bc0ef19f8730488041ac33598ba7471f96e7
21,987
from collections import Counter


def get_vocabulary(list_):
    """
    Computes the vocabulary for the provided list of sentences

    :param list_: a list of sentences (strings)
    :return: a dictionary with key, val = word, count and a sorted list, by count, of all the words
    """
    all_the_words = []
    for text in list_:
        for word in text:
            all_the_words.append(word)
    vocabulary_counter = Counter(all_the_words)
    vocabulary_sorted = list(map(lambda x: x[0],
                                 sorted(vocabulary_counter.items(), key=lambda x: -x[1])))
    return vocabulary_sorted, vocabulary_counter
d6c357a5768c2c784c7dfe97743d34795b2695c0
21,989
def check_field(rule: tuple, field: int) -> bool:
    """check if a field is valid given a rule"""
    for min_range, max_range in rule:
        if min_range <= field <= max_range:
            return True
    return False
32e34da10fff12e765dd6d48472acf0ac5ad72af
21,991
import math


def split(value, precision=1):
    """
    Split `value` into value and "exponent-of-10", where "exponent-of-10" is a
    multiple of 3. This corresponds to SI prefixes.

    Returns tuple, where the second value is the "exponent-of-10" and the
    first value is `value` divided by the "exponent-of-10".

    Args
    ----
    value : int, float
        Input value.
    precision : int
        Number of digits after decimal place to include.

    Returns
    -------
    tuple
        The second value is the "exponent-of-10" and the first value is
        `value` divided by the "exponent-of-10".

    Examples
    --------

    .. code-block:: python

        si_prefix.split(0.04781)   -> (47.8, -3)
        si_prefix.split(4781.123)  -> (4.8, 3)

    See :func:`si_format` for more examples.
    """
    negative = False
    digits = precision + 1
    if value < 0.:
        value = -value
        negative = True
    elif value == 0.:
        return 0., 0
    expof10 = int(math.log10(value))
    if expof10 > 0:
        expof10 = (expof10 // 3) * 3
    else:
        expof10 = (-expof10 + 3) // 3 * (-3)
    value *= 10 ** (-expof10)
    if value >= 1000.:
        value /= 1000.0
        expof10 += 3
    elif value >= 100.0:
        digits -= 2
    elif value >= 10.0:
        digits -= 1
    if negative:
        value *= -1
    return value, int(expof10)
776ded073807773b755dcd7ab20c47d1f33ca1e1
21,992
import requests


def test_cert(host, port=443, timeout=5, **kwargs):
    """Test that a cert is valid on a site.

    Args:
        host (:obj:`str`):
            hostname to connect to.
            can be any of: "scheme://host:port", "scheme://host", or "host".
        port (:obj:`str`, optional):
            port to connect to on host.
            If no :PORT in host, this will be added to host.
            Defaults to: 443
        timeout (:obj:`str`, optional):
            Timeout for connect/response.
            Defaults to: 5.
        kwargs: passed thru to requests.get()

    Returns:
        (:obj:`tuple` of (:obj:`bool`, :obj:`Exception`)):
            True / False if cert was valid. Exception that was thrown if cert
            not valid, or None if successful.
    """
    kwargs.setdefault("timeout", timeout)
    kwargs.setdefault("url", build_url(host=host, port=port))
    try:
        requests.get(**kwargs)
        return (True, None)
    except requests.exceptions.SSLError as exc:
        return (False, exc)
3d0e0098b5f654305c187f2a566c25f8c87a5ce3
21,994
def get_predicates():  # noqa: E501
    """get_predicates

    Get a list of predicates used in statements issued by the knowledge source  # noqa: E501

    :rtype: List[BeaconPredicate]
    """
    return controller_impl.get_predicates()
7f3f89b300a0e43449a1860cff8200af6d33a3b1
21,995
def noisy_job_stage3(aht, ht, zz, exact=False):
    """Adds noise to decoding circuit.

    Args:
    =====
    aht, ht, zz : numeric
        Circuit parameters for decoding circuit
    exact : bool
        If True, works with wavefunction

    Returns:
    ========
    noisy_circuit : cirq.Circuit
        Noisy version of input circuit
    param_resolvers : list
    """
    job = Job(decoder_circuit(aht, ht, zz, exact))
    noisy = DepolarizerChannel(probability=noise_level)
    noisy_job = noisy.transform_job(job)
    param_resolvers = [ParamResolver({k: v for k, v in e})
                       for e in noisy_job.sweep.param_tuples()]
    return noisy_job.circuit, param_resolvers
a5f1bcb8cced41b2b6179d2eeb68e8b8939aca96
21,996
import math


def buy_and_hold_manager_factory(mgr, j: int, y, s: dict, e=1000):
    """Ignores manager preference except every j data points.

    For this to make any sense, 'y' must be changes in log prices.
    For this to be efficient, the manager must respect the "e" convention;
    that is, the manager must do little work when e < 0.

    :param mgr:
    :param j:
    :param y:
    :param s:  State
    :param e:
    :return: w Portfolio weights
    """
    if j == 1:
        # Special case: just use the manager
        # This is the only time the user's e parameter is passed on.
        s_mgr = s['s_mgr']
        w, s_mgr = mgr(y=y, s=s_mgr, e=e)
        s['s_mgr'] = s_mgr
        return w, s
    else:
        if s.get('w') is None:
            # Initialization
            s['count'] = 0
            s_mgr = {}
            w, s_mgr = mgr(y=y, s=s_mgr, e=1000)
            s['s_mgr'] = s_mgr
            s['w'] = w
            return w, s
        else:
            s['count'] = s['count'] + 1
            if s['count'] % j == 0:
                # Sporadically use the manager
                s_mgr = s['s_mgr']
                w, s_mgr = mgr(y=y, s=s_mgr, e=1000)
                s['s_mgr'] = s_mgr
                s['w'] = w
                return w, s
            else:
                # Tell the manager not to worry too much about this data point,
                # as the weights won't be used ...
                s_mgr = s['s_mgr']
                _ignore_w, s_mgr = mgr(y=y, s=s_mgr, e=-1)
                s['s_mgr'] = s_mgr
                # ... instead we let it ride
                w_prev = s['w']
                w = normalize([wi * math.exp(yi) for wi, yi in zip(w_prev, y)])
                s['w'] = w
                return w, s
2225b6f41979e1781a778f397b699751456dc2a4
21,997
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait


def explicit_wait_visibility_of_element_located(browser, xpath, timeout=35):
    """Explicitly wait until an element is visible."""
    locator = (By.XPATH, xpath)
    condition = expected_conditions.visibility_of_element_located(locator)
    try:
        wait = WebDriverWait(browser, timeout)
        result = wait.until(condition)
    except TimeoutException:
        print("Timeout Exception in explicit wait")
        return False
    return result
2fd6fe951d1d55121909e2a326b72af4524f577b
21,998
def list_all_resources():
    """Return a list of all known resources.

    :param start_timestamp: Limits resources by last update time >= this value. (optional)
    :type start_timestamp: ISO date in UTC
    :param end_timestamp: Limits resources by last update time < this value. (optional)
    :type end_timestamp: ISO date in UTC
    :param metadata.<key>: match on the metadata within the resource. (optional)
    """
    return _list_resources(
        project=acl.get_limited_to_project(flask.request.headers))
c2b42abd7c03d2f2b6541a45b7b45b2cb420ebc4
21,999
def get_completed_exploration_ids(user_id, collection_id):
    """Returns a list of explorations the user has completed within the
    context of the provided collection.

    Args:
        user_id: str. ID of the given user.
        collection_id: str. ID of the collection.

    Returns:
        list(str). A list of exploration ids that the user with the given
        user id has completed within the context of the provided collection
        with the given collection id. The list is empty if the user has not
        yet completed any explorations within the collection, or if either
        the collection and/or user do not exist.

        A progress model isn't added until the first exploration of a
        collection is completed, so, if a model is missing, there isn't
        enough information to infer whether that means the collection
        doesn't exist, the user doesn't exist, or if they just haven't made
        any progress in that collection yet. Thus, we just assume the user
        and collection exist for the sake of this call, so it returns an
        empty list, indicating that no progress has yet been made.
    """
    progress_model = user_models.CollectionProgressModel.get(
        user_id, collection_id)
    return progress_model.completed_explorations if progress_model else []
7d3456f8fa0af83d776d7f2daf0edde33c83adb6
22,000
import re


def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    """
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"\?", " ? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()
dec81e721fb3a83c8ea372d21dfa805394edc0e3
22,001
def transitive_closure(graph):
    """
    Compute the transitive closure of the graph

    :param graph: a graph (list of directed pairs)
    :return: the transitive closure of the graph
    """
    closure = set(graph)
    while True:
        new_relations = set((x, w) for x, y in closure for q, w in closure if q == y)
        closure_until_now = closure | new_relations
        if closure_until_now == closure:
            break
        closure = closure_until_now
    closure_no_doubles = [(x, y) for (x, y) in closure if not x == y]
    return closure_no_doubles
3bb6567033cf920ccced7565e75f8f789c55c37d
22,002
def call_function(func_name, func_args, params, system):
    """
    func_args : list of values (int or string)
    return str or None if fail
    return ROPChain if success
    """
    if system == Systems.TargetSystem.Linux and curr_arch_type() == ArchType.ARCH_X86:
        return call_function_linux_x86(func_name, func_args, params)
    elif system == Systems.TargetSystem.Linux and curr_arch_type() == ArchType.ARCH_X64:
        return call_function_linux_x64(func_name, func_args, params)
    elif system == Systems.TargetSystem.Linux and curr_arch_type() == ArchType.ARCH_ARM32:
        return call_function_linux_arm32(func_name, func_args, params)
    else:
        return "Not implemented yet for this system/arch"
ede0b62dfa6d47c2c79ff405b056c26198e5afb5
22,003
import traceback


def error_handler(update, context):
    """Log Errors caused by Updates."""
    log.error(
        'with user: "%s (%s)"\nmessage: "%s"\ntraceback: %s',
        update.effective_user, update.effective_user.id,
        context.error, traceback.format_exc()
    )
    return ConversationHandler.END
45ea22efe64c600ede6de81ee278493ff14dc772
22,004
import numpy as np


def jac(w, centred_img_patches, F, NUM_MODES):
    """
    The Jacobian of the numerical search procedure.

    Parameters
    ----------
    w : numpy array (floats)
        Column vector of model weights, used to construct mapping.
    centred_img_patches : numpy array (floats)
        The mean-centred {p x NUM_PATCHES} array of p-element image patches.
    F : numpy array (floats)
        Column vector of all errors.
    NUM_MODES : int
        Number of independent modes into which the image will be decomposed.

    Returns
    -------
    J : numpy array (floats)
        The Jacobian for the current error vector and set of weights.
    """
    # Initialise numerical perturbation and Jacobian array
    PERT = 1e-15
    num_var = w.size
    num_err = F.size
    J = np.zeros([num_err, num_var])
    # Iterate over all weights and populate Jacobian
    for i in range(num_var):
        w_pert = w.copy()
        w_pert[i] = w[i] + PERT
        inverse_mapping_pert = generate_inverse_mapping(w_pert, centred_img_patches, NUM_MODES)
        sources_pert = map_patches_to_sources(inverse_mapping_pert, centred_img_patches)
        source_cov_pert = cov(sources_pert)
        dF = err(sources_pert, source_cov_pert) - F
        J[:, [i]] = dF / PERT
    return J
ee780ac6e366f14c1ab7c661db99bcdbdd3cc033
22,005
def get_session():
    """Return a Session instance for manipulating the db."""
    return Session(engine)
eb528d0de57e704e96ffa502e7504746efac6cbb
22,006
def sdm_ecart(f):
    """
    Compute the ecart of ``f``.

    This is defined to be the difference of the total degree of `f` and the
    total degree of the leading monomial of `f` [SCA, defn 2.3.7].

    Invalid if f is zero.

    Examples
    ========

    >>> from sympy.polys.distributedmodules import sdm_ecart
    >>> sdm_ecart([((1, 2, 3), 1), ((1, 0, 1), 1)])
    0
    >>> sdm_ecart([((2, 2, 1), 1), ((1, 5, 1), 1)])
    3
    """
    return sdm_deg(f) - sdm_monomial_deg(sdm_LM(f))
00d4e8807eef38326ee8a588a81287a1c9d62d0d
22,007
import cv2


def draw_gif_frame(image, bbox, frame_no):
    """Draw a rectangle with given bbox info.

    Input:
        - image: Frame to draw on
        - bbox: A list containing a rectangle's info to draw -> frame id x y w h
        - frame_no: Frame number to print on the image
    Output: Frame that has been drawn on
    """
    obj_id = bbox[1]
    bbox_left = int(bbox[2])
    bbox_top = int(bbox[3])
    bbox_right = bbox_left + int(bbox[4])
    bbox_bottom = bbox_top + int(bbox[5])
    # Set up params
    left_top_pt = (bbox_left, bbox_top)
    right_bottom_pt = (bbox_right, bbox_bottom)
    color = (0, 0, 255)
    thickness = 8
    org = (bbox_left, bbox_top - 5)
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 1
    thicknes_id = 3
    line_type = cv2.LINE_4
    cv2.rectangle(image, left_top_pt, right_bottom_pt, color, thickness)
    cv2.putText(image, str(obj_id), org, font, font_scale, color, thicknes_id, line_type)
    put_text(image, str(frame_no))
    return image
462773a88179361bc013777405b04f99ac73bd3b
22,008
def create_host(api_client, orig_host_name, orig_host_uid, cloned_host_name, cloned_host_ip):
    """
    Create a new host object with 'cloned_host_name' as its name and 'cloned_host_ip' as its
    IP-address. The new host's color and comments will be copied from the "orig_host" object.

    :param api_client: Api client of the domain
    :param orig_host_name: original host name
    :param orig_host_uid: original host uid
    :param cloned_host_name: cloned host name
    :param cloned_host_ip: cloned host IP
    :return: the cloned host uid on success, otherwise None
    """
    # get details of the original host object
    log("\n\tGathering information for host {}".format(orig_host_name))
    res = api_client.api_call("show-host", {"uid": orig_host_uid})
    if res.success is False:
        discard_write_to_log_file(api_client,
                                  "Failed to open existing host: {}. Aborting.".format(res.error_message))
        return None

    # copy the color and comments from the original host
    color = res.data["color"]
    comments = res.data["comments"]

    # create a new host object
    log("\n\tCreating a new host {}".format(cloned_host_name))
    res = api_client.api_call("add-host", {"name": cloned_host_name, "ip-address": cloned_host_ip,
                                           "color": color, "comments": comments})
    if res.success is False:
        discard_write_to_log_file(api_client,
                                  "Failed to create the new host: {}. Aborting.".format(res.error_message))
        return None
    return res.data["uid"]
18fb2f727bae8150c98510a45e69409ff8aa4fe9
22,010
from collections import deque


def parenthesize(x):
    """Return a copy of x surrounded by open and close parentheses"""
    cast = type(x)
    if cast is deque:
        return deque(['('] + list(x) + [')'])
    return cast('(') + x + cast(')')
ae76b220fd3bc00d3df99ec97982b44010f36e64
22,011
def get_logo_color():
    """Return color of logo used in application main menu.

    RGB format (0-255, 0-255, 0-255). Orange applied.
    """
    return (255, 128, 0)
a6eee63d816a44af31893830ac641d6c0b1b9ba1
22,012
import numpy as np


def DG(p, t, Ep=10):
    """
    Training by gradient descent
    """
    # m is the number of training patterns (examples) and n the number
    # of elements in the feature vector.
    m, n = p.shape
    a = 0.5
    # --- Initial weights ---
    w = np.random.uniform(-0.25, 0.25, 2)
    b = np.random.uniform(-0.25, 0.25)
    # -----------------------
    for N in range(Ep):       # Iterate over the number of epochs
        for ti in range(m):   # Iterate over the number of patterns
            # ---- Output ----
            net = np.dot(w, p[ti]) + b
            y = logsig(net)
            # ----------------
            # --- Delta rule ---
            err = t[ti] - y
            Delta = 2 * err * df(net) * p[ti]
            w = w + a * Delta
            b = b + a * 2 * err * df(net)
            # ------------------
    return w, b
d23c5b11432cfb6d7d4e3beb56b917f82809442e
22,013
def delete_video_db(video_id):
    """Delete a video reference from the database."""
    connection = connect_db()
    connection.cursor().execute('DELETE FROM Content WHERE contentID=%s', (video_id,))
    connection.commit()
    close_db(connection)
    return True
c91428f5f60590d7f0219d732900ae24fcc39749
22,015
import numba
import numpy as np


def int_to_float_fn(inputs, out_dtype):
    """Create a Numba function that converts integer and boolean ``ndarray``s to floats."""
    if any(i.type.numpy_dtype.kind in "ib" for i in inputs):
        args_dtype = np.dtype(f"f{out_dtype.itemsize}")

        @numba.njit(inline="always")
        def inputs_cast(x):
            return x.astype(args_dtype)

    else:
        args_dtype_sz = max([_arg.type.numpy_dtype.itemsize for _arg in inputs])
        args_dtype = np.dtype(f"f{args_dtype_sz}")

        @numba.njit(inline="always")
        def inputs_cast(x):
            return x.astype(args_dtype)

    return inputs_cast
f47f9485fa83acb2e7a0237c7ef851d3c23f8fe6
22,016
from numpy import sqrt


def vel_gradient(**kwargs):
    """
    Calculates velocity gradient across surface object in supersonic flow
    (from stagnation point) based upon either of two input variable sets.

    First method:
    vel_gradient(R_n = Object radius (or equivalent radius, for shapes that are not axisymmetric),
                 p_0 = flow stagnation pressure,
                 p_inf = flow freestream static pressure,
                 rho = flow density)

    Second method:
    vel_gradient(R_n = Object radius (or equivalent radius, for shapes that are not axisymmetric),
                 delta = Shock stand-off distance (from object stagnation point),
                 U_s = Flow velocity immediately behind shock)
    """
    if ('R_n' in kwargs) and ('p_0' in kwargs) and ('p_inf' in kwargs) and \
            ('rho' in kwargs):
        vel_gradient = (1 / kwargs['R_n']) * sqrt((2 * (kwargs['p_0'] -
                                                        kwargs['p_inf'])) / kwargs['rho'])
    elif ('R_n' in kwargs) and ('U_s' in kwargs) and ('delta' in kwargs):
        b = kwargs['delta'] + kwargs['R_n']
        vel_gradient = (kwargs['U_s'] / kwargs['R_n']) * (1 + ((2 + ((b**3) /
                                                                     (kwargs['R_n']**3))) / (2 * (((b**3) / (kwargs['R_n']**3)) - 1))))
    else:
        raise KeyError('Incorrect variable assignment')
    return vel_gradient
8ee3ef490c113551e9200743e52378a8206a3666
22,017
from functools import reduce
from math import gcd


def lcm(numbers):
    """
    Get the least common multiple of a list of numbers
    ------------------------------------------------------------------------------------
    input:  numbers [1,2,6]  list of integers
    output: 6                integer
    """
    return reduce(lambda x, y: int((x * y) / gcd(x, y)), numbers, 1)
a1c3ce93b0ea4f06c8fb54765110fa85f7517fe5
22,018
def parseBracketed(idxst, pos):
    """parse an identifier in curly brackets.

    Here are some examples:

    >>> def test(st, pos):
    ...     idxst = IndexedString(st)
    ...     (a, b) = parseBracketed(idxst, pos)
    ...     print(st[a:b])
    ...
    >>> test(r'{abc}', 0)
    {abc}
    >>> test(r'{ab8c}', 0)
    {ab8c}
    >>> test(r'{c}', 0)
    {c}
    >>> test(r'{}', 0)
    Traceback (most recent call last):
        ...
    ParseException: command enclosed in curly brackets at line 1, col 1
    >>> test(r'{abc', 0)
    Traceback (most recent call last):
        ...
    ParseException: command enclosed in curly brackets at line 1, col 1
    >>> test(r'x{ab8c}', 1)
    {ab8c}
    """
    if not isinstance(idxst, IndexedString):
        raise TypeError("idxst par wrong: %s" % repr(idxst))
    st = idxst.st()
    m = rx_bracketed.match(st, pos)
    if m is None:
        raise ParseException("command enclosed in curly brackets at",
                             rowcol=idxst.rowcol(pos))
    return (pos, m.end())
d78617fa8a85c234920d0f985566d7a00ebe6b1a
22,019
def compute_agg_tiv(tiv_df, agg_key, bi_tiv_col, loc_num):
    """Compute the aggregate TIV, depending on the agg_key."""
    agg_tiv_df = (tiv_df.drop_duplicates(agg_key + [loc_num], keep='first')
                  [list(set(agg_key + ['tiv', 'tiv_sum', bi_tiv_col]))]
                  .groupby(agg_key, observed=True).sum().reset_index())
    if 'is_bi_coverage' in agg_key:
        # we need to separate bi coverage from the other tiv
        is_bi = agg_tiv_df['is_bi_coverage']
        agg_tiv_df.loc[~is_bi, 'agg_tiv'] = agg_tiv_df['tiv_sum'] - agg_tiv_df[bi_tiv_col]
        agg_tiv_df.loc[is_bi, 'agg_tiv'] = agg_tiv_df[bi_tiv_col]
    else:
        agg_tiv_df['agg_tiv'] = agg_tiv_df['tiv_sum']
    return agg_tiv_df[agg_key + ['agg_tiv']]
246ea2d61230f3e3bfe365fdf8fdbedbda98f25b
22,020
from typing import List

import numpy as np

def convert_configurations_to_array(configs: List[Configuration]) -> np.ndarray:
    """Impute inactive hyperparameters in configurations with their default.

    Necessary to apply an EPM to the data.

    Parameters
    ----------
    configs : List[Configuration]
        List of configuration objects.

    Returns
    -------
    np.ndarray
        Array with configuration hyperparameters. Inactive values are imputed
        with their default value.
    """
    configs_array = np.array([config.get_array() for config in configs],
                             dtype=np.float64)
    configuration_space = configs[0].configuration_space
    for hp in configuration_space.get_hyperparameters():
        default = hp._inverse_transform(hp.default)
        idx = configuration_space.get_idx_by_hyperparameter_name(hp.name)

        # Create a mask which is True for all non-finite entries in column idx!
        # (np.bool was removed from NumPy; the plain bool dtype is equivalent.)
        column_mask = np.zeros(configs_array.shape, dtype=bool)
        column_mask[:, idx] = True

        nonfinite_mask = ~np.isfinite(configs_array)
        mask = column_mask & nonfinite_mask

        configs_array[mask] = default

    return configs_array
09b14dc5d5bb5707b059b7a469d93c7288da84cf
22,021
from typing import Optional from datetime import datetime import requests def annual_mean( start: Optional[datetime] = None, end: Optional[datetime] = None ) -> dict: """Get the annual mean data ---------------------------- Data from March 1958 through April 1974 have been obtained by C. David Keeling of the Scripps Institution of Oceanography (SIO) and were obtained from the Scripps website (scrippsco2.ucsd.edu). The estimated uncertainty in the annual mean is the standard deviation of the differences of annual mean values determined independently by NOAA/ESRL and the Scripps Institution of Oceanography. NOTE: In general, the data presented for the last year are subject to change, depending on recalibration of the reference gas mixtures used, and other quality control procedures. Occasionally, earlier years may also be changed for the same reasons. Usually these changes are minor. CO2 expressed as a mole fraction in dry air, micromol/mol, abbreviated as ppm """ if start and not isinstance(start, datetime): raise TypeError("Start must be a datetime object") if end and not isinstance(end, datetime): raise TypeError("End must be a datetime object") url = 'https://www.esrl.noaa.gov/gmd/webdata/ccgg/trends/co2/co2_annmean_mlo.txt' res = requests.get(url) raw = res.content.decode("utf-8") lines = raw.splitlines() _license = "\n".join(lines[:41]) description = "\n".join(lines[41:56]) headers = lines[56] mean = { "url": url, "license": _license, "description": description, "headers": headers, "raw": raw, "data": { "yr": [], "mean (ppm)": [], "unc": [], }, } # Parse data for row in lines[57:]: yr, ppm, unc = row.split() date = datetime(year=int(yr), month=1, day=1) if start and start > date: continue if end and end < date: break mean["data"]["yr"].append(yr) mean["data"]["mean (ppm)"].append(ppm) mean["data"]["unc"].append(unc) return mean
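# Usage sketch (performs a network request to the NOAA/ESRL server):
subset = annual_mean(start=datetime(1990, 1, 1), end=datetime(2000, 1, 1))
print(subset["data"]["yr"])
print(subset["data"]["mean (ppm)"])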
4fd06301f9f414e08629cdbfeae75adcc6febdcf
22,022
from functools import wraps

def exception(logger, extraLog=None):
    """
    A decorator that wraps the passed in function and logs exceptions
    should one occur

    @param logger: The logging object
    @param extraLog: optional dict passed to the logger's ``extra`` argument
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                # log the exception, then re-raise it for the caller
                err = "There was an exception in " + func.__name__
                logger.exception(err, extra=extraLog)
                raise
        return wrapper
    return decorator
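# Usage sketch (assumes a configured stdlib logger):
import logging

logging.basicConfig()
log = logging.getLogger("app")

@exception(log)
def divide(a, b):
    return a / b

# divide(1, 0) logs the ZeroDivisionError via log.exception, then re-raises it.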
52a0ca19b738576a5e42da9d720ec5a5118466fe
22,023
def read_external_sources(service_name):
    """
    Try to get config from external sources, with the following priority:
    1. Credentials file (ibm-credentials.env)
    2. Environment variables
    3. VCAP Services (Cloud Foundry)
    :param service_name: The service name
    :return: dict
    """
    config = read_from_credential_file(service_name)
    if not config:
        config = read_from_env_variables(service_name)
    if not config:
        config = read_from_vcap_services(service_name)
    return config
a8008efecf6cbc8801022c9a99617480d50ad525
22,024
import torch def intersect(box_a, box_b): """ We resize both tensors to [A,B,2] without new malloc: [A,2] -> [A,1,2] -> [A,B,2] [B,2] -> [1,B,2] -> [A,B,2] Then we compute the area of intersect between box_a and box_b. Args: box_a: (tensor) bounding boxes, Shape: [A,4]. box_b: (tensor) bounding boxes, Shape: [B,4]. Return: (tensor) intersection area, Shape: [A,B]. """ A = box_a.size(0) B = box_b.size(0) max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2), box_b[:, 2:].unsqueeze(0).expand(A, B, 2)) min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2), box_b[:, :2].unsqueeze(0).expand(A, B, 2)) inter = torch.clamp((max_xy - min_xy), min=0) return inter[:, :, 0] * inter[:, :, 1]
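# Usage sketch: pairwise intersection areas for two small box sets
# in (xmin, ymin, xmax, ymax) form.
box_a = torch.tensor([[0., 0., 2., 2.]])
box_b = torch.tensor([[1., 1., 3., 3.], [5., 5., 6., 6.]])
print(intersect(box_a, box_b))  # tensor([[1., 0.]])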
96f67dd8ee1b40af469b5e40dc1f3456250451b3
22,025
import re

from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize

def tokenize(text):
    """
    tokenize text messages
    Input: text messages
    Output: list of tokens
    """
    # find urls and replace them with 'urlplaceholder'
    url_regex = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    text = re.sub(url_regex, 'urlplaceholder', text)

    # normalize case and remove punctuation
    text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())

    # tokenize text
    tokens = word_tokenize(text)

    # lemmatize and remove stop words
    stop_words = stopwords.words("english")
    lemmatizer = WordNetLemmatizer()
    tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]

    return tokens
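# Usage sketch (requires the NLTK punkt, stopwords and wordnet corpora):
print(tokenize("Water needed in Dhaka, see http://example.com for details!"))
# e.g. ['water', 'needed', 'dhaka', 'see', 'urlplaceholder', 'detail']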
02c322b01b0af995030c007be634b7d3d1603518
22,026
import copy

import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage

# Assumed project-level dependencies (used but not imported in the original
# snippet): qtt provides boxcar_filter, pgeometry provides the plot helpers.
import qtt.algorithms.generic
import pgeometry

def peakFindBottom(x, y, peaks, fig=None, verbose=1):
    """ Find the left bottom of a detected peak

    Args:
        x (array): independent variable data
        y (array): signal data
        peaks (list): list of detected peaks
        fig (None or int): if integer, then plot results
        verbose (int): verbosity level
    """
    kk = np.ones(3) / 3.
    # scipy.ndimage.filters is deprecated; correlate1d lives in scipy.ndimage
    ys = scipy.ndimage.correlate1d(y, kk, mode='nearest')
    peaks = copy.deepcopy(peaks)
    dy = np.diff(ys, n=1)
    dy = np.hstack((dy, [0]))
    kernel_size = [int(np.max([2, dy.size / 100])), ]
    dy = qtt.algorithms.generic.boxcar_filter(dy, kernel_size=kernel_size)
    for ii, peak in enumerate(peaks):
        if verbose:
            print('peakFindBottom: peak %d' % ii)
        if not peak['valid']:
            continue
        ind = range(peak['phalf0'])
        left_of_peak = 0 * y.copy()
        left_of_peak[ind] = 1
        # np.arange (not range) so the comparison below broadcasts element-wise
        r = np.arange(y.size)
        left_of_peak_and_decreasing = left_of_peak * (dy < 0)  # set w to zero where the scan is increasing
        left_of_peak_and_decreasing[0] = 1  # make sure to stop at the left end of the scan...

        ww = left_of_peak_and_decreasing.nonzero()[0]
        if verbose >= 2:
            print(' peakFindBottom: size of decreasing area %d' % ww.size)
        if ww.size == 0:
            if peak['valid']:
                peak['valid'] = 0
                peak['validreason'] = 'peakFindBottom'
                if verbose >= 2:
                    print('peakFindBottom: invalid peak')
                    print(ind)
                    print(dy)
            continue
        bidx = ww[-1]
        peak['pbottomlow'] = bidx
        w = left_of_peak * (dy > 0)  # we need to be rising
        # we need to be above 10% of absolute low value
        w = w * (ys < ys[bidx] + .1 * (ys[peak['p']] - ys[bidx]))
        w = w * (r >= peak['pbottomlow'])
        ww = w.nonzero()[0]
        if ww.size == 0:
            if peak['valid']:
                peak['valid'] = 0
                peak['validreason'] = 'peakFindBottom'
                if verbose >= 2:
                    print('peakFindBottom: invalid peak (%s)' % ('rising part ww.size == 0',))
                    print(w)
                    print(ys)
            continue
        bidx = ww[-1]
        peak['pbottom'] = bidx
        peak['pbottoml'] = bidx
        peak['xbottom'] = x[bidx]
        peak['xbottoml'] = x[bidx]
        peak['vbottom'] = y[bidx]  # legacy
        peak['ybottoml'] = y[bidx]

        if verbose >= 3:
            plt.figure(53)
            plt.clf()
            plt.plot(x[ind], 0 * np.array(ind) + 1, '.b', label='ind')
            plt.plot(x[range(y.size)], w, 'or', label='w')
            plt.plot(x[range(y.size)], dy < 0, 'dg', markersize=12, label='dy<0')
            pgeometry.enlargelims()
            pgeometry.plot2Dline([-1, 0, peak['x']], '--c', label='x')
            pgeometry.plot2Dline([-1, 0, x[peak['phalf0']]], '--y', label='phalf0')
            pgeometry.plot2Dline([-1, 0, x[peak['pbottomlow']]], ':k', label='pbottomlow')
            pgeometry.plot2Dline([-1, 0, peak['xbottoml']], '--y', label='xbottoml')
            plt.legend(loc=0)
    return peaks
dd79a4fd572f69f5db9adeaaf56ab4c8661c0ca1
22,027
def replacelast(string, old, new, count=1):
    """Replace the last `count` occurrences of a substring"""
    return new.join(string.rsplit(old, count))
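# Usage sketch: replace only the trailing separator(s).
print(replacelast("a.b.c", ".", "_"))           # a.b_c
print(replacelast("a.b.c", ".", "_", count=2))  # a_b_c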
6af2cd56cc43e92b0d398e8aad4e25f0c6c34ddd
22,028
import numpy as np
from tifffile import imread  # assumed reader; the docstring says input is a TIFF

def _safe_read_img(img):
    """
    Read in a TIFF image if a path is given instead of an array-like object.
    """
    img = imread(img) if isinstance(img, str) else np.array(img)
    return np.nan_to_num(img)
a9a50b5ad76a6ed5833c2c149b1366b318814d6a
22,030
def max_version(*modules: Module) -> str: """Maximum version number of a sequence of modules/version strings See `get_version` for how version numbers are extracted. They are compared as `packaging.version.Version` objects. """ return str(max(get_version(x) for x in modules))
34ad9bd27591e3496e6a5a7e75dbf0191a8c077e
22,031
def load_secret(name, default=None): """Check for and load a secret value mounted by Docker in /run/secrets.""" try: with open(f"/run/secrets/{name}") as f: return f.read().strip() except Exception: return default
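# Usage sketch: fall back to a development default when no secret is mounted.
db_password = load_secret("db_password", default="dev-only-password")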
1aac980ad6bc039964ef9290827eb5c6d1b1455f
22,032
from re import sub  # the original imported operator.sub, but re.sub is what is used below

def snake_case(x):
    """ Converts a string to snake case """
    # Disclaimer: This method is annoyingly complex, and I'm sure there is a much
    # better way to do this. The idea is to iterate through the characters in the
    # string, checking for specific cases and handling them accordingly. One note:
    # the built-in isupper() and islower() methods will consider an underscore False.
    # The process looks like this:
    # First, we check if the current character is uppercase; if it's not, we simply
    # insert that character into the new string as is.
    # Second, we need to see if it's the first character of the string. If it is, we
    # check whether it is part of an acronym that should stay capitalized, even in
    # snake case (e.g. XML, JSON, HTML). We do this by looking at the next character
    # and checking if it is also capitalized. If it is, we insert the character in
    # capital form; if not, we lowercase it and insert it.
    # If the current character is NOT the first character of the string, we still need
    # to determine if it is part of an acronym. The same process is applied, except now
    # we also look at the previous character to see if it is capitalized. If it is, we
    # can assume this is part of an acronym. If the next character is uppercase, but
    # the previous one isn't, then we assume it is part of an acronym and insert it in
    # uppercase form. When checking if the previous character is lowercase during our
    # acronym check, it is possible that islower() will return False because the
    # character before it is an underscore, so we have to handle both possibilities.
    x = sub(r'\s+', '_', x)  # first, replace any consecutive whitespace with underscores
    out = ''
    for i, char in enumerate(x):
        if char.isupper():
            # Get the next and previous characters for later use (guarding against
            # running past the end of the string, which crashed the original).
            next_char = x[i + 1] if i + 1 < len(x) else ''
            previous_char = x[i - 1]
            if not i == 0:  # Check if we are not at the first character
                if previous_char.islower():
                    out += '_'
                    if next_char.islower() or next_char == '_':
                        out += char.lower()
                        continue
                elif previous_char == '_':
                    if next_char.islower() or next_char == '_':
                        out += char.lower()
                        continue
                elif next_char.isupper():
                    out += char
                    continue
                else:
                    out += char.lower()
                    continue
            else:
                # First character: keep it capitalized only when it starts an acronym,
                # as described above (the original always kept it uppercase).
                out += char if next_char.isupper() else char.lower()
                continue
        elif not char == '_' and i >= 2 and x[i - 1].isupper() and x[i - 2].isupper():
            # This could be a lowercased word following an acronym without any spaces.
            # The acronym's last capital was already lowercased and emitted, so splice
            # the underscore in before it (the original inserted the underscore one
            # character too late). The i >= 2 guard avoids negative-index wraparound.
            out = out[:-1] + '_' + out[-1]
        elif char == '_' and x[i - 1] == '_':
            continue
        out += char
    if out.endswith('_'):
        out = out[:-1]
    return out
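# Usage sketch: a few conversions under the rules above.
print(snake_case("userId"))             # user_id
print(snake_case("Hello World"))        # hello_world
print(snake_case("parseHTMLDocument"))  # parse_HTML_document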
133ff68eb42e5e009fe2eee03d1e52f7d015732c
22,033
from PIL import Image

async def process_manga(data_list: list[dict], image_path: str) -> str:
    """White out and re-typeset the text regions of a single image.

    Args:
        data_list (list[dict]): OCR-recognized text, re-packaged per region
        image_path (str): download path of the image (also used as the final
            save path, overwriting the original)

    Returns:
        str: the path the image was saved to
    """
    image = Image.open(image_path).convert("RGB")
    for i in data_list:
        image = await draw_white(image, i)
        if i["is_vertical"]:
            image = await add_text_for_manga(image, i)
        else:
            image = await add_text(image, i)
    image.save(image_path)
    return image_path
47574e11067c00d3c5ab4110a7dae8100d450a1f
22,034
import tensorflow as tf

def padded_nd_indices(is_valid, shuffle=False, seed=None):
    """Pads the invalid entries by valid ones and returns the nd_indices.

    For example, when we have a batch_size = 1 and list_size = 3. Only the
    first 2 entries are valid. We have:
    ```
    is_valid = [[True, True, False]]
    nd_indices, mask = padded_nd_indices(is_valid)
    ```
    nd_indices has a shape [1, 3, 2] and mask has a shape [1, 3].

    ```
    nd_indices = [[[0, 0], [0, 1], [0, 0]]]
    mask = [[True, True, False]]
    ```
    nd_indices can be used by gather_nd on a Tensor t
    ```
    padded_t = tf.gather_nd(t, nd_indices)
    ```
    and get the following Tensor with first 2 dims are [1, 3]:
    ```
    padded_t = [[t(0, 0), t(0, 1), t(0, 0)]]
    ```

    Args:
      is_valid: A boolean `Tensor` for entry validity with shape [batch_size,
        list_size].
      shuffle: A boolean that indicates whether valid indices should be shuffled.
      seed: Random seed for shuffle.

    Returns:
      A tuple of Tensors (nd_indices, mask). The first has shape [batch_size,
      list_size, 2] and it can be used in gather_nd or scatter_nd. The second has
      the shape of [batch_size, list_size] with value True for valid indices.
    """
    with tf.compat.v1.name_scope(name='nd_indices_with_padding'):
        is_valid = tf.convert_to_tensor(value=is_valid)
        list_size = tf.shape(input=is_valid)[1]
        num_valid_entries = tf.reduce_sum(
            input_tensor=tf.cast(is_valid, dtype=tf.int32), axis=1)
        indices, mask = _circular_indices(list_size, num_valid_entries)
        # Valid indices of the tensor are shuffled and put on the top.
        # [batch_size, list_size, 2].
        shuffled_indices = organize_valid_indices(
            is_valid, shuffle=shuffle, seed=seed)
        # Construct indices for gather_nd [batch_size, list_size, 2].
        nd_indices = _to_nd_indices(indices)
        nd_indices = tf.gather_nd(shuffled_indices, nd_indices)
        return nd_indices, mask
61a57aa95c1d3151900aba3db07bba0eae542dfd
22,035
def part_two(data: str) -> int: """The smallest number leading to an md5 hash with six leading zeros for data.""" return smallest_number_satisfying(data, starts_with_six_zeros)
57195761f388654f9aa099162f337e8177e56111
22,036
def showlatesttag(context, mapping): """List of strings. The global tags on the most recent globally tagged ancestor of this changeset. If no such tags exist, the list consists of the single string "null". """ return showlatesttags(context, mapping, None)
e04b03a9dca54a93f450de676ea05c307f157dab
22,037
def list_filters(): """ List all filters """ filters = [_serialize_filter(imgfilter) for imgfilter in FILTERS.values()] return response_list(filters)
e43da929925872f5eefca5da2659052a1a48d442
22,038
def len_adecuada(palabra, desde, hasta):
    """ (str, int, int) -> str

    Checks whether the length of the word is within the desired range.
    (The returned message is in Spanish, matching the doctests below.)

    >>> len_adecuada('hola', 0, 100)
    'La longitud de hola, está entre 0 y 100'

    >>> len_adecuada('hola', 1, 2)
    'La longitud de hola, no está entre 1 y 2'

    :param palabra: word to check
    :param desde: lower bound (inclusive)
    :param hasta: upper bound (inclusive)
    :return: message stating whether the length is in range
    """
    return 'La longitud de {0}, {1}está entre {2} y {3}'\
        .format(palabra,
                "" if desde <= len(palabra) <= hasta else "no ",
                desde,
                hasta)
df217a0159cd04c76f5eb12ca42e651ee62fcd99
22,039
import warnings

import numpy as np

def ECEF_from_ENU(enu, latitude, longitude, altitude):
    """
    Calculate ECEF coordinates from local ENU (east, north, up) coordinates.

    Args:
        enu: numpy array, shape (Npts, 3), with local ENU coordinates
        latitude: latitude of center of ENU coordinates in radians
        longitude: longitude of center of ENU coordinates in radians
        altitude: altitude of center of ENU coordinates in meters

    Returns:
        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates
    """
    enu = np.array(enu)
    if enu.ndim > 1 and enu.shape[1] != 3:
        if enu.shape[0] == 3:
            warnings.warn('The expected shape of the ENU array is (Npts, 3). '
                          'Support for arrays shaped (3, Npts) will go away in a '
                          'future version.', PendingDeprecationWarning)
            enu_use = enu.T
            transpose = True
        else:
            raise ValueError('The expected shape of the ENU array is (Npts, 3).')
    else:
        enu_use = enu
        transpose = False

    if enu.shape == (3, 3):
        warnings.warn('The enu array in ECEF_from_ENU is being '
                      'interpreted as (Npts, 3). Historically this function '
                      'has supported (3, Npts) arrays, please verify that '
                      'array ordering is as expected.', PendingDeprecationWarning)

    if enu_use.ndim == 1:
        enu_use = enu_use[np.newaxis, :]

    xyz = np.zeros_like(enu_use)
    xyz[:, 0] = (-np.sin(latitude) * np.cos(longitude) * enu_use[:, 1]
                 - np.sin(longitude) * enu_use[:, 0]
                 + np.cos(latitude) * np.cos(longitude) * enu_use[:, 2])
    xyz[:, 1] = (-np.sin(latitude) * np.sin(longitude) * enu_use[:, 1]
                 + np.cos(longitude) * enu_use[:, 0]
                 + np.cos(latitude) * np.sin(longitude) * enu_use[:, 2])
    xyz[:, 2] = (np.cos(latitude) * enu_use[:, 1]
                 + np.sin(latitude) * enu_use[:, 2])

    xyz_center = XYZ_from_LatLonAlt(latitude, longitude, altitude)
    xyz[:, 0] = xyz[:, 0] + xyz_center[0]
    xyz[:, 1] = xyz[:, 1] + xyz_center[1]
    xyz[:, 2] = xyz[:, 2] + xyz_center[2]

    if len(enu.shape) == 1:
        xyz = np.squeeze(xyz)
    elif transpose:
        return xyz.T

    return xyz
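# Usage sketch: ECEF position of a point 100 m east of a reference site
# (latitude/longitude in radians, per the docstring). Assumes the module's
# XYZ_from_LatLonAlt helper is available alongside this function.
lat, lon, alt = np.radians(52.0), np.radians(4.4), 0.0
print(ECEF_from_ENU(np.array([[100.0, 0.0, 0.0]]), lat, lon, alt))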
13561632731d59e40e4232f8dc2798e8dcd8067f
22,040