content: string (lengths 35 to 762k)
sha1: string (length 40, fixed)
id: int64 (range 0 to 3.66M)
def get_motif_proteins(meme_db_file):
    """ Hash motif_id's to protein names using the MEME DB file """
    motif_protein = {}
    for line in open(meme_db_file):
        a = line.split()
        if len(a) > 0 and a[0] == 'MOTIF':
            if a[2][0] == '(':
                motif_protein[a[1]] = a[2][1:a[2].find(')')]
            else:
                motif_protein[a[1]] = a[2]
    return motif_protein
88e42b84314593a965e7dd681ded612914e35629
3,650,449
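Note: a minimal usage sketch for get_motif_proteins above, assuming a MEME-format database whose MOTIF lines look like the two shown in the comment (the file name is hypothetical):

# motifs.meme contains, e.g.:
#   MOTIF MA0004.1 (Arnt)
#   MOTIF MA0006.1 Ahr::Arnt
motif_protein = get_motif_proteins('motifs.meme')
# -> {'MA0004.1': 'Arnt', 'MA0006.1': 'Ahr::Arnt'}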
import cv2 as cv  # assumed; the original snippet omits this import

def pixellate_bboxes(im, bboxes, cell_size=(5, 6), expand_per=0.0):
    """Pixellates ROI using Nearest Neighbor interpolation

    :param im: (numpy.ndarray) image BGR
    :param bboxes: (BBox) a single BBox or list of BBoxes
    :param cell_size: (int, int) pixellated cell size
    :returns (numpy.ndarray) BGR image
    """
    if not bboxes:
        return im
    elif not isinstance(bboxes, list):
        bboxes = [bboxes]
    for bbox in bboxes:
        if expand_per > 0:
            bbox = bbox.expand_per(expand_per)
        x1, y1, x2, y2 = bbox.xyxy_int
        im_roi = im[y1:y2, x1:x2]
        h, w, c = im_roi.shape
        # pixellate: shrink the ROI to cell_size, then scale it back up
        im_roi = cv.resize(im_roi, cell_size, interpolation=cv.INTER_NEAREST)
        im_roi = cv.resize(im_roi, (w, h), interpolation=cv.INTER_NEAREST)
        im[y1:y2, x1:x2] = im_roi
    return im
8c028714467c350dfd799671b0b18739705393ba
3,650,451
def StepToGeom_MakeHyperbola2d_Convert(*args):
    """
    :param SC:
    :type SC: Handle_StepGeom_Hyperbola &
    :param CC:
    :type CC: Handle_Geom2d_Hyperbola &
    :rtype: bool
    """
    return _StepToGeom.StepToGeom_MakeHyperbola2d_Convert(*args)
6896b27c10526b3a7f1d5840a63209a6f30d163e
3,650,453
def create_dict(local=None, field=None, **kwargs):
    """
    Fetch the named variables from a locals() dict and return them as a dict.

    :param local: dict, typically the caller's locals()
    :param field: str[], names of the variables to read from local
    :param kwargs: extra entries, used when a variable needs an alternative name
    :return: dict
    """
    if field is None or local is None:
        return {}
    result = {k: v for k, v in local.items() if k in field}
    result.update(**kwargs)
    return result
19aceef7f648cc72f29fceba811085cde9d6d587
3,650,454
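Note: a usage sketch for create_dict, assuming it is called from inside a function whose locals() holds the values of interest:

def make_user(name, age, internal_flag=True):
    # keep only `name` and `age` from locals(), plus one renamed extra
    return create_dict(locals(), ['name', 'age'], display_name=name.title())

make_user('ada lovelace', 36)
# -> {'name': 'ada lovelace', 'age': 36, 'display_name': 'Ada Lovelace'}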
def sum_list_for_datalist(data_list):
    """
    Sum a list of values for DB storage; if any past value from the
    reference date is missing (stored as 0), return 0 instead.

    :param data_list: list of float or int
    :return: float or int
    """
    mysum = 0
    for i in range(0, len(data_list)):
        if data_list[i] == 0:
            return 0
        mysum = mysum + data_list[i]
    return mysum
bae8966f64c642176d92d31c27df691e0f255d6a
3,650,455
import tensorflow as tf  # assumed; the original snippet omits this import

def elu(x, alpha=1.):
    """Exponential linear unit.

    Arguments:
        x {tensor} -- Input float tensor to perform activation.
        alpha {float} -- A scalar, slope of negative section.

    Returns:
        tensor -- Output of exponential linear activation
    """
    # cast the boolean masks to the input dtype so the products are defined
    pos = tf.cast(x > 0, x.dtype)
    neg = tf.cast(x < 0, x.dtype)
    return x * pos + alpha * (tf.math.exp(x) - 1.) * neg
c40c7aa4a0553dc6b0b6c6dd9f583a701f022e70
3,650,456
def coalesce(*args):
    """
    Compute the first non-null value(s) from the passed arguments in
    left-to-right order. This is also known as "combine_first" in pandas.

    Parameters
    ----------
    *args : variable-length value list

    Examples
    --------
    >>> import ibis
    >>> expr1 = None
    >>> expr2 = 4
    >>> result = ibis.coalesce(expr1, expr2, 5)

    Returns
    -------
    coalesced : type of first provided argument
    """
    return ops.Coalesce(args).to_expr()
0fb1af5db75c7ad65f470e348d76d0f289ba5ff2
3,650,457
import json

import connexion  # assumed; the original snippet omits these imports
from flask import Response

def get_analysis(poem, operations, rhyme_analysis=False, alternative_output=False):
    """View for /analysis that performs an analysis of a poem, running the
    different operations on it.

    :param poem: A UTF-8 encoded byte string with the text of the poem
    :param operations: List of strings with the operations to be performed:
        - "scansion": Performs scansion analysis
        - "enjambment": Performs enjambment detection
    :param rhyme_analysis: Whether or not rhyme analysis is to be performed
    :return: Response object with a dict with a key for each operation and
        its analysis, or a serialized version of it
    """
    analysis = analyze(poem.decode('utf-8'), operations, rhyme_analysis,
                       alternative_output)
    mime = connexion.request.headers.get("Accept")
    # serialization = serialize(analysis, mime)
    return Response(json.dumps(analysis), mimetype=mime)
1f40376a4ecbbe6453caa909406f595707cc44be
3,650,458
def get_detail_root():
    """ Get the detail storage path in the git project """
    return get_root() / '.detail'
aa2c30ed839d32e084a11c52f17af621ecfb9011
3,650,459
def solve(strs, m, n):
    """ 2D 0-1 knapsack """
    def count(s):
        m, n = 0, 0
        for c in s:
            if c == "0":
                m += 1
            elif c == "1":
                n += 1
        return m, n

    dp = []
    for _ in range(m + 1):
        dp.append([0] * (n + 1))
    for s in strs:
        mi, ni = count(s)
        for j in range(m, mi - 1, -1):  # reverse!
            for k in range(n, ni - 1, -1):  # reverse!
                dp[j][k] = max(dp[j][k], dp[j - mi][k - ni] + 1)
    return dp[m][n]
3fb2b16fc9059227c0edce1199269988d18cb908
3,650,460
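Note: a usage sketch for the 2D 0-1 knapsack above, on the classic "ones and zeroes" instance (largest subset of strings using at most m zeros and n ones):

print(solve(["10", "0001", "111001", "1", "0"], 5, 3))  # -> 4
# e.g. {"10", "0001", "1", "0"} uses exactly 5 zeros and 3 ones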
def bk_category_chosen_category():
    """Returns chosen category for creating bk_category object."""
    return "Bread"
cbf1c933e5c2b69214e828afaab5babdba61dca8
3,650,461
import math

# numpy (np), pandas (pd), and uproot imports are assumed; `data` is a
# project-local module providing the column-name maps used below.

def apply_weights(
    events,
    total_num=1214165.85244438,  # for chips_1200
    nuel_frac=0.00003202064566,  # for chips_1200
    anuel_frac=0.00000208200747,  # for chips_1200
    numu_frac=0.00276174709613,  # for chips_1200
    anumu_frac=0.00006042213136,  # for chips_1200
    cosmic_frac=0.99714372811940,  # for chips_1200
    osc_file_name="./inputs/oscillations/matter_osc_cp_zero.root",
    verbose=False,
):
    """Calculate and apply the 'weight' column to scale events to predicted numbers.

    Args:
        events (pd.DataFrame): events dataframe to append weights to
        total_num (float): total number of expected events in a year
        nuel_frac (float): fraction of events from nuel
        anuel_frac (float): fraction of events from anuel
        numu_frac (float): fraction of events from numu
        anumu_frac (float): fraction of events from anumu
        cosmic_frac (float): fraction of events from cosmics
        osc_file_name (str): Oscillation data file name
        verbose (bool): should we print the weight summary?

    Returns:
        pd.DataFrame: events dataframe with weights
    """

    def apply_scale_weight(event, w_nuel, w_anuel, w_numu, w_anumu, w_cosmic):
        """Add the correct weight to each event.

        Args:
            event (dict): pandas event(row) dict
            w_nuel: nuel weight
            w_anuel: anuel weight
            w_numu: numu weight
            w_anumu: anumu weight
            w_cosmic: cosmic weight
        """
        if (
            event[data.MAP_NU_TYPE["name"]] == 0
            and event[data.MAP_SIGN_TYPE["name"]] == 0
            and event[data.MAP_COSMIC_CAT["name"]] == 0
            and event["t_sample_type"] == 0
        ):  # Beam nuel
            return w_nuel
        elif (
            event[data.MAP_NU_TYPE["name"]] == 0
            and event[data.MAP_SIGN_TYPE["name"]] == 0
            and event[data.MAP_COSMIC_CAT["name"]] == 0
            and event["t_sample_type"] == 1
        ):  # Appeared nuel
            return 1
        elif (
            event[data.MAP_NU_TYPE["name"]] == 0
            and event[data.MAP_SIGN_TYPE["name"]] == 1
            and event[data.MAP_COSMIC_CAT["name"]] == 0
            and event["t_sample_type"] == 0
        ):  # Beam anuel
            return w_anuel
        elif (
            event[data.MAP_NU_TYPE["name"]] == 1
            and event[data.MAP_SIGN_TYPE["name"]] == 0
            and event[data.MAP_COSMIC_CAT["name"]] == 0
            and event["t_sample_type"] == 0
        ):  # Beam numu
            return w_numu
        elif (
            event[data.MAP_NU_TYPE["name"]] == 1
            and event[data.MAP_SIGN_TYPE["name"]] == 1
            and event[data.MAP_COSMIC_CAT["name"]] == 0
            and event["t_sample_type"] == 0
        ):  # Beam anumu
            return w_anumu
        elif event[data.MAP_COSMIC_CAT["name"]] == 1 and event["t_sample_type"] == 2:
            return w_cosmic
        else:
            return 0

    def apply_osc_weight(event, numu_survival_prob, nuel_osc):
        """Add the correct weight to each event.

        Args:
            event (dict): pandas event(row) dict
            numu_survival_prob (np.array): numu survival probability array
            nuel_osc (np.array): numu appearance scaled probability array
        """
        if (
            event[data.MAP_NU_TYPE["name"]] == 0
            and event[data.MAP_SIGN_TYPE["name"]] == 0
            and event[data.MAP_COSMIC_CAT["name"]] == 0
            and event["t_sample_type"] == 0
        ):  # Beam nuel
            return event["w"]
        if (
            event[data.MAP_NU_TYPE["name"]] == 0
            and event[data.MAP_SIGN_TYPE["name"]] == 0
            and event[data.MAP_COSMIC_CAT["name"]] == 0
            and event["t_sample_type"] == 1
        ):  # Appeared nuel
            nu_energy = math.floor(event["t_nu_energy"] / 100)
            if nu_energy > 99:
                nu_energy = 99
            if nuel_osc[nu_energy] == 0.0:
                return event["w"]
            else:
                return nuel_osc[nu_energy] * event["w"]
        elif (
            event[data.MAP_NU_TYPE["name"]] == 0
            and event[data.MAP_SIGN_TYPE["name"]] == 1
            and event[data.MAP_COSMIC_CAT["name"]] == 0
            and event["t_sample_type"] == 0
        ):  # beam anuel
            return event["w"]
        elif (
            event[data.MAP_NU_TYPE["name"]] == 1
            and event[data.MAP_SIGN_TYPE["name"]] == 0
            and event[data.MAP_COSMIC_CAT["name"]] == 0
            and event["t_sample_type"] == 0
        ):  # Beam numu
            nu_energy = math.floor(event["t_nu_energy"] / 100)
            if nu_energy > 99:
                nu_energy = 99
            return numu_survival_prob[nu_energy] * event["w"]
        elif (
            event[data.MAP_NU_TYPE["name"]] == 1
            and event[data.MAP_SIGN_TYPE["name"]] == 1
            and event[data.MAP_COSMIC_CAT["name"]] == 0
            and event["t_sample_type"] == 0
        ):  # Beam anumu
            nu_energy = math.floor(event["t_nu_energy"] / 100)
            if nu_energy > 99:
                nu_energy = 99
            return numu_survival_prob[nu_energy] * event["w"]
        elif event[data.MAP_COSMIC_CAT["name"]] == 1 and event["t_sample_type"] == 2:
            return event["w"]
        else:
            return 0

    np.seterr(divide="ignore", invalid="ignore")

    tot_nuel = events[
        (events[data.MAP_NU_TYPE["name"]] == 0)
        & (events[data.MAP_SIGN_TYPE["name"]] == 0)
        & (events[data.MAP_COSMIC_CAT["name"]] == 0)
        & (events["t_sample_type"] == 0)
    ].shape[0]
    tot_anuel = events[
        (events[data.MAP_NU_TYPE["name"]] == 0)
        & (events[data.MAP_SIGN_TYPE["name"]] == 1)
        & (events[data.MAP_COSMIC_CAT["name"]] == 0)
        & (events["t_sample_type"] == 0)
    ].shape[0]
    tot_numu = events[
        (events[data.MAP_NU_TYPE["name"]] == 1)
        & (events[data.MAP_SIGN_TYPE["name"]] == 0)
        & (events[data.MAP_COSMIC_CAT["name"]] == 0)
        & (events["t_sample_type"] == 0)
    ].shape[0]
    tot_anumu = events[
        (events[data.MAP_NU_TYPE["name"]] == 1)
        & (events[data.MAP_SIGN_TYPE["name"]] == 1)
        & (events[data.MAP_COSMIC_CAT["name"]] == 0)
        & (events["t_sample_type"] == 0)
    ].shape[0]
    tot_cosmic = events[events[data.MAP_COSMIC_CAT["name"]] == 1].shape[0]

    if tot_nuel == 0:
        w_nuel = 0.0
    else:
        w_nuel = (1.0 / tot_nuel) * (nuel_frac * total_num)
    if tot_anuel == 0:
        w_anuel = 0.0
    else:
        w_anuel = (1.0 / tot_anuel) * (anuel_frac * total_num)
    if tot_numu == 0:
        w_numu = 0.0
    else:
        w_numu = (1.0 / tot_numu) * (numu_frac * total_num)
    if tot_anumu == 0:
        w_anumu = 0.0
    else:
        w_anumu = (1.0 / tot_anumu) * (anumu_frac * total_num)
    if tot_cosmic == 0:
        w_cosmic = 0.0
    else:
        w_cosmic = (1.0 / tot_cosmic) * (cosmic_frac * total_num)

    if verbose:
        print(
            "Weights: ({},{:.5f}), ({},{:.5f}), ({},{:.5f}), ({},{:.5f}), ({},{:.5f})".format(
                tot_nuel, w_nuel,
                tot_anuel, w_anuel,
                tot_numu, w_numu,
                tot_anumu, w_anumu,
                tot_cosmic, w_cosmic,
            )
        )

    events["w"] = events.apply(
        apply_scale_weight,
        axis=1,
        args=(w_nuel, w_anuel, w_numu, w_anumu, w_cosmic),
    )

    # Now we need to apply the oscillation probability weights
    osc_file = uproot.open(osc_file_name)

    # We need to scale the nuel events so they simulate the appearance spectra
    numu_ev = events[  # Get the unoscillated numu beam events
        (events[data.MAP_NU_TYPE["name"]] == 1)
        & (events[data.MAP_SIGN_TYPE["name"]] == 0)
        & (events[data.MAP_COSMIC_CAT["name"]] == 0)
        & (events["t_sample_type"] == 0)
    ]
    nuel_ev = events[  # Get the nuel events generated with the numu flux
        (events[data.MAP_NU_TYPE["name"]] == 0)
        & (events[data.MAP_SIGN_TYPE["name"]] == 0)
        & (events[data.MAP_COSMIC_CAT["name"]] == 0)
        & (events["t_sample_type"] == 1)
    ]
    numu_e_h = np.histogram(
        numu_ev["t_nu_energy"] / 100,
        bins=100,
        range=(0, 100),
        weights=numu_ev["w"],
    )
    nuel_e_h = np.histogram(
        nuel_ev["t_nu_energy"] / 100,
        bins=100,
        range=(0, 100),
        weights=nuel_ev["w"],
    )
    nuel_osc = (numu_e_h[0] * osc_file["hist_mue"].values) / nuel_e_h[0]

    # Apply a weight to every event
    events["w"] = events.apply(
        apply_osc_weight,
        axis=1,
        args=(
            osc_file["hist_mumu"].values,
            nuel_osc,
        ),
    )

    return events
81f35a51b3d28577204087511f9c405ff56afaaa
3,650,462
def _pipe_line_with_colons(colwidths, colaligns):
    """Return a horizontal line with optional colons to indicate column's
    alignment (as in `pipe` output format)."""
    segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
    return "|" + "|".join(segments) + "|"
76dd17c761e7adb06fe57c5210645a4fe3872374
3,650,463
from datetime import datetime, timedelta

def convert_to_dates(start, end):
    """
    CTD - Convert two strings to datetimes in format 'xx:xx'

    param start: String - First string to convert
    param end: String - Second string to convert
    return: datetime - Two datetimes
    """
    start = datetime.strptime(start, "%H:%M")
    end = datetime.strptime(end, "%H:%M")
    if end < start:
        end += timedelta(days=1)
    return start, end
53ffb9924d31385aac2eafc66fe7a6159e5a310d
3,650,464
def flipud(m):
    """
    Flips the entries in each column in the up/down direction.
    Rows are preserved, but appear in a different order than before.

    Args:
        m (Tensor): Input array.

    Returns:
        Tensor.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``GPU`` ``CPU``

    Example:
        >>> import mindspore.numpy as np
        >>> A = np.arange(8.0).reshape((2,2,2))
        >>> output = np.flipud(A)
        >>> print(output)
        [[[4. 5.]
          [6. 7.]]
         [[0. 1.]
          [2. 3.]]]
    """
    return flip(m, 0)
06770689d23ca365fb57a6b9d1e74654b30ddaf2
3,650,465
def get_book_url(tool_name, category):
    """Get the link to the help documentation of the tool.

    Args:
        tool_name (str): The name of the tool.
        category (str): The category of the tool.

    Returns:
        str: The URL to help documentation.
    """
    prefix = "https://jblindsay.github.io/wbt_book/available_tools"
    url = "{}/{}.html#{}".format(prefix, category, tool_name)
    return url
daf6c8e0832295914a03b002b548a82e2949612a
3,650,466
import hashlib

def game_hash(s):
    """Generate hash-based identifier for a game account
    based on the text of the game.
    """
    def int_to_base(n):
        alphabet = "BCDFGHJKLMNPQRSTVWXYZ"
        base = len(alphabet)
        if n < base:
            return alphabet[n]
        return int_to_base(n // base) + alphabet[n % base]

    return int_to_base(
        int(hashlib.sha1(s.encode('utf-8')).hexdigest(), 16)
    )[-7:]
c218a2607390916117921fe0f68fc23fedd51fc3
3,650,467
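Note: a usage sketch for game_hash; the identifier is deterministic, seven characters long, and drawn only from the consonant alphabet defined above (the sample input is arbitrary):

tag = game_hash("1. e4 e5 2. Nf3 ...")
assert tag == game_hash("1. e4 e5 2. Nf3 ...")  # stable across runs
assert len(tag) == 7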
import math

from requests import Request  # assumed; the original snippet omits this import

def thumbnail_url(bbox, layers, qgis_project, style=None, internal=True):
    """Internal function to generate the URL for the thumbnail.

    :param bbox: The bounding box to use in the format [left,bottom,right,top].
    :type bbox: list

    :param layers: Name of the layer to use.
    :type layers: basestring

    :param qgis_project: The path to the QGIS project.
    :type qgis_project: basestring

    :param style: Layer style to choose
    :type style: str

    :param internal: Flag to switch between public url and internal url.
        Public url will be served by Django Geonode (proxified).
    :type internal: bool

    :return: The WMS URL to fetch the thumbnail.
    :rtype: basestring
    """
    x_min, y_min, x_max, y_max = bbox
    # We calculate the margins according to 10 percent.
    percent = 10
    delta_x = (x_max - x_min) / 100 * percent
    delta_x = math.fabs(delta_x)
    delta_y = (y_max - y_min) / 100 * percent
    delta_y = math.fabs(delta_y)
    # We apply the margins to the extent.
    margin = [
        y_min - delta_y, x_min - delta_x,
        y_max + delta_y, x_max + delta_x
    ]
    # Call the WMS.
    bbox = ','.join([str(val) for val in margin])
    query_string = {
        'SERVICE': 'WMS',
        'VERSION': '1.1.1',
        'REQUEST': 'GetMap',
        'BBOX': bbox,
        'SRS': 'EPSG:4326',
        'WIDTH': '250',
        'HEIGHT': '250',
        'MAP': qgis_project,
        'LAYERS': layers,
        'STYLE': style,
        'FORMAT': 'image/png',
        'TRANSPARENT': 'true',
        'DPI': '96',
        'MAP_RESOLUTION': '96',
        'FORMAT_OPTIONS': 'dpi:96'
    }
    qgis_server_url = qgis_server_endpoint(internal)
    url = Request('GET', qgis_server_url, params=query_string).prepare().url
    return url
aa405eae72eacd7fd7b842bf569cc1ba3bc19315
3,650,468
def evaluate_prediction_power(df, num_days=1):
    """
    Applies a shift to the model for the number of days given, default to 1,
    and feeds the data to a linear regression model.
    Evaluates the results using score and returns them.
    """
    scores = {}
    print("Num days: {}".format(range(num_days)))
    for i in range(num_days):
        X, y = get_xy(df, num_days=i)
        regressor = learn(X, y)
        scores[i] = regressor.score(X, y)
    return scores
d4e91c8eb656fea8fce16cc16eb1588415c80849
3,650,469
def get_select_file_dialog_dir():
    """Return the directory that should be displayed by default
    in file dialogs.
    """
    directory = CONF.get('main', 'select_file_dialog_dir', get_home_dir())
    directory = directory if osp.exists(directory) else get_home_dir()
    return directory
9ae485caf5c5162e0b0e4082cee3e99925861717
3,650,470
def join_collections(sql_query: sql.SQLQuery) -> QueryExpression:
    """Join together multiple collections to return their documents in the response.

    Params:
    -------
    sql_query: SQLQuery object with information about the query params.

    Returns:
    --------
    An FQL query expression for joined and filtered documents.
    """
    tables = sql_query.tables
    order_by = sql_query.order_by
    from_table = tables[0]
    to_table = tables[-1]
    table_with_columns = next(table for table in tables if table.has_columns)

    if (
        order_by is not None
        and order_by.columns[0].table_name != table_with_columns.name
    ):
        raise exceptions.NotSupportedError(
            "Fauna uses indexes for both joining and ordering of results, "
            "and we currently can only sort the principal table "
            "(i.e. the one whose columns are being selected or modified) in the query. "
            "You can sort on a column from the principal table, query one table at a time, "
            "or remove the ordering constraint."
        )

    if not any(sql_query.filter_groups):
        raise exceptions.NotSupportedError(
            "Joining tables without cross-table filters via the WHERE clause is not supported. "
            "Selecting columns from multiple tables is not supported either, "
            "so there's no performance gain from joining tables without cross-table conditions "
            "for filtering query results."
        )

    assert from_table.left_join_table is None

    intersection_queries = []
    for filter_group in sql_query.filter_groups:
        intersection_query = q.intersection(
            *[
                _build_intersecting_query(filter_group, None, table, direction)
                for table, direction in [(from_table, "right"), (to_table, "left")]
            ]
        )
        intersection_queries.append(intersection_query)

    return q.union(*intersection_queries)
62ad0cbad609e8218b4ac9d78f893fbcfc90618e
3,650,473
def querylist(query, encoding='utf-8', errors='replace'):
    """Split the query component into individual `name=value` pairs and
    return a list of `(name, value)` tuples.
    """
    if query:
        qsl = [query]
    else:
        return []
    if isinstance(query, bytes):
        QUERYSEP = (b';', b'&')
        EQ = b'='
    else:
        QUERYSEP = ';&'
        EQ = '='
    for sep in QUERYSEP:
        qsl = [s for qs in qsl for s in qs.split(sep) if s]
    items = []
    for qs in qsl:
        parts = qs.partition(EQ)
        name = uridecode_safe_plus(parts[0], encoding, errors)
        if parts[1]:
            value = uridecode_safe_plus(parts[2], encoding, errors)
        else:
            value = None
        items.append((name, value))
    return items
25f726aa76c3b34a9aebc5e111b28162d0b91e3f
3,650,474
import numpy as np  # assumed; the original snippet omits this import

def rotate_space_123(angles):
    """Returns the direction cosine matrix relating a reference frame B
    rotated relative to reference frame A through the x, y, then z axes of
    reference frame A (space fixed rotations).

    Parameters
    ----------
    angles : numpy.array or list or tuple, shape(3,)
        Three angles (in units of radians) that specify the orientation of a
        new reference frame with respect to a fixed reference frame. The
        first angle is a pure rotation about the x-axis, the second about the
        y-axis, and the third about the z-axis. All rotations are with
        respect to the initial fixed frame, and they occur in the order x,
        then y, then z.

    Returns
    -------
    R : numpy.matrix, shape(3,3)
        Three dimensional rotation matrix about three different orthogonal
        axes.

    Notes
    -----
    R = |c2 * c3    s1 * s2 * c3 - s3 * c1    c1 * s2 * c3 + s3 * s1|
        |c2 * s3    s1 * s2 * s3 + c3 * c1    c1 * s2 * s3 - c3 * s1|
        |-s2        s1 * c2                   c1 * c2               |

    where

    s1, s2, s3 = sine of the first, second and third angles, respectively
    c1, c2, c3 = cosine of the first, second and third angles, respectively

    So the unit vector b1 in the B frame can be expressed in the A frame
    (unit vectors a1, a2, a3) with:

    b1 = c2 * c3 * a1 + c2 * s3 * a2 - s2 * a3

    Thus a vector vb which is expressed in frame B can be expressed in A by
    pre-multiplying by R:

    va = R * vb
    """
    cx = np.cos(angles[0])
    sx = np.sin(angles[0])
    cy = np.cos(angles[1])
    sy = np.sin(angles[1])
    cz = np.cos(angles[2])
    sz = np.sin(angles[2])
    Rz = np.mat([[ cz, -sz, 0],
                 [ sz,  cz, 0],
                 [  0,   0, 1]])
    Ry = np.mat([[ cy, 0, sy],
                 [  0, 1,  0],
                 [-sy, 0, cy]])
    Rx = np.mat([[1,  0,   0],
                 [0, cx, -sx],
                 [0, sx,  cx]])
    return Rz * Ry * Rx
f62ac16e63591c4852681479ab9d39227bad3dfc
3,650,475
def get_tac_resource(url):
    """
    Get the requested resource or update resource using Tacoma account

    :returns: http response with content in xml
    """
    response = TrumbaTac.getURL(url, {"Accept": "application/xml"})
    _log_xml_resp("Tacoma", url, response)
    return response
4d3fce0c7c65a880bf565c79285bcda081d4ef5a
3,650,477
import numpy as np  # assumed; the original snippet omits this import

def cosweightlat(darray, lat1, lat2):
    """Calculate the weighted average for an [:,lat] array over the region
    lat1 to lat2
    """
    # flip latitudes if they are decreasing
    if (darray.lat[0] > darray.lat[darray.lat.size - 1]):
        print("flipping latitudes")
        darray = darray.sortby('lat')
    region = darray.sel(lat=slice(lat1, lat2))
    weights = np.cos(np.deg2rad(region.lat))
    regionw = region.weighted(weights)
    regionm = regionw.mean("lat")
    return regionm
87a8722d4d0b7004007fbce966a5ce99a6e51983
3,650,478
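Note: a usage sketch for cosweightlat, assuming an xarray DataArray with a `lat` coordinate (all names illustrative):

import numpy as np
import xarray as xr

lats = np.arange(-90.0, 91.0, 1.0)
da = xr.DataArray(np.random.rand(12, lats.size),
                  dims=("time", "lat"), coords={"lat": lats})
tropics_mean = cosweightlat(da, -30, 30)  # one weighted mean per time step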
import tensorflow as tf  # assumed; TF1-style API (keep_dims)

def _GetSoftMaxResponse(goal_embedding, scene_spatial):
    """Max response of an embedding across a spatial feature map.

    The goal_embedding is multiplied across the spatial dimensions of the
    scene_spatial to generate a heatmap. Then the spatial softmax-pooled
    value of this heatmap is returned. If the goal_embedding and
    scene_spatial are aligned to the same space, then _GetSoftMaxResponse
    returns larger values if the object is present in the scene, and smaller
    values if the object is not.

    Args:
        goal_embedding: A batch x D tensor embedding of the goal image.
        scene_spatial: A batch x H x W x D spatial feature map tensor.

    Returns:
        max_heat: A tensor of length batch.
        max_soft: The max value of the softmax (ranges between 0 and 1.0)
    """
    batch, dim = goal_embedding.shape
    reshaped_query = tf.reshape(goal_embedding, (int(batch), 1, 1, int(dim)))
    scene_heatmap = tf.reduce_sum(tf.multiply(scene_spatial, reshaped_query),
                                  axis=3, keep_dims=True)
    scene_heatmap_flat = tf.reshape(scene_heatmap, (batch, -1))
    max_heat = tf.reduce_max(scene_heatmap_flat, axis=1)
    scene_softmax = tf.nn.softmax(scene_heatmap_flat, axis=1)
    max_soft = tf.reduce_max(scene_softmax, axis=1)
    return max_heat, max_soft
547e61b403d99f2c0a4b5a0f78c03f7051a10d5c
3,650,479
def summarize_star(star):
    """return one line summary of star"""
    if star.find('name').text[-2] == ' ':
        name = star.find('name').text[-1]
    else:
        name = ' '
    mass = format_star_mass_str(star)
    radius = format_star_radius_str(star)
    temp = format_body_temp_str(star)
    metallicity = format_star_metal_str(star)
    return u'{} {} {:>8} {:>8} {:>8} {:>8} {:>8} {}'.format(
        name, format_spectral_name(star), mass, radius, '', '', temp,
        metallicity)
f9860d742a646637e4b725e39151ed8f5e8adf0f
3,650,480
def to_unicode(text, encoding='utf8', errors='strict'):
    """Convert a string (bytestring in `encoding` or unicode), to unicode."""
    if isinstance(text, unicode):
        return text
    return unicode(text, encoding, errors=errors)
1acb85930349832259e9309fed3669fbd1114cad
3,650,481
from configparser import ConfigParser  # assumed; the original snippet omits this import

def parse_pipfile():
    """Reads package requirements from Pipfile."""
    cfg = ConfigParser()
    cfg.read("Pipfile")
    dev_packages = [p.strip('"') for p in cfg["dev-packages"]]
    relevant_packages = [
        p.strip('"') for p in cfg["packages"] if "nested-dataclasses" not in p
    ]
    return relevant_packages, dev_packages
72f559193b77989afc3aa200b6806ef051280673
3,650,482
from typing import Mapping

def print_dist(d, height=12, pch="o", show_number=False, title=None):
    """ Printing a figure of given distribution

    Parameters
    ----------
    d: dict, list
        a dictionary or a list, contains pairs of: "key" -> "count_value"
    height: int
        number of maximum lines for the graph
    pch : str
        shape of the bars in the plot, e.g 'o'

    Return
    ------
    str
    """
    LABEL_COLOR = ['cyan', 'yellow', 'blue', 'magenta', 'green']
    MAXIMUM_YLABEL = 4
    try:
        if isinstance(d, Mapping):
            d = d.items()
        orig_d = [(str(name), int(count)) for name, count in d]
        d = [(str(name)[::-1].replace('-', '|').replace('_', '|'), count)
             for name, count in d]
        labels = [[c for c in name] for name, count in d]
        max_labels = max(len(name) for name, count in d)
        max_count = max(count for name, count in d)
        min_count = min(count for name, count in d)
    except Exception as e:
        raise ValueError('`d` must be distribution dictionary contains pair of: '
                         'label_name -> disitribution_count, error: "%s"' % str(e))
    # ====== create figure ====== #
    # draw height, 1 line for minimum bar, 1 line for padding the label,
    # then the labels
    nb_lines = int(height) + 1 + 1 + max_labels
    unit = (max_count - min_count) / height
    fig = ""
    # ====== add unit and total ====== #
    fig += ctext("Unit: ", 'red') + \
        '10^%d' % max(len(str(max_count)) - MAXIMUM_YLABEL, 0) + ' '
    fig += ctext("Total: ", 'red') + \
        str(sum(count for name, count in d)) + '\n'
    # ====== add the figure ====== #
    for line in range(nb_lines):
        value = max_count - unit * line
        # draw the y_label
        if line % 2 == 0 and line <= int(height):  # value
            fig += ctext(('%' + str(MAXIMUM_YLABEL) + 's') %
                         str(int(value))[:MAXIMUM_YLABEL], color='red')
        else:  # blank
            fig += ' ' * MAXIMUM_YLABEL
        fig += '|' if line <= int(height) else ' '
        # draw default line
        if line == int(height):
            fig += ''.join([ctext(pch + ' ',
                                  color=LABEL_COLOR[i % len(LABEL_COLOR)])
                            for i in range(len(d))])
        # draw seperator for the label
        elif line == int(height) + 1:
            fig += '-' * (len(d) * 2)
        # draw the labels
        elif line > int(height) + 1:
            for i, lab in enumerate(labels):
                fig += ctext(' ' if len(lab) == 0 else lab.pop(),
                             LABEL_COLOR[i % len(LABEL_COLOR)]) + ' '
        # draw the histogram
        else:
            for i, (name, count) in enumerate(d):
                fig += ctext(pch if count - value >= 0 else ' ',
                             LABEL_COLOR[i % len(LABEL_COLOR)]) + ' '
        # new line
        fig += '\n'
    # ====== add actual number of necessary ====== #
    maximum_fig_length = MAXIMUM_YLABEL + 1 + len(orig_d) * 2
    if show_number:
        line_length = 0
        name_fmt = '%' + str(max_labels) + 's'
        for name, count in orig_d:
            n = len(name) + len(str(count)) + 4
            text = ctext(name_fmt % name, 'red') + ': %d ' % count
            if line_length + n >= maximum_fig_length:
                fig += '\n'
                line_length = n
            else:
                line_length += n
            fig += text
    # ====== add title ====== #
    if title is not None:
        title = ctext('"%s"' % str(title), 'red')
        padding = ' '
        n = (maximum_fig_length - len(title) // 2) // 2 - len(padding) * 3
        fig = '=' * n + padding + title + padding + '=' * n + '\n' + fig
    return fig[:-1]
d6636edbca5b16de8984c36bf9533ae963e21e0e
3,650,483
def count_primes(num):
    """
    Write a function that returns the number of prime numbers that exist
    up to and including a given number

    :param num: int
    :return: int
    """
    count = 0
    # trial division over every candidate up to and including num
    for candidate in range(2, num + 1):
        for i in range(2, candidate):
            if candidate % i == 0:
                break
        else:
            count += 1
    return count
7a544265f3a7eca9118b0647bc8926c655cdb8ec
3,650,485
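Note: quick checks for count_primes:

print(count_primes(10))   # -> 4  (2, 3, 5, 7)
print(count_primes(100))  # -> 25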
def run_experiment(config):
    """
    Run the experiment.

    Args:
        config: The configuration dictionary.

    Returns:
        The experiment result.
    """
    return None
b12a8a5cbdb03d60ca618826f20c9a731a39fd2a
3,650,486
def read_notification(notification_id):
    """Marks a notification as read."""
    notification = Notification.query.get_or_404(notification_id)
    if notification.recipient_email != current_user.email:
        abort(401)
    notification.is_read = True
    db.session.add(notification)
    db.session.commit()
    return NO_PAYLOAD
b2d4066be7b202d680415831fa6d3aa60e2896dc
3,650,487
def grayscale_blur(image):
    """
    Convert image to gray and blur it.
    """
    image_gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    image_gray = cv.blur(image_gray, (3, 3))
    return image_gray
4e8bf0479c653a3ed073481ad71e2530527ec4a3
3,650,488
import hmac

def calculate_stream_hmac(stream, hmac_key):
    """Calculate a stream's HMAC code with the given key."""
    stream.seek(0)
    hash_hmac = hmac.new(bytearray(hmac_key, "utf-8"), digestmod=HASH_FUNCTION)
    while True:
        buf = stream.read(4096)
        if not buf:
            break
        hash_hmac.update(buf)
    return hash_hmac.hexdigest()
35da77cc708b4dc8a256fbfcc012da8c68868c8c
3,650,489
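Note: a usage sketch for calculate_stream_hmac; HASH_FUNCTION is defined elsewhere in the original module, so SHA-256 is assumed here:

import hashlib
from io import BytesIO

HASH_FUNCTION = hashlib.sha256  # assumption; the original constant lives elsewhere
digest = calculate_stream_hmac(BytesIO(b"payload bytes"), "secret-key")
print(digest)  # hex-encoded HMAC-SHA256 of the stream contents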
import re

def convert_tac(ThreeAddressCode):
    """Reads three address code generated from parser and converts to TAC
    for codegen; generates the three_addr_code along with leaders;
    populates generated symbol table as per three_addr_code"""
    for i in range(ThreeAddressCode.length()):
        three_addr_instr = ThreeAddressCode.code[i]
        three_addr_instr = [str(i + 1)] + three_addr_instr
        three_addr_code.add_line(three_addr_instr)
        if len(three_addr_instr) != 5:
            print("Incorrect size for the following instruction: ")
            print(three_addr_instr)
            return -1
        if three_addr_instr[0] == '':
            print("Line number not given in the following instruction: ")
            print(three_addr_instr)
            return -1
        if re.search(r'\D', three_addr_instr[0]) != None:
            print("Invalid line number given in the following instruction: ")
            print(three_addr_instr)
            return -1
        leader_generating_if_instr = []
        leader_generating_if_instr += ['ifgotoeq']
        leader_generating_if_instr += ['ifgotoneq']
        leader_generating_if_instr += ['ifgotolt']
        leader_generating_if_instr += ['ifgotolteq']
        leader_generating_if_instr += ['ifgotogt']
        leader_generating_if_instr += ['ifgotogteq']
        if three_addr_instr[1] in leader_generating_if_instr:
            three_addr_code.add_leader(three_addr_code.length())
        leader_generating_other_instr = ['label']
        if three_addr_instr[1] in leader_generating_if_instr:
            three_addr_code.add_leader(three_addr_code.length() - 1)
        leader_generating_other_instr = []
        leader_generating_other_instr += ['goto']
        leader_generating_other_instr += ['break']
        leader_generating_other_instr += ['continue']
        if three_addr_instr[1] in leader_generating_other_instr:
            three_addr_code.add_leader(three_addr_code.length())
    three_addr_code.leaders = sorted(three_addr_code.leaders, key=int)
    return three_addr_code
4a9408cfbd6b6f79a618b7eb89aa55e6aab25689
3,650,491
def is_english_score(bigrams, word):
    """Calculate the score of a word."""
    prob = 1
    for w1, w2 in zip("!" + word, word + "!"):
        bigram = f"{w1}{w2}"
        if bigram in bigrams:
            prob *= bigrams[bigram]  # / float(bigrams['total'] + 1)
        else:
            print("%s not found" % bigram)
            prob *= 1  # / float(bigrams['total'] + 1)
    return prob
834e28a32806d0599f5df97d978bc6b9c1a51da7
3,650,492
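Note: a usage sketch for is_english_score; the bigram probabilities below are made-up illustrative values, and '!' marks a word boundary:

bigrams = {"!c": 0.2, "ca": 0.3, "at": 0.4, "t!": 0.25}
score = is_english_score(bigrams, "cat")
# multiplies P(!c) * P(ca) * P(at) * P(t!) = 0.2 * 0.3 * 0.4 * 0.25 = 0.006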
def cat(fname, fallback=_DEFAULT, binary=True):
    """Return file content.

    fallback: the value returned in case the file does not exist or
              cannot be read
    binary: whether to open the file in binary or text mode.
    """
    try:
        with open_binary(fname) if binary else open_text(fname) as f:
            return f.read().strip()
    except IOError:
        if fallback != _DEFAULT:
            return fallback
        raise
b3f645d79607f1ed986fe76aa20689d0860ef9ca
3,650,493
import codecs

def createStringObject(string):
    """
    Given a string (either a ``str`` or ``unicode``), create a
    :class:`ByteStringObject<ByteStringObject>` or a
    :class:`TextStringObject<TextStringObject>` to represent the string.
    """
    if isinstance(string, string_type):
        return TextStringObject(string)
    elif isinstance(string, bytes_type):
        try:
            if string.startswith(codecs.BOM_UTF16_BE):
                retval = TextStringObject(string.decode("utf-16"))
                retval.autodetect_utf16 = True
                return retval
            else:
                # This is probably a big performance hit here, but we need
                # to convert string objects into the text/unicode-aware
                # version if possible... and the only way to check if that's
                # possible is to try. Some strings are strings, some are
                # just byte arrays.
                retval = TextStringObject(decodePdfDocEncoding(string))
                retval.autodetect_pdfdocencoding = True
                return retval
        except UnicodeDecodeError:
            return ByteStringObject(string)
    else:
        raise TypeError("createStringObject() should have str or unicode arg")
07c0ca42faa2b68dc347e1edad7f70a07930d891
3,650,495
import win32com.client

def _get_windows_network_adapters():
    """Get the list of windows network adapters."""
    wbem_locator = win32com.client.Dispatch('WbemScripting.SWbemLocator')
    wbem_service = wbem_locator.ConnectServer('.', r'root\cimv2')
    wbem_network_adapters = wbem_service.InstancesOf('Win32_NetworkAdapter')
    network_adapters = []
    for adapter in wbem_network_adapters:
        if (adapter.NetConnectionStatus == 2 or
                adapter.NetConnectionStatus == 7):
            adapter_name = adapter.NetConnectionID
            mac_address = adapter.MacAddress.lower()
            config = adapter.associators_(
                'Win32_NetworkAdapterSetting',
                'Win32_NetworkAdapterConfiguration')[0]
            ip_address = ''
            subnet_mask = ''
            if config.IPEnabled:
                ip_address = config.IPAddress[0]
                subnet_mask = config.IPSubnet[0]
                # config.DefaultIPGateway[0]
            network_adapters.append({'name': adapter_name,
                                     'mac-address': mac_address,
                                     'ip-address': ip_address,
                                     'subnet-mask': subnet_mask})
    return network_adapters
796c25089411633d11b28fdd9c23d900db7005f0
3,650,496
def project(dim, states):
    """Qiskit wrapper of projection operator."""
    ket, bra = states
    if ket in range(dim) and bra in range(dim):
        return st.basis(dim, ket) * st.basis(dim, bra).dag()
    else:
        raise Exception('States are specified on the outside of Hilbert space %s' % states)
351a190ec183264af58de15944efb3af255c5b03
3,650,497
def check_service_status(ssh_conn_obj, service_name, status="running", device='server'):
    """
    Author: Chaitanya Vella ([email protected])
    Function to check the service status

    :param ssh_conn_obj:
    :param service_name:
    :param status:
    :return:
    """
    st.log("##### Checking {} status for {} service ######".format(status, service_name))
    command = "status {}".format(service_name)
    result = conn_obj.execute_command(ssh_conn_obj, command) \
        if device == 'server' else st.config(ssh_conn_obj, command)
    result = utils_obj.remove_last_line_from_string(result)
    if "command not found" not in result:
        match = "start/running" if status == "running" else "stop/waiting"
        if result.find("{}".format(match)) > 1:
            return True
        else:
            command = "service --status-all | grep {}".format(service_name)
            result = conn_obj.execute_command(ssh_conn_obj, command)
            result = utils_obj.remove_last_line_from_string(result)
            operator = "+" if status == "running" else "-"
            return True if operator in result and service_name in result else False
    return False
d8f2a9be7a784ad874d218601fdc043babdafe6e
3,650,498
def _persist_block(block_node, block_map):
    """produce persistent binary data for a single block

    Children block are assumed to be already persisted and present in
    block_map.
    """
    data = tuple(_to_value(v, block_map) for v in block_node)
    return S_BLOCK.pack(*data)
2fb97099135fe931d1d387ed616b152ed7c28b34
3,650,499
import logging

import botocore.exceptions  # assumed; the original snippet omits this import

def get_sagemaker_feature_group(feature_group_name: str):
    """Used to check if there is an existing feature group with a given
    feature_group_name."""
    try:
        return sagemaker_client().describe_feature_group(
            FeatureGroupName=feature_group_name)
    except botocore.exceptions.ClientError as error:
        logging.error(
            f"SageMaker could not find a feature group with the name "
            f"{feature_group_name}. Error {error}"
        )
        return None
1e94e894b1686a6833df51f1006f3f845a9e63b4
3,650,501
def check_system(command, message, exit=0, user=None, stdin=None, shell=False,
                 timeout=None, timeout_signal='TERM'):
    """Runs the command and checks its exit status code.

    Handles all of the common steps associated with running a system command:
    runs the command, checks its exit status code against the expected
    result, and raises an exception if there is an obvious problem.

    Returns a tuple of the standard output, standard error, and the failure
    message generated by diagnose(). See the system() function for more
    details about the command-line options.
    """
    status, stdout, stderr = system(command, user, stdin, shell=shell,
                                    timeout=timeout,
                                    timeout_signal=timeout_signal, quiet=False)
    fail = diagnose(message, command, status, stdout, stderr)
    if timeout and status == -1:
        raise osgunittest.TimeoutException(fail)
    else:
        assert status == exit, fail
    return stdout, stderr, fail
31d83941d5198d0786a6a67a4b1bcd320c26218a
3,650,502
import json

def getJson(file, filters={}):
    """Given a specific JSON file (string) and a set of filters (dictionary
    key-values pairs), will return a JSON-formatted tree of the matching data
    entries from that file (starting as a null-key list of objects).
    """
    with open(file, 'r') as f:
        j = json.loads(f.read())
    all = j['']
    dicts = basic.filter(all, filters)
    if len(dicts) > 0:
        return formatJson(dicts)
    else:
        raise Exception('No matching data entries found')
7b6832eae476eef48584690d993c2dee301bb565
3,650,504
def underline_node_formatter(nodetext, optionstext, caller=None):
    """
    Draws a node with underlines '_____' around it.
    """
    nodetext_width_max = max(m_len(line) for line in nodetext.split("\n"))
    options_width_max = max(m_len(line) for line in optionstext.split("\n"))
    total_width = max(options_width_max, nodetext_width_max)
    separator1 = "_" * total_width + "\n\n" if nodetext_width_max else ""
    separator2 = "\n" + "_" * total_width + "\n\n" if total_width else ""
    return separator1 + "|n" + nodetext + "|n" + separator2 + "|n" + optionstext
598e3aaf875b2539b93ec03d8665cc8011872015
3,650,505
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
    """
    Create the layers for a fully convolutional network.
    Build skip-layers using the vgg layers.

    :param vgg_layer3_out: TF Tensor for VGG Layer 3 output
    :param vgg_layer4_out: TF Tensor for VGG Layer 4 output
    :param vgg_layer7_out: TF Tensor for VGG Layer 7 output
    :param num_classes: Number of classes to classify
    :return: The Tensor for the last layer of output
    """
    fcn8 = tf.layers.conv2d(vgg_layer7_out, filters=num_classes,
                            kernel_size=1, padding="SAME", name='fcn8')
    fcn9 = tf.layers.conv2d_transpose(
        fcn8, filters=vgg_layer4_out.get_shape().as_list()[-1],
        kernel_size=4, strides=(2, 2), padding="SAME", name='fcn9')
    fcn9_skip = tf.add(fcn9, vgg_layer4_out, name='fcn9_plus_layer4')
    fcn10 = tf.layers.conv2d_transpose(
        fcn9_skip, filters=vgg_layer3_out.get_shape().as_list()[-1],
        kernel_size=4, strides=(2, 2), padding="SAME", name='fcn10')
    fcn10_skip = tf.add(fcn10, vgg_layer3_out, name='fcn10_plus_layer3')
    fcn11 = tf.layers.conv2d_transpose(
        fcn10_skip, filters=num_classes, kernel_size=16, strides=(8, 8),
        padding="SAME", name='fcn11')
    return fcn11
cf907d29555fbb7e9a11a1b2f6981637a977bf48
3,650,507
def determine_issues(project):
    """
    Get the list of issues of a project.

    :rtype: list
    """
    issues = project["Issue"]
    if not isinstance(issues, list):
        return [issues]
    return issues
7b8b670e4ad5a7ae49f3541c87026dd603406c9f
3,650,508
def get_media_after_date(mountpoint: str, date: str):
    """
    Date format in EXIF yyyy:mm:dd, look for EXIF:CreateDate
    """
    metadata = get_media_list(mountpoint)
    filtered_meta = list()
    for m in metadata:
        if 'File:FileModifyDate' in m:
            if is_after(m['File:FileModifyDate'].split(' ')[0], date):
                filtered_meta.append(m)
    return filtered_meta
950c937540bd44cd1f577f1ee763262dad51d353
3,650,510
import random

def run_normal_game():
    """Run a complex game, like the real thing."""
    stage = create_stage()
    contestant_first_pick = random.randrange(3)
    montys_pick_algorithm(stage, contestant_first_pick)
    contestant_second_pick = contestants_second_pick_algorithm(stage, contestant_first_pick)
    wins = contestant_wins(stage, contestant_second_pick)
    # print(stage, contestant_first_pick, contestant_second_pick, wins)
    return wins
04f3e8805b3b7d7d9e9f631eee635c4b9af75fdf
3,650,511
def reformat_wolfram_entries(titles, entries):
    """Reformat Wolfram entries."""
    output_list = []
    for title, entry in zip(titles, entries):
        try:
            if ' |' in entry:
                entry = '\n\t{0}'.format(entry.replace(' |', ':')
                                         .replace('\n', '\n\t'))
            if title == 'Result':
                new_entry = entry.encode('utf-8') if PY2 else entry
            else:
                raw_entry = (title + ': ' + entry)
                new_entry = raw_entry.encode('utf-8') if PY2 else raw_entry
            output_list.append(new_entry)
        except (AttributeError, UnicodeEncodeError):
            pass
    return output_list
ba236671187ba4ab80fb013b3ee40c6ae58cc1c8
3,650,512
import six

def GetAndValidateRowId(row_dict):
    """Returns the integer ID for a new Row.

    This method is also responsible for validating the input fields related
    to making the new row ID.

    Args:
        row_dict: A dictionary obtained from the input JSON.

    Returns:
        An integer row ID.

    Raises:
        BadRequestError: The input wasn't formatted properly.
    """
    if 'revision' not in row_dict:
        raise BadRequestError('Required field "revision" missing.')
    try:
        return int(row_dict['revision'])
    except (ValueError, TypeError) as e:
        six.raise_from(
            BadRequestError('Bad value for "revision", should be numerical.'),
            e)
be9f096ddb8bba036d1fa06cdd3565296a949762
3,650,514
def generate_test_cases(ukernel, channel_tile, pixel_tile, isa):
    """Generates all tests cases for a BILINEAR micro-kernel.

    Args:
        ukernel: C name of the micro-kernel function.
        channel_tile: Number of channels processed per one iteration of the
            inner loop of the micro-kernel.
        pixel_tile: Number of pixels processed per one iteration of the outer
            loop of the micro-kernel.
        isa: instruction set required to run the micro-kernel. Generated unit
            test will skip execution if the host processor doesn't support
            this ISA.

    Returns:
        Code for the test case.
    """
    _, test_name = ukernel.split("_", 1)
    _, datatype, ukernel_type, _ = ukernel.split("_", 3)
    test_args = [ukernel]
    return xngen.preprocess(IBILINEAR_TEST_TEMPLATE, {
        "TEST_NAME": test_name.upper().replace("UKERNEL_", ""),
        "TEST_FUNC": ukernel,
        "UKERNEL_TYPE": ukernel_type.upper(),
        "DATATYPE": datatype,
        "CHANNEL_TILE": channel_tile,
        "PIXEL_TILE": pixel_tile,
        "ISA_CHECK": xnncommon.generate_isa_check_macro(isa),
        "next_prime": next_prime,
    })
4fe3243c3f8d2ab3ce7861b46aa96ee79ef1014a
3,650,515
def get_file_range(ase, offsets, timeout=None):
    # type: (blobxfer.models.azure.StorageEntity,
    #        blobxfer.models.download.Offsets, int) -> bytes
    """Retrieve file range

    :param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity
    :param blobxfer.models.download.Offsets offsets: download offsets
    :param int timeout: timeout
    :rtype: bytes
    :return: content for file range
    """
    dir, fpath, _ = parse_file_path(ase.name)
    return ase.client._get_file(
        share_name=ase.container,
        directory_name=dir,
        file_name=fpath,
        start_range=offsets.range_start,
        end_range=offsets.range_end,
        validate_content=False,  # HTTPS takes care of integrity during xfer
        timeout=timeout,
        snapshot=ase.snapshot,
    ).content
be4f2f06c64ee457152fe582128b36db1a1baae4
3,650,516
from argparse import ArgumentParser  # assumed; the original snippet omits this import

def parse_cli_args():
    """Return parsed command-line arguments."""
    parser = ArgumentParser(description='parse and summarize a GLSL file')
    parser.add_argument('path')
    shader_type_names = [member.name for member in ShaderType]
    parser.add_argument('shader_type', nargs='?', choices=shader_type_names,
                        default=ShaderType.Fragment.name)
    return parser.parse_args()
a044e20ea91e05c09cccc118641dac68ce748142
3,650,517
def genus_species_name(genus, species):
    """Return name, genus with species if present.

    Copes with species being None (or empty string).
    """
    # This is a simple function, centralising it for consistency
    assert genus and genus == genus.strip(), repr(genus)
    if species:
        assert species == species.strip(), repr(species)
        return f"{genus} {species}"
    else:
        return genus
1fed57c5c87dfd9362262a69429830c7103b7fca
3,650,518
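Note: quick checks for genus_species_name:

print(genus_species_name("Homo", "sapiens"))  # -> 'Homo sapiens'
print(genus_species_name("Homo", None))       # -> 'Homo'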
def _native_set_to_python_list(typ, payload, c):
    """
    Create a Python list from a native set's items.
    """
    nitems = payload.used
    listobj = c.pyapi.list_new(nitems)
    ok = cgutils.is_not_null(c.builder, listobj)
    with c.builder.if_then(ok, likely=True):
        index = cgutils.alloca_once_value(c.builder,
                                          ir.Constant(nitems.type, 0))
        with payload._iterate() as loop:
            i = c.builder.load(index)
            item = loop.entry.key
            itemobj = c.box(typ.dtype, item)
            c.pyapi.list_setitem(listobj, i, itemobj)
            i = c.builder.add(i, ir.Constant(i.type, 1))
            c.builder.store(i, index)
    return ok, listobj
808a10d85cc19c0b1c31b3e01afc9bbb402e1e90
3,650,519
def var_to_str(var):
    """Returns a string representation of the variable of a Jax expression."""
    if isinstance(var, jax.core.Literal):
        return str(var)
    elif isinstance(var, jax.core.UnitVar):
        return "*"
    elif not isinstance(var, jax.core.Var):
        raise ValueError(f"Idk what to do with this {type(var)}?")
    c = int(var.count)
    if c == -1:
        return "_"
    str_rep = ""
    while c > 25:
        str_rep += chr(c % 26 + ord("a"))
        c = c // 26
    str_rep += chr(c + ord("a"))
    return str_rep[::-1]
820645057359f8704cbd28d2545b9bb3c6e2f4d3
3,650,521
def lal_binary_neutron_star(
        frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1,
        phi_12, a_2, tilt_2, phi_jl, theta_jn, phase, lambda_1, lambda_2,
        **kwargs):
    """ A Binary Neutron Star waveform model using lalsimulation

    Parameters
    ----------
    frequency_array: array_like
        The frequencies at which we want to calculate the strain
    mass_1: float
        The mass of the heavier object in solar masses
    mass_2: float
        The mass of the lighter object in solar masses
    luminosity_distance: float
        The luminosity distance in megaparsec
    a_1: float
        Dimensionless primary spin magnitude
    tilt_1: float
        Primary tilt angle
    phi_12: float
        Azimuthal angle between the two component spins
    a_2: float
        Dimensionless secondary spin magnitude
    tilt_2: float
        Secondary tilt angle
    phi_jl: float
        Azimuthal angle between the total binary angular momentum and the
        orbital angular momentum
    theta_jn: float
        Orbital inclination
    phase: float
        The phase at coalescence
    lambda_1: float
        Dimensionless tidal deformability of mass_1
    lambda_2: float
        Dimensionless tidal deformability of mass_2
    kwargs: dict
        Optional keyword arguments
        Supported arguments:
            waveform_approximant
            reference_frequency
            minimum_frequency
            maximum_frequency
            catch_waveform_errors
            pn_spin_order
            pn_tidal_order
            pn_phase_order
            pn_amplitude_order
            mode_array:
                Activate a specific mode array and evaluate the model using
                those modes only. e.g. waveform_arguments =
                dict(waveform_approximant='IMRPhenomHM',
                mode_array=[[2,2],[2,-2]]) returns the 22 and 2-2 modes only
                of IMRPhenomHM. You can only specify modes that are included
                in that particular model. e.g. waveform_arguments =
                dict(waveform_approximant='IMRPhenomHM',
                mode_array=[[2,2],[2,-2],[5,5],[5,-5]]) is not allowed
                because the 55 modes are not included in this model. Be
                aware that some models only take positive modes and return
                the positive and the negative mode together, while others
                need to call both. e.g. waveform_arguments =
                dict(waveform_approximant='IMRPhenomHM',
                mode_array=[[2,2],[4,-4]]) returns the 22 and 2-2 of
                IMRPhenomHM. However, waveform_arguments =
                dict(waveform_approximant='IMRPhenomXHM',
                mode_array=[[2,2],[4,-4]]) returns the 22 and 4-4 of
                IMRPhenomXHM.

    Returns
    -------
    dict: A dictionary with the plus and cross polarisation strain modes
    """
    waveform_kwargs = dict(
        waveform_approximant='IMRPhenomPv2_NRTidal', reference_frequency=50.0,
        minimum_frequency=20.0, maximum_frequency=frequency_array[-1],
        catch_waveform_errors=False, pn_spin_order=-1, pn_tidal_order=-1,
        pn_phase_order=-1, pn_amplitude_order=0)
    waveform_kwargs.update(kwargs)
    return _base_lal_cbc_fd_waveform(
        frequency_array=frequency_array, mass_1=mass_1, mass_2=mass_2,
        luminosity_distance=luminosity_distance, theta_jn=theta_jn,
        phase=phase, a_1=a_1, a_2=a_2, tilt_1=tilt_1, tilt_2=tilt_2,
        phi_12=phi_12, phi_jl=phi_jl, lambda_1=lambda_1, lambda_2=lambda_2,
        **waveform_kwargs)
4641e65e9f422bb9be9a90f2f849ab58f1cdea51
3,650,522
def handledisc(tree):
    """Binarize discontinuous substitution sites.

    >>> print(handledisc(Tree('(S (X 0 2 4))')))
    (S (X 0 (X|<> 2 (X|<> 4))))
    >>> print(handledisc(Tree('(S (X 0 2))')))
    (S (X 0 (X|<> 2)))
    """
    for a in tree.postorder(lambda n: len(n) > 1 and isinstance(n[0], int)):
        binarize(a, rightmostunary=True, threshold=1)
    return tree
1e164d0174a4b31462369a10e56f9d69d936d18b
3,650,523
def check_bounds(shape: Shape, point: Coord) -> bool:
    """Return ``True`` if ``point`` is valid index in ``shape``.

    Args:
        shape: Shape of two-dimensional array.
        point: Two-dimensional coordinate.

    Return:
        True if ``point`` is within ``shape`` else ``False``.
    """
    return (0 <= point[0] < shape[0]) and (0 <= point[1] < shape[1])
88ab89fddf3f85fc38f3404ed90f384f50337905
3,650,524
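Note: quick checks for check_bounds, assuming Shape and Coord are (int, int) tuple aliases:

print(check_bounds((3, 4), (2, 3)))   # -> True
print(check_bounds((3, 4), (3, 0)))   # -> False, row index out of range
print(check_bounds((3, 4), (-1, 2)))  # -> False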
def logout(home=None):
    """
    Logs out current session and redirects to home

    :param str home: URL to redirect to after logout success
    """
    flask_login.logout_user()
    return redirect(request.args.get('redirect', home or url_for('public.home')))
bded682e6807532aa6382ea0855ee4d335da550f
3,650,525
def N(u, i, p, knots):
    """
    u: point for which a spline should be evaluated
    i: spline knot
    p: spline order
    knots: all knots

    Evaluates the spline basis of order p defined by knots
    at knot i and point u.
    """
    if p == 0:
        if knots[int(i)] < u and u <= knots[int(i + 1)]:
            return 1.0
        else:
            return 0.0
    else:
        try:
            k = ((float((u - knots[int(i)])) /
                  float((knots[int(i + p)] - knots[int(i)]))) *
                 N(u, i, p - 1, knots))
        except ZeroDivisionError:
            k = 0.0
        try:
            q = ((float((knots[int(i + p + 1)] - u)) /
                  float((knots[int(i + p + 1)] - knots[int(i + 1)]))) *
                 N(u, i + 1, p - 1, knots))
        except ZeroDivisionError:
            q = 0.0
        return float(k + q)
0cd0756d558ee99b0ed32350860bc27f023fa88b
3,650,526
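Note: small checks of the Cox-de Boor recursion above; with uniform knots, the degree-1 "hat" basis peaks at the middle knot:

knots = [0, 1, 2, 3, 4]
print(N(0.5, 0, 0, knots))  # -> 1.0, u falls in (knots[0], knots[1]]
print(N(1.0, 0, 1, knots))  # -> 1.0, peak of the hat function on [0, 2]
print(N(1.5, 0, 1, knots))  # -> 0.5, halfway down the falling edge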
def check_merge(s, idn) -> bool:
    """ Check whether a set of nodes is valid to merge """
    found = False
    in_size = None
    out_size = None
    stride = None
    act = None
    nds = [idn[i] for i in state2iset(s)]
    if len(nds) == 1:
        return True
    for nd in nds:
        if not isinstance(nd, Conv):  # current only merge conv
            return False
        if not found:
            in_size = nd.input_shape[1], nd.input_shape[2]
            out_size = nd.output_shape[1], nd.output_shape[2]
            stride = nd.stride[0], nd.stride[1]
            act = nd.act
            found = True
        else:
            # all input resolution, output resolution and stride must be the same
            if in_size[0] != nd.input_shape[1] or in_size[1] != nd.input_shape[2]:
                return False
            if out_size[0] != nd.output_shape[1] or out_size[1] != nd.output_shape[2]:
                return False
            if stride[0] != nd.stride[0] or stride[1] != nd.stride[1]:
                return False
            if nd.groups != 1 or act != nd.act:
                return False
            if len(nd.inputs) > 1 or len(nd.inputs[0]) > 1 or not (nd.inputs[0][0] == nds[0].inputs[0][0]):
                return False
    return True
d0edfee6150d7814c926fb59d413b61a989c9808
3,650,528
def typeMap(name, package=None):
    """ typeMap(name: str) -> Module

    Convert from C/C++ types into VisTrails Module type
    """
    if package is None:
        package = identifier
    if isinstance(name, tuple):
        return [typeMap(x, package) for x in name]
    if name in typeMapDict:
        return typeMapDict[name]
    else:
        registry = get_module_registry()
        if not registry.has_descriptor_with_name(package, name):
            return None
        else:
            return registry.get_descriptor_by_name(package, name).module
6f8ed31cfe1eb88201d0131d43c0fb0da2295405
3,650,529
def _rm_from_diclist(diclist, key_to_check, value_to_check):
    """Function that removes an entry from a list of dictionaries if a key of
    an entry matches a given value. If no value of the key_to_check matches
    the value_to_check for all of the entries in the diclist, the same
    diclist will be returned that was passed to the function.

    Parameters:
        diclist - A list of dictionaries.
        key_to_check - A key of a dictionary whose value should be checked
            to determine if a dictionary should be removed from the diclist.
        value_to_check - The value that should be compared to the value of
            the key_to_check to determine if a dictionary should be removed
            from the diclist.

    Returns the diclist passed to the function with an entry removed if its
    value of the key_to_check matched the value_to_check.
    """
    for i in xrange(len(diclist)):
        if diclist[i][key_to_check] == value_to_check:
            diclist.pop(i)
            break
    return diclist
89806ec5029923709bd44d794d75a84f440c5aa7
3,650,530
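Note: a usage sketch for _rm_from_diclist; it mutates the list in place and removes at most the first matching entry:

rows = [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}, {'id': 2, 'name': 'c'}]
_rm_from_diclist(rows, 'id', 2)
# rows -> [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'c'}]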
import scipy.odr

def odr_linear(x, y, intercept=None, beta0=None):
    """
    Performs orthogonal linear regression on x, y data.

    Parameters
    ----------
    x: array_like
        x-data, 1D array. Must be the same lengths as `y`.
    y: array_like
        y-data, 1D array. Must be the same lengths as `x`.
    intercept: float, default None
        If not None, fixes the intercept.
    beta0: array_like, shape (2,)
        Guess at the slope and intercept, respectively.

    Returns
    -------
    output: ndarray, shape (2,)
        Array containing slope and intercept of ODR line.
    """
    def linear_fun(p, x):
        return p[0] * x + p[1]

    def linear_fun_fixed(p, x):
        return p[0] * x + intercept

    # Set the model to be used for the ODR fitting
    if intercept is None:
        model = scipy.odr.Model(linear_fun)
        if beta0 is None:
            beta0 = (0.0, 1.0)
    else:
        model = scipy.odr.Model(linear_fun_fixed)
        if beta0 is None:
            beta0 = (1.0,)

    # Make a Data instance
    data = scipy.odr.Data(x, y)

    # Instantiate ODR
    odr = scipy.odr.ODR(data, model, beta0=beta0)

    # Perform ODR fit
    try:
        result = odr.run()
    except scipy.odr.odr_error:
        raise scipy.odr.odr_error('ORD failed.')

    return result.beta
51fc464cb60e5b05645907d5ed3ec40d1b9cdb54
3,650,531
def get_centroid_world_coordinates(geo_trans, raster_x_size, raster_y_size,
                                   x_pixel_size, y_pixel_size):
    """Return the raster centroid in world coordinates

    :param geo_trans: geo transformation
    :type geo_trans: tuple with six values
    :param raster_x_size: number of columns
    :type raster_x_size: int
    :param raster_y_size: number of rows
    :param x_pixel_size: pixel size in x direction
    :type x_pixel_size: float
    :param y_pixel_size: pixel size in y direction
    :type y_pixel_size: float
    :return:
    """
    x0, y0 = pixel_to_world(geo_trans, 0, 0)
    x1, y1 = pixel_to_world(geo_trans, raster_x_size - 1, raster_y_size - 1)
    x1 += x_pixel_size
    y1 -= y_pixel_size
    return (x0 + x1) * 0.5, (y0 + y1) * 0.5
e0dd1d57cb020a85d9784f2c9bf22b4b8035ffae
3,650,532
import json

def save_change_item(request):
    """
    Save a changed item.

    Algorithm: look up the matching uuid in request_list and, once found,
    update the stored entry with the new data.

    :param request:
    :return:
    """
    if request.method != 'POST':
        return HttpResponse("Unexpected data.")  # original message: "数据异常."
    str_data = request.POST.get('jsons')
    logger.info("change_item: " + str_data)
    jsons = json.loads(str_data)
    id = jsons['id']
    name = jsons['name']
    url = jsons['url']
    raw_mode_data = jsons['rawModeData']
    method = jsons['method']
    logger.info("send: {}".format(url))
    request_list = JsonConf.json_data['requests']
    for item in request_list:
        if id == item["id"]:
            item["method"] = method
            item["rawModeData"] = raw_mode_data
            item["name"] = name
            item["url"] = url
            break
    JsonConf.store(settings.INIT_DATA)
    return HttpResponse("Saved successfully.")  # original message: "保存成功."
434bd1e77690cd60dd163a39fd1cb90dd0cb4952
3,650,533
import pytdx.hq
import pytdx.util.best_ip

import pandas as pd  # assumed; the original snippet omits this import

def get_lastest_stocklist():
    """
    Fetch the latest broker stock list from the network using pytdx.

    :return: DataFrame with the stock list
    """
    print("Selecting the best TDX quote server; you can also hardcode a "
          "known-good one, e.g. {'ip': '123.125.108.24', 'port': 7709}")
    # ipinfo = pytdx.util.best_ip.select_best_ip()
    api = pytdx.hq.TdxHq_API()
    # with api.connect(ipinfo['ip'], ipinfo['port']):
    with api.connect('123.125.108.24', 7709):
        data = pd.concat([pd.concat(
            [api.to_df(api.get_security_list(j, i * 1000)).assign(sse='sz' if j == 0 else 'sh')
             for i in range(int(api.get_security_count(j) / 1000) + 1)], axis=0)
            for j in range(2)], axis=0)
        data = data.reindex(columns=['sse', 'code', 'name', 'pre_close',
                                     'volunit', 'decimal_point'])
        data.sort_values(by=['sse', 'code'], ascending=True, inplace=True)
        data.reset_index(drop=True, inplace=True)
        # This approach does not work: strings cannot be compared with < / >,
        # and converting to int is even more trouble.
        # df = data.loc[((data['sse'] == 'sh') & ((data['code'] >= '600000') | (data['code'] < '700000'))) | \
        #               ((data['sse'] == 'sz') & ((data['code'] >= '000001') | (data['code'] < '100000'))) | \
        #               ((data['sse'] == 'sz') & ((data['code'] >= '300000') | (data['code'] < '309999')))]
        sh_start_num = data[(data['sse'] == 'sh') & (data['code'] == '600000')].index.tolist()[0]
        sh_end_num = data[(data['sse'] == 'sh') & (data['code'] == '706070')].index.tolist()[0]
        sz00_start_num = data[(data['sse'] == 'sz') & (data['code'] == '000001')].index.tolist()[0]
        sz00_end_num = data[(data['sse'] == 'sz') & (data['code'] == '100303')].index.tolist()[0]
        sz30_start_num = data[(data['sse'] == 'sz') & (data['code'] == '300001')].index.tolist()[0]
        sz30_end_num = data[(data['sse'] == 'sz') & (data['code'] == '395001')].index.tolist()[0]
        df_sh = data.iloc[sh_start_num:sh_end_num]
        df_sz00 = data.iloc[sz00_start_num:sz00_end_num]
        df_sz30 = data.iloc[sz30_start_num:sz30_end_num]
        df = pd.concat([df_sh, df_sz00, df_sz30])
        df.reset_index(drop=True, inplace=True)
        return df
3,650,534
def getStopWords(stopWordFileName):
    """Reads stop-words text file which is assumed to have one word per line.

    Returns stopWordDict.
    """
    stopWordDict = {}
    stopWordFile = open(stopWordFileName, 'r')
    for line in stopWordFile:
        word = line.strip().lower()
        stopWordDict[word] = None
    return stopWordDict
8bb85683f257c35de9d04e4993b42cd758a802e6
3,650,538
def metade(valor, view=False):
    """
    -> Computes half of a salary value

    :param valor: the money amount
    :param view: whether or not to return the value formatted as currency
    :return: half of the value
    """
    if not view:
        return moeda(valor / 2)
    else:
        return valor / 2
fb1bbb605b8a0f1b8623ca70940377bd3c6a440a
3,650,539
import numpy as np  # assumed; the original snippet omits these imports
from scipy.special import gamma, hyp0f1

def monopole(uvecs: [float, np.ndarray], order: int = 3) -> [float, np.ndarray]:
    """
    Solution for I(r) = 1. Also handles nonzero-w case.

    Parameters
    ----------
    uvecs: float or ndarray of float
        The cartesian baselines in units of wavelengths.
        If a float, assumed to be the magnitude of the baseline.
        If an array of one dimension, each entry is assumed to be a magnitude.
        If a 2D array, may have shape (Nbls, 2) or (Nbls, 3).
        In the first case, w is assumed to be zero.
    order: int
        Expansion order to use for non-flat array case (w != 0).

    Returns
    -------
    ndarray of complex
        Visibilities, shape (Nbls,)
    """
    if np.isscalar(uvecs) or uvecs.ndim == 1 or uvecs.shape[1] == 2 \
            or np.allclose(uvecs[:, 2], 0):
        # w is zero.
        uamps = vec_to_amp(uvecs)
        return 2 * np.pi * np.sinc(2 * uamps)
    uvecs = uvecs[..., None]
    ks = np.arange(order)[None, :]
    fac0 = (2 * np.pi * 1j * uvecs[:, 2, :])**ks / (gamma(ks + 2))
    fac1 = hyp0f1((3 + ks) / 2, -np.pi**2 * (uvecs[:, 0, :]**2 + uvecs[:, 1, :]**2))
    return 2 * np.pi * np.sum(fac0 * fac1, axis=-1)
6828b4014fc7970a4d85b6d04b6d3e16249d3dae
3,650,540
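A sanity check of the flat-array branch of monopole(). vec_to_amp is a module-level helper not shown here, so the stand-in below is an assumption about its behaviour (it returns baseline magnitudes):

import numpy as np

def vec_to_amp(uvecs):  # assumed stand-in for the module's real helper
    return np.linalg.norm(np.atleast_2d(uvecs), axis=-1)

# at |u| = 0 the flat-array branch reduces to 2*pi*sinc(0) = 2*pi
assert np.allclose(monopole(0.0), 2 * np.pi)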
def get_question( numbers: OneOrManyOf(NUMBERS_AVAILABLE), cases: OneOrManyOf(CASES_AVAILABLE), num: hug.types.in_range(1, MAX_NUM + 1) = 10): """When queried for one or multiple numbers and cases, this endpoint returns a random question.""" questions = [] bag = NounCaseQuestionBag( noun_bag, adjective_bag, numbers, cases) while len(questions) < num: question = bag.get_question() questions.append( { 'question_elements': question.get_question_elements(), 'answer_elements': question.get_correct_answer_elements() }) return questions
b32d76f6ee7519935292743f6d7d8b8ad7357d3a
3,650,541
from textwrap import dedent

from astropy.table import Table


def _print_attrs(attr, html=False):
    """
    Given an Attr class, will print out each registered attribute.

    Parameters
    ----------
    attr : `sunpy.net.attr.Attr`
        The attr class/type to print for.
    html : bool
        Will return an html table instead.

    Returns
    -------
    `str`
        String with the registered attributes.
    """
    # _ATTR_TUPLE, make_tuple, and get_width are module-level helpers.
    attrs = attr._attr_registry[attr]
    # Only sort the attrs if any have been registered
    sorted_attrs = _ATTR_TUPLE(*zip(*sorted(zip(*attrs)))) if attrs.name else make_tuple()
    *other_row_data, descs = sorted_attrs
    descs = [(dsc[:77] + '...') if len(dsc) > 80 else dsc for dsc in descs]
    table = Table(names=["Attribute Name", "Client", "Full Name", "Description"],
                  dtype=["U80", "U80", "U80", "U80"],
                  data=[*other_row_data, descs])
    class_name = f"{(attr.__module__ + '.') or ''}{attr.__name__}"
    lines = [class_name]
    # Some attrs lack a __doc__; guard against the AttributeError so the
    # function still returns the table in that case.
    try:
        lines.append(dedent(attr.__doc__.partition("\n\n")[0]) + "\n")
    except AttributeError:
        pass
    format_line = "<p>{}</p>" if html else "{}"
    width = -1 if html else get_width()
    lines = [*[format_line.format(line) for line in lines],
             *table.pformat_all(show_dtype=False, max_width=width, align="<", html=html)]
    return '\n'.join(lines)
5044764b8799eed66d3e11fe9423922d79fd9981
3,650,542
import struct
from io import BytesIO


def create_wave_header(samplerate=44100, channels=2, bitspersample=16, duration=3600):
    """Generate a wave header from given params."""
    # pylint: disable=no-member
    file = BytesIO()
    numsamples = samplerate * duration

    # Generate format chunk
    format_chunk_spec = b"<4sLHHLLHH"
    format_chunk = struct.pack(
        format_chunk_spec,
        b"fmt ",  # Chunk id
        16,  # Size of this chunk (excluding chunk id and this field)
        1,  # Audio format, 1 for PCM
        channels,  # Number of channels
        int(samplerate),  # Samplerate, 44100, 48000, etc.
        int(samplerate * channels * (bitspersample / 8)),  # Byterate
        int(channels * (bitspersample / 8)),  # Blockalign
        bitspersample,  # 16 bits for two byte samples, etc.
    )
    # Generate data chunk
    data_chunk_spec = b"<4sL"
    datasize = int(numsamples * channels * (bitspersample / 8))
    data_chunk = struct.pack(
        data_chunk_spec,
        b"data",  # Chunk id
        int(datasize),  # Chunk size (excluding chunk id and this field)
    )
    sum_items = [
        # "WAVE" string following size field
        4,
        # "fmt " + chunk size field + chunk size
        struct.calcsize(format_chunk_spec),
        # Size of data chunk spec + data size
        struct.calcsize(data_chunk_spec) + datasize,
    ]
    # Generate main header
    all_chunks_size = int(sum(sum_items))
    main_header_spec = b"<4sL4s"
    main_header = struct.pack(main_header_spec, b"RIFF", all_chunks_size, b"WAVE")
    # Write all the contents in
    file.write(main_header)
    file.write(format_chunk)
    file.write(data_chunk)
    # return file.getvalue(), all_chunks_size + 8
    return file.getvalue()
b0b53b33733e5456e321cd7c276ad95754140f8a
3,650,543
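A sketch of using the generated header to emit a playable one-second silent WAV (the filename is arbitrary):

# one second of 16-bit stereo silence behind the generated header
header = create_wave_header(samplerate=44100, channels=2,
                            bitspersample=16, duration=1)
silence = b"\x00" * (44100 * 2 * 2)  # samples * channels * bytes-per-sample
with open("silence.wav", "wb") as f:
    f.write(header + silence)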
import re
import unicodedata

from django.utils.safestring import mark_safe


def xslugify(value):
    """
    Converts to ASCII. Converts spaces to hyphens. Removes characters that
    aren't alphanumerics, underscores, slashes, or hyphens. Converts to
    lowercase. Also strips leading and trailing whitespace.
    (I.e., does the same as slugify, but also converts slashes to dashes.)
    """
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    value = re.sub(r'[^\w\s/-]', '', value).strip().lower()
    return mark_safe(re.sub(r'[-\s/]+', '-', value))
7a8a3f00011a46465ccafdcaf1ac797577511b2b
3,650,544
def firstcond(list1, list2):
    """this is a fixture for testing conditions when the list is a
    three node list
    """
    ll = LinkedList()
    ll.insert(1, 5)
    ll.insert(3, 9)
    ll.insert(2, 4)
    return ll
6e50038285e84e986304de5d2b28bef0db32b63d
3,650,545
import numpy as np
import pandas as pd


def random_portfolio(n, k, mu=0., sd=0.01, corr=None, dt=1., nan_pct=0.):
    """ Generate asset prices assuming multivariate geometric Brownian motion.

    :param n: Number of time steps.
    :param k: Number of assets.
    :param mu: Drift parameter. Can be scalar or vector. Default is 0.
    :param sd: Volatility of single assets. Default is 0.01.
    :param corr: Correlation matrix of assets. Default is identity.
    :param dt: Time step.
    :param nan_pct: Add given percentage of NaN values. Useful for testing.
    """
    # default values
    corr = corr if corr is not None else np.eye(k)
    sd = sd * np.ones(k)
    mu = mu * np.ones(k)

    # drift
    nu = mu - sd**2 / 2.

    # do a Cholesky factorization on the correlation matrix
    R = np.linalg.cholesky(corr).T

    # generate uncorrelated random sequence
    # (plain ndarrays with @ replace the deprecated np.matrix)
    x = np.random.normal(size=(n - 1, k))

    # correlate the sequences
    ep = x @ R

    # multivariate brownian
    W = nu * dt + ep @ np.diag(sd) * np.sqrt(dt)

    # generate potential path
    S = np.vstack([np.ones((1, k)), np.cumprod(np.exp(W), 0)])

    # add nan values
    if nan_pct > 0:
        r = np.random.random(S.shape)
        S[r < nan_pct] = np.nan

    return pd.DataFrame(S)
86801609a44619565188cd58b1d519c2e326086b
3,650,548
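A small smoke test of random_portfolio; by construction every generated path starts at price 1:

import numpy as np

S = random_portfolio(250, 4, mu=0.0005, sd=0.01, nan_pct=0.0)
assert S.shape == (250, 4)
assert np.allclose(S.iloc[0], 1.0)  # all paths start at price 1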
import cv2
import numpy as np
from matplotlib import cm

# basic, improc, mask, highlight, and cfg are project-local helper modules
# assumed to be importable alongside this function.


def superpose_images(obj, metadata, skip_overlaps=False, num_frames_for_bkgd=100,
                        every=1, color_objs=False, disp_R=False, b=1.7, d=2,
                        false_color=False, cmap='jet', remove_positive_noise=True):
    """
    Superposes images of an object onto one frame.

    Parameters
    ----------
    obj : TrackedObject
        Object that has been tracked. Must have 'image', 'local centroid',
        'frame_dim', 'bbox', and 'centroid' parameters.
    metadata : dict
        Tracking metadata; must contain 'vid_path' (path to the video in which
        the object was tracked; source folder is `src/`) and 'highlight_kwargs'.
    skip_overlaps : bool, optional
        If True, will skip superposing images that overlap with each other to
        produce a cleaner, though incomplete, image. Default False.
    num_frames_for_bkgd : int, optional
        Number of frames used to compute the median background. Default 100.
    every : int, optional
        Superposes every `every`th image (so if every = 1, superposes every
        image; if every = 2, superposes every *other* image; if every = n,
        superposes every nth image). Default 1.
    color_objs : bool, optional
        If True, will use image processing to highlight objects in each frame
        before superposing. Default False.
    disp_R : bool, optional
        If True, will display the radius measured by image-processing in um
        above each object. Default False.
    b : float, optional
        Factor by which to scale brightness of superposed images to match the
        background. Not sure why they don't automatically appear with the same
        brightness. Default 1.7.
    d : int, optional
        Number of pixels around the bounding box of the object to transfer to
        the superposed image. Helps ensure the outer edge of the image is
        background. Default 2.
    false_color : bool, optional
        If True, false-colors objects by their signed difference with the
        background. Default False.
    cmap : str, optional
        Matplotlib colormap used for false-coloring. Default 'jet'.
    remove_positive_noise : bool, optional
        If True, zeroes out positive differences from the background (assumes
        the object is darker than the background). Default True.

    Returns
    -------
    im : (M x N) numpy array of uint8
        Image of object over time; each snapshot is superposed (likely
        black-and-white)
    """
    ### initializes image as background ###
    # loads parameters
    highlight_kwargs = metadata['highlight_kwargs']
    mask_data = highlight_kwargs['mask_data']
    row_lo, _, row_hi, _ = mask.get_bbox(mask_data)
    # computes background
    bkgd = improc.compute_bkgd_med_thread(metadata['vid_path'],
        vid_is_grayscale=True,  # assumes video is already grayscale
        num_frames=num_frames_for_bkgd,
        crop_y=row_lo,
        crop_height=row_hi-row_lo)

    # copies background to superpose object images on
    im = np.copy(bkgd)
    # converts image to 3-channel if highlighting objects (needs color)
    if color_objs:
        im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)

    # initializes previous bounding box
    bbox_prev = (0, 0, 0, 0)

    # loads video capture object
    cap = cv2.VideoCapture(metadata['vid_path'])

    # gets list of frames with object
    frame_list = obj.get_props('frame')

    ### Superposes image from each frame ###
    ct = 0
    for i, f in enumerate(frame_list):
        # only superposes every "every"th image
        if (ct % every) != 0:
            ct += 1
            continue
        # loads bounding box and image within it
        bbox = obj.get_prop('bbox', f)

        # skips images that overlap if requested
        if skip_overlaps:
            if basic.is_overlapping(bbox_prev, bbox):
                continue
            else:
                bbox_prev = bbox

        # highlights objects if requested
        if color_objs:
            # extracts radius of object
            R = obj.get_prop('radius [um]', f)
            # not sure why, but brightness must be 1.5 to match rest of image
            # selects offset that pushes label out of image
            offset = bbox[3] - bbox[1] + 5
            # reads frame and converts to color
            frame = basic.read_frame(cap, f)
            # highlights the object in the image
            im_obj = highlight.highlight_image(frame, f, cfg.highlight_method,
                            metadata, {R: obj}, [R], brightness=b,
                            offset=offset)
            # shows number ID of object in image
            centroid = obj.get_prop('centroid', f)
            # converts centroid from (row, col) to (x, y) for open-cv
            x = int(centroid[1])
            y = int(centroid[0])

            # superposes object image on overall image (3-channel images);
            # the d-pixel margin comes from the function parameter
            row_min, col_min, row_max, col_max = bbox
            im[row_min-d:row_max+d, col_min-d:col_max+d, :] = \
                    im_obj[row_min-d:row_max+d, col_min-d:col_max+d, :]

            if disp_R:
                # prints label on image (radius [um])
                im = cv2.putText(img=im, text='{0:.1f}'.format(R),
                                 org=(x-10, y-7), fontFace=0, fontScale=0.5,
                                 color=cfg.white, thickness=2)
        else:
            # loads image
            im_raw = basic.read_frame(cap, f)
            im_obj = cv2.cvtColor(basic.adjust_brightness(im_raw, b),
                                  cv2.COLOR_BGR2GRAY)[row_lo:row_hi, :]

            # superposes object image on overall image
            row_min, col_min, row_max, col_max = bbox
            im[row_min:row_max, col_min:col_max] = im_obj[row_min:row_max,
                                                          col_min:col_max]

        # increments counter
        ct += 1

    # false-colors objects by taking signed difference with background
    if false_color:
        signed_diff = im.astype(int) - bkgd.astype(int)
        # remove noise above 0 (*assumes object is darker than background)
        if remove_positive_noise:
            signed_diff[signed_diff > 0] = 0
        # defines false-color mapping to range to max difference
        max_diff = max(np.max(np.abs(signed_diff)), 1)  # ensures >= 1
        # normalizes image so -max_diff -> 0 and +max_diff -> 1
        im_norm = (signed_diff + max_diff) / (2*max_diff)
        # maps normalized image to color image (still as floats from 0 to 1)
        color_mapped = cm.get_cmap(cmap)(im_norm)
        # converts to OpenCV format (uint8 0 to 255)
        im_false_color = basic.cvify(color_mapped)
        # converts from RGBA to RGB
        im = cv2.cvtColor(im_false_color, cv2.COLOR_RGBA2RGB)

    return im
10767c9d10e5d32af51a31f1f8eb85d8989bb5d4
3,650,550
import numpy as np


def gen_s_linear(computed_data, param):
    """Generate the sensitivity matrix for a wavelength-dependent sensitivity
    modeled as a straight line. `scenter` and `scale1` are module-level
    constants defined elsewhere."""
    mat = np.zeros((computed_data.shape[0], computed_data.shape[0]))
    for i in range(computed_data.shape[0]):
        for j in range(computed_data.shape[0]):
            v1 = computed_data[i, 0] - scenter  # col 0 has position
            v2 = computed_data[j, 0] - scenter  # col 0 has position
            c1 = param[0]
            mat[i, j] = (1 + (c1 / scale1) * v1) / \
                        (1 + (c1 / scale1) * v2)
    return mat
c18c31e65804d65ac7419a2037f889a0f9de2f96
3,650,551
import numpy def upsample2(x): """ Up-sample a 2D array by a factor of 2 by interpolation. Result is scaled by a factor of 4. """ n = [x.shape[0] * 2 - 1, x.shape[1] * 2 - 1] + list(x.shape[2:]) y = numpy.empty(n, x.dtype) y[0::2, 0::2] = 4 * x y[0::2, 1::2] = 2 * (x[:, :-1] + x[:, 1:]) y[1::2, 0::2] = 2 * (x[:-1, :] + x[1:, :]) y[1::2, 1::2] = x[:-1, :-1] + x[1:, 1:] + x[:-1, 1:] + x[1:, :-1] return y
4eb23d668154ac12755c0e65eeff485ac5e5dd23
3,650,552
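A worked check of upsample2 on a 2x2 input, showing the factor-of-4 scaling mentioned in the docstring:

x = numpy.array([[0., 1.],
                 [2., 3.]])
y = upsample2(x)
# corners carry 4*x, edges average two neighbours, the centre averages four
assert numpy.allclose(y, [[0., 2., 4.],
                          [4., 6., 8.],
                          [8., 10., 12.]])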
import six def mark_safe(s): """ Explicitly mark a string as safe for (HTML) output purposes. The returned object can be used everywhere a string or unicode object is appropriate. Can be called multiple times on a single string. """ if isinstance(s, SafeData): return s if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes): return SafeBytes(s) if isinstance(s, (six.text_type, Promise)): return SafeText(s) return SafeString(str(s))
dab8c0dfb78fd22fb35b5abc3680f74de8a1089a
3,650,553
from flask import jsonify
from flask_api import status  # assumed source of the HTTP status constants


def index():
    """ Root URL response """
    return jsonify(name='Payment Demo REST API Service',
                   version='1.0'), status.HTTP_200_OK
2d370a9fdf1878f60af6de264d99193d06ff96d2
3,650,555
import numpy as np


def unvoigt(A):
    """
    Converts from 6x1 to 3x3
    :param A: 6x1 Voigt vector (strain or stress)
    :return: 3x3 symmetric tensor (strain or stress)
    """
    a = np.zeros(shape=(3, 3))
    a[0, 0] = A[0]
    a[0, 1] = A[5]
    a[0, 2] = A[4]
    a[1, 0] = A[5]
    a[1, 1] = A[1]
    a[1, 2] = A[3]
    a[2, 0] = A[4]
    a[2, 1] = A[3]
    a[2, 2] = A[2]
    return a
72b28fceedb5ae2d34c768d5c29b5924310ff2b3
3,650,556
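A worked example of unvoigt assuming the standard Voigt ordering (xx, yy, zz, yz, xz, xy), which matches the index mapping above:

import numpy as np

A = [11., 22., 33., 23., 13., 12.]  # xx, yy, zz, yz, xz, xy
expected = np.array([[11., 12., 13.],
                     [12., 22., 23.],
                     [13., 23., 33.]])
assert np.allclose(unvoigt(A), expected)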
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist


def _calculate_rmsd(P, Q):
    """Calculates the root-mean-square distance between the points of P and Q.

    The distance is taken as the minimum over all possible matchings. It is
    zero if P and Q are identical and non-zero if not.
    """
    distance_matrix = cdist(P, Q, metric='sqeuclidean')
    matching = linear_sum_assignment(distance_matrix)
    # divide by the number of points so the result is a true root-*mean*-square
    return np.sqrt(distance_matrix[matching].sum() / len(P))
22261e75edf3edf378fa30daa5c33abc68ff93cd
3,650,558
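Two sanity checks for _calculate_rmsd: a permuted copy of the same points has distance zero, and a single point offset by a 3-4-5 triangle has distance 5:

import numpy as np

P = np.array([[0., 0.], [1., 0.]])
Q = np.array([[1., 0.], [0., 0.]])  # same points, different order
assert np.isclose(_calculate_rmsd(P, Q), 0.0)

# a single point offset by (3, 4) is at Euclidean distance 5
assert np.isclose(_calculate_rmsd([[0., 0.]], [[3., 4.]]), 5.0)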
import tensorflow as tf  # TensorFlow 1.x API (tf.contrib was removed in TF 2.x)


def initialize_parameters():
    """
    Initializes weight parameters to build a neural network with tensorflow. The shapes are:
                        W1 : [4, 4, 3, 8]
                        W2 : [2, 2, 8, 16]
    Note that we will hard code the shape values in the function to make the grading simpler.
    Normally, functions should take values as inputs rather than hard coding.
    Returns:
    parameters -- a dictionary of tensors containing W1, W2
    """
    tf.set_random_seed(1)  # so that your "random" numbers match ours

    ### START CODE HERE ### (approx. 2 lines of code)
    W1 = tf.get_variable("W1", [4, 4, 3, 8], initializer=tf.contrib.layers.xavier_initializer(seed=0))
    W2 = tf.get_variable("W2", [2, 2, 8, 16], initializer=tf.contrib.layers.xavier_initializer(seed=0))
    ### END CODE HERE ###

    parameters = {"W1": W1,
                  "W2": W2}

    return parameters
43481172a70ea88bcf5cfbc95792365c5af2ea52
3,650,559
import time import random import string import hashlib def generate_dynamic_secret(salt: str) -> str: """Creates a new overseas dynamic secret :param salt: A ds salt """ t = int(time.time()) r = "".join(random.choices(string.ascii_letters, k=6)) h = hashlib.md5(f"salt={salt}&t={t}&r={r}".encode()).hexdigest() return f"{t},{r},{h}"
2a9bdf00daea91f13f34724d1c744c17e9b4d6cf
3,650,560
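Typical usage of generate_dynamic_secret attaches the result as a DS header; the salt below is a placeholder, and the "DS" header name is the usual convention for these APIs rather than anything enforced by the function:

ds = generate_dynamic_secret("example-salt")  # placeholder salt
headers = {"DS": ds}
print(ds)  # e.g. "1700000000,AbCdEf,5d41402abc4b2a76b9719d911017c592"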
def is_sim_f(ts_kname): """ Returns True if the TSDist is actually a similarity and not a distance """ return ts_kname in ('linear_allpairs', 'linear_crosscor', 'cross_correlation', 'hsdotprod_autocor_truncated', 'hsdotprod_autocor_cyclic')
11c18983d8d411714ba3147d4734ad77c40ceedf
3,650,562
import pika
from typing import Text, Union


def initialise_pika_connection(
    host: Text,
    username: Text,
    password: Text,
    port: Union[Text, int] = 5672,
    connection_attempts: int = 20,
    retry_delay_in_seconds: float = 5,
) -> "BlockingConnection":
    """Create a Pika `BlockingConnection`.

    Args:
        host: Pika host
        username: username for authentication with Pika host
        password: password for authentication with Pika host
        port: port of the Pika host
        connection_attempts: number of connection attempts before giving up
        retry_delay_in_seconds: delay in seconds between connection attempts

    Returns:
        Pika `BlockingConnection` with provided parameters
    """
    parameters = _get_pika_parameters(  # module-level helper
        host, username, password, port, connection_attempts, retry_delay_in_seconds
    )
    return pika.BlockingConnection(parameters)
7364547a4836aea0b277098bed75b8c5ec874522
3,650,563
def units(arg_name, unit):
    """Decorator to define units for an input.

    Associates a unit of measurement with an input.

    Parameters
    ----------
    arg_name : str
        Name of the input to attach a unit to.
    unit : str
        Unit of measurement descriptor to use (e.g. "mm").

    Example
    --------
    Create an operation where its `x` parameter has its units defined in microns.

    >>> @OperationPlugin
    ... @units('x', '\u03BC' + 'm')
    ... def op(x: float = -1) -> float:
    ...     return x * -1.0
    """
    def decorator(func):
        _quick_set(func, 'units', arg_name, unit, {})
        return func
    return decorator
45bd1695cada5612e2ce9e39632ed1357556535f
3,650,564
from asyncio import Task, create_task, to_thread
from typing import Callable


async def to_thread_task(func: Callable, *args, **kwargs) -> Task:
    """Run a blocking callable in a worker thread and schedule it as a Task
    (asyncio.to_thread requires Python 3.9+)."""
    coro = to_thread(func, *args, **kwargs)
    return create_task(coro)
ad666a91588a670be7babf84294f338f0148b8e1
3,650,565
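A minimal usage sketch for to_thread_task (Python 3.9+, since asyncio.to_thread is required):

import asyncio
import time

def blocking_io(n):
    time.sleep(0.1)  # stand-in for real blocking work
    return n * 2

async def main():
    task = await to_thread_task(blocking_io, 21)  # schedules the thread as a Task
    print(await task)  # 42

asyncio.run(main())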
from pathlib import Path
from typing import Any, Dict, Optional, Union


def combo2fname(
    combo: Dict[str, Any],
    folder: Optional[Union[str, Path]] = None,
    ext: Optional[str] = ".pickle",
    sig_figs: int = 8,
) -> str:
    """Converts a dict into a human readable filename.

    Improved version of `combo_to_fname`."""
    name_parts = [f"{k}_{maybe_round(v, sig_figs)}" for k, v in sorted(combo.items())]
    fname = Path("__".join(name_parts) + (ext or ""))  # tolerate ext=None
    if folder is None:
        return str(fname)  # the signature promises str, not Path
    return str(Path(folder) / fname)  # folder may be a plain str
9951171647167e39753546645f8e1f185d9fa55a
3,650,567
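For illustration, a sketch of combo2fname with a hypothetical stand-in for the external maybe_round() helper:

from pathlib import Path

def maybe_round(v, sig_figs):  # assumed stand-in for the module's real helper
    return round(v, sig_figs) if isinstance(v, float) else v

print(combo2fname({"seed": 3, "lr": 0.1}))
# -> lr_0.1__seed_3.pickle  (keys sorted, parts joined by double underscores)
print(combo2fname({"seed": 3, "lr": 0.1}, folder=Path("results")))
# -> results/lr_0.1__seed_3.pickle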
def cls_from_str(name_str):
    """ Gets class of unit type from a string

    Helper function for end-users entering the name of a unit type and
    retrieving the class that contains stats for that unit type.

    Args:
        name_str: str

    Returns:
        UnitStats, or None if no unit type matches
    """
    name_str = name_str.lower()
    for cls in _UNIT_TYPES.values():
        if cls.name.lower() == name_str:
            return cls
    return None
4dc26f8586065319a25f8965a5267308bd8dbfea
3,650,568
def convert_to_github_url_with_token(url, token):
    """
    Convert a GitHub URL to a git+https url that authenticates via an OAuth token.
    This allows for installation of private packages.
    :param url: The url to convert into a GitHub access token oauth url.
    :param token: The GitHub access token to use for the oauth url.
    :return: A git+https url with OAuth identification.
    """
    for prefix in [GIT_SSH_PREFIX, GIT_GIT_PREFIX, GIT_HTTPS_PREFIX]:
        if url.startswith(prefix):
            # token as the username, "x-oauth-basic" as the password --
            # GitHub's OAuth basic-auth convention
            return 'git+https://{}:x-oauth-basic@github.com/{}'.format(token, url[len(prefix):])
    return url
9b9c5e17cb389eb938af1221518a6838e65712bc
3,650,569
from datetime import datetime

import numpy


def get_numbers_of_papers(metrics):
    """
    Convert the metrics into a format that is easier to work with:
    year-ordered numpy arrays.
    """
    publications = metrics['histograms']['publications']
    year, total, refereed = [], [], []
    for k in sorted(publications['all publications']):
        year.append(datetime.strptime(k, '%Y'))
        total.append(publications['all publications'][k])
        refereed.append(publications['refereed publications'][k])
    year, total, refereed = \
        numpy.array(year), numpy.array(total), numpy.array(refereed)
    return year, total, refereed
ce8b079ea416ff01b4974ea7ae7aa82080321cbb
3,650,570
import json
from typing import Dict

import pytest


def test_base_provider_get_transform_json_exception(mock_name, mock_value):
    """
    Test BaseProvider.get() with a json transform that raises an exception
    """
    # the trailing "{" makes the payload invalid JSON on purpose
    mock_data = json.dumps({mock_name: mock_value}) + "{"

    class TestProvider(BaseProvider):  # BaseProvider and parameters come from the module under test
        def _get(self, name: str, **kwargs) -> str:
            assert name == mock_name
            return mock_data

        def _get_multiple(self, path: str, **kwargs) -> Dict[str, str]:
            raise NotImplementedError()

    provider = TestProvider()

    with pytest.raises(parameters.TransformParameterError) as excinfo:
        provider.get(mock_name, transform="json")

    assert "Extra data" in str(excinfo)
abb81b142a34f264466b808867bc3a7cc4460fcf
3,650,571
def load_check_definitions(lang):
    """ Retrieve Trusted Advisor check definitions """
    retval = {}
    resp = TA_C.describe_trusted_advisor_checks(language=lang)
    if resp:
        try:
            checks = resp['checks']
            retval = {a['id']: a for a in checks}
        except (KeyError, TypeError):
            # a missing 'checks' or 'id' key raises KeyError, not ValueError
            LOGGER.error('Received invalid check definitions: %s', str(resp))
    else:
        LOGGER.error('No response from check definitions')
    return retval
43bca091506d33270a7e0fa3ec6ca84e4c342bf6
3,650,572
def filter_phrase(comments, phrase):
    """Returns list of comments and replies filtered by substring."""
    phrase_lower = phrase.lower()  # lower-case once instead of per comparison
    results = []
    for comment in comments:
        if phrase_lower in comment.message.lower():
            results.append(comment)
        for reply in comment.replies:
            if phrase_lower in reply.message.lower():
                results.append(reply)
    if not results:
        return None
    return results
0865163f117550e36b2c21608739649b7b99f825
3,650,573
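A quick demonstration of filter_phrase with stand-in comment objects; any object exposing .message and .replies works:

from types import SimpleNamespace as NS

reply = NS(message="Totally agree!", replies=[])
comments = [
    NS(message="Great post about Python.", replies=[reply]),
    NS(message="Off topic.", replies=[]),
]

assert filter_phrase(comments, "python") == [comments[0]]
assert filter_phrase(comments, "agree") == [reply]   # replies are searched too
assert filter_phrase(comments, "missing") is None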