content: string (35 – 762k characters)
sha1: string (40 characters)
id: int64 (0 – 3.66M)
from functools import wraps

def decoMakerApiCallChangePosToOptArg(argPos, argName):
    """ creates a decorator, which changes the positional argument ARGPOS into
    an optional argument ARGNAME.

    argPos=1 is the first positional arg """

    # For background on decorators that take arguments, see
    # http://stackoverflow.com/questions/739654/how-can-i-make-a-chain-of-function-decorators-in-python
    # - Passing arguments to the decorator

    def decoApiCallChangePosToOptArg(methodAPI):
        """ Decorator to change a positional argument into an optional ARGNAME """

        @wraps(methodAPI)
        def wrapperChangePosToOptArg(self, *argsPositional, **argsOptional):
            argsPositionalChanged = list(argsPositional)
            if (argPos > 0) and (len(argsPositional) >= argPos):
                argValue = argsPositionalChanged.pop(argPos - 1)
                argsOptional[argName] = argValue
            return methodAPI(self, *argsPositionalChanged, **argsOptional)
        return wrapperChangePosToOptArg
    return decoApiCallChangePosToOptArg
5f189c70efc45b9fa4b06f7eb650ad4b245cbf51
19,392
import pandas as pd

def wrangle_address_dist_matrix(ba_rel, ba_asso):
    """
    Get all buffer level derived variables based on "AH, Relatives and Associates" file

    Note: we have some participants that list relatives but no associates (or vice versa),
    so we have to fill NA values

    Keyword Arguments:
    - ba_rel: address-distance comparison DataFrame for relatives
    - ba_asso: address-distance comparison DataFrame for associates

    Returns:
    - pd.DataFrame of concordant best-address counts, with NAs filled with 0
    """
    # lex_bestAddressSame_*_rel_c
    ba_rel['zero'] = 0
    g = ba_rel.groupby(['ssn_altkey', 'timeseries1'])
    g2 = ba_rel[ba_rel['distance_spheroid_m'] == 0].groupby(['ssn_altkey', 'timeseries1'])
    concordant_rel = g2.size().combine_first(g.first().zero)
    concordant_rel.index.names = ['ssn_altkey', 'best_address_num']
    concordant_rel.name = 'adr_lex_bestAddressSameRel_c'

    # lex_bestAddressSame_*_asso_c
    ba_asso['zero'] = 0
    g = ba_asso.groupby(['ssn_altkey', 'timeseries1'])
    g2 = ba_asso[ba_asso['distance_spheroid_m'] == 0].groupby(['ssn_altkey', 'timeseries1'])
    concordant_asso = g2.size().combine_first(g.first().zero)
    concordant_asso.index.names = ['ssn_altkey', 'best_address_num']
    concordant_asso.name = 'adr_lex_bestAddressSameAsso_c_2014'

    return pd.concat([concordant_rel, concordant_asso], axis=1).fillna(0)
5e586b5c8225f130868f1bce2586043f07c0c660
19,393
import math

def circlePoints(x, r, cx, cy):
    """The function returns the y coordinate of a point on a circumference (upper half)

    :x: x's coordinate value.
    :r: length of the radius.
    :cx: x coordinate of the center.
    :cy: y coordinate of the center."""
    return math.sqrt(math.pow(r, 2) - math.pow(x - cx, 2)) + cy
c2cc14a845dccbcf62a38be3af69808024289adc
19,394
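Usage sketch for the circlePoints snippet above (added for illustration; the circle parameters and sample grid are arbitrary assumptions, not part of the original row):

import numpy as np

# Upper semicircle of radius 2 centred at (1, 1); circlePoints comes from the snippet above.
xs = np.linspace(-1.0, 3.0, 5)
ys = [circlePoints(x, r=2.0, cx=1.0, cy=1.0) for x in xs]
print(list(zip(xs, ys)))  # e.g. circlePoints(1.0, 2.0, 1.0, 1.0) == 3.0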
import re def get_thread_info(): """ Returns a pair of: - map of LWP -> thread ID - map of blocked threads LWP -> potential mutex type """ # LWP -> thread ID lwp_to_thread_id = {} # LWP -> potential mutex type it is blocked on blocked_threads = {} output = gdb.execute("info threads", from_tty=False, to_string=True) lines = output.strip().split("\n")[1:] regex = re.compile(r"[\s\*]*(\d+).*Thread.*\(LWP (\d+)\).*") for line in lines: try: thread_id = int(regex.match(line).group(1)) thread_lwp = int(regex.match(line).group(2)) lwp_to_thread_id[thread_lwp] = thread_id mutex_type = MutexType.get_mutex_type(thread_id, line) if mutex_type: blocked_threads[thread_lwp] = mutex_type except Exception: continue return (lwp_to_thread_id, blocked_threads)
36eecd0fc0fb7fc062035e087ef503d007c2c8cc
19,395
def _maybe_convert_labels(y_true): """Converts binary labels into -1/1.""" are_zeros = math_ops.equal(y_true, 0) are_ones = math_ops.equal(y_true, 1) is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones)) def _convert_binary_labels(): # Convert the binary labels to -1 or 1. return 2. * y_true - 1. updated_y_true = smart_cond.smart_cond(is_binary, _convert_binary_labels, lambda: y_true) return updated_y_true
bfd37162587fe3cf7cd0fd311304e70042ebd626
19,396
import json

import numpy as np
import pandas as pd
import plotly
import plotly.graph_objs as go

def create_plot(feature="bar"):
    """provides randomly generated plots"""
    if feature == "bar":
        N = 40
        x = np.linspace(0, 1, N)
        y = np.random.randn(N)
        df = pd.DataFrame({'x': x, 'y': y})  # creating a sample dataframe

        data = [
            go.Bar(
                x=df['x'],  # assign x as the dataframe column 'x'
                y=df['y']
            )
        ]
    else:
        N = 1000
        random_x = np.random.randn(N)
        random_y = np.random.randn(N)

        # Create a trace
        data = [go.Scatter(
            x=random_x,
            y=random_y,
            mode='markers'
        )]

    graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)
    return graphJSON
0d2d41e8b2a07f6a4aa3d1ade2311cea02c5a63b
19,397
import torch def get_gram_matrix(tensor): """ Returns a Gram matrix of dimension (distinct_filer_count, distinct_filter_count) where G[i,j] is the inner product between the vectorised feature map i and j in layer l """ G = torch.mm(tensor, tensor.t()) return G
ad86f06768c07d6fe1ff509d996991f786ea1ffa
19,398
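Usage sketch for the get_gram_matrix snippet above (added for illustration; assumes PyTorch, and the feature-map size is an arbitrary assumption):

import torch

# Flatten a (channels, height, width) feature map to (C, H*W) before taking the Gram matrix.
fmap = torch.randn(8, 16, 16)
flat = fmap.view(8, -1)
G = get_gram_matrix(flat)  # shape (8, 8); G[i, j] is the inner product of feature maps i and j
print(G.shape)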
import xarray as xr

def calc_stat_moments(ds, dim_aggregator='time', time_constraint=None):
    """Calculates the first two statistical moments and the coefficient of
    variation over the specified dimension.

    Parameters:
    -----------
    ds : xr.Dataset
    dim_aggregator : str
        dimension to calculate the statistical moments over
        ('spatial' for latitude/longitude, anything else for 'time')
    time_constraint : str
        optional grouping of the time axis ('seasonally' or 'monthly')

    Returns
    -------
    xr.Dataset
        dataset with a new 'stat_moments' dimension holding mean, std and
        the variation coefficient
    """
    if dim_aggregator == 'spatial':
        dim_aggregator = ['latitude', 'longitude']
    else:
        dim_aggregator = 'time'

    if time_constraint == 'seasonally':
        mu = ds.groupby('time.season').mean(dim=dim_aggregator)
        sig = ds.groupby('time.season').std(dim=dim_aggregator)
    elif time_constraint == 'monthly':
        mu = ds.groupby('time.month').mean(dim=dim_aggregator)
        sig = ds.groupby('time.month').std(dim=dim_aggregator)
    else:
        mu = ds.mean(dim=dim_aggregator)
        sig = ds.std(dim=dim_aggregator)

    vc = sig**2/mu

    ds_new = xr.concat([mu, sig, vc], dim='stat_moments')
    ds_new.coords['stat_moments'] = ['mean', 'std', 'vc']

    return ds_new
67a18691b183803ab9f9b20f02e2e36397648f6f
19,399
import uuid from datetime import datetime def submit_ride_request(): """ submit a ride request with the following required fields: - netId (string) - netId of requester - date (date object) - date of travel - time (time object) - time of travel - origin (string) - chosen from dropdown of origins - destination (string) - chosen from dropdown of destinations - preferred_car_type (string) - "regular" or "XL" - preferred_group_size (int) - number of desired riders in group """ if REQUIRE_KEY: try: key = request.headers["api-key"] except: return jsonify({"error": "No API key provided - permission denied"}), 403 if not is_key_valid(key): return jsonify({"error": "Invalid API key - permission denied"}), 403 netid = request.json.get("netId") if not netid: return jsonify({"error": "Please provide netId to request a ride"}), 400 date = request.json.get("date") if not date: return jsonify({"error": "Please provide date to request a ride"}), 400 time = request.json.get("time") if not time: return jsonify({"error": "Please provide time to request a ride"}), 400 origin = request.json.get("origin") if not origin: return jsonify({"error": "Please provide origin to request a ride"}), 400 destination = request.json.get("destination") if not destination: return jsonify({"error": "Please provide destination to request a ride"}), 400 preferred_car_type = request.json.get("preferred_car_type") if not preferred_car_type: return ( jsonify( {"error": "Please provide preferred_car_type to request a ride"}), 400, ) preferred_group_size = request.json.get("preferred_group_size") if not preferred_group_size: return ( jsonify( {"error": "Please provide preferred_group_size to request a ride"}), 400, ) uid = str(uuid.uuid4()) # generate unique identifier for request tz = timezone("EST") curr_time = datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S") resp = client.put_item( TableName=REQUESTS_TABLE, Item={ "requestId": {"S": uid}, "netId": {"S": netid}, "request_time": {"S": curr_time}, "date": {"S": date}, "time": {"S": time}, "origin": {"S": origin}, "destination": {"S": destination}, "preferred_car_type": {"S": preferred_car_type}, "preferred_group_size": {"N": preferred_group_size}, "matched": {"BOOL": False}, "groupId": {"S": ""}, "confirmed": {"BOOL": False}, "allConfirmed": {"BOOL": False}, "rematch": {"BOOL": False}, }, ) return jsonify( { "requestId": uid, "netId": netid, "reesponse_time": curr_time, "date": date, "time": time, "origin": origin, "destination": destination, "preferred_car_type": preferred_car_type, "preferred_group_size": preferred_group_size, "matched": False, "groupId": "", "confirmed": False, "allConfirmed": False, } )
5f7d1d2d98adb3aa336b48032331b5b5fe2114f2
19,400
def _heatmap(data, row_ticks=None, col_ticks=None, row_labels=None, col_labels=None, ax=None, cbar_kw={}, cbarlabel="", **kwargs): """Create a heatmap from a numpy array and two lists of labels. (Code from `matplotlib documentation <https://matplotlib.org/stable/gallery/images_contours_and_fields/image_annotated_heatmap.html>`_) Args: data (np.ndarray): A 2D numpy array of shape ``[H, W]``. row_ticks (list[float]): List of row (y-axis) tick locations. col_ticks (list[float]): List of column (x-axis) tick locations. row_labels (list[str]): A list labels for the rows. Its length should be equal to that of ``row_ticks`` if ``row_ticks`` is not None. Otherwise, it should have a length of ``H``. col_labels (list[str]): A list of labels for the columns. Its length should be equal to that of ``col_ticks`` if ``col_ticks`` is not None. Otherwise, it should have a length of ``W``. ax (matplotlib.axes.Axes): instance to which the heatmap is plotted. If None, use current axes or create a new one. cbar_kw (dict): A dictionary with arguments to ``matplotlib.Figure.colorbar``. cbarlabel (str): The label for the colorbar. **kwargs: All other arguments that are forwarded to ``ax.imshow``. Returns: tuple: - matplotlib.image.AxesImage: the heatmap image - matplotlib.pyplot.colorbar: the colorbar of the heatmap """ if not ax: ax = plt.gca() # Plot the heatmap im = ax.imshow(data, **kwargs) # Create colorbar cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw) cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom") if col_ticks is None: # show all the ticks by default col_ticks = np.arange(data.shape[1] + 1) - .5 ax.set_xticks(col_ticks, minor=True) if row_ticks is None: # show all the ticks by default row_ticks = np.arange(data.shape[0] + 1) - .5 ax.set_yticks(row_ticks, minor=True) # ... and label them with the respective list entries. if col_labels is not None: assert len(col_ticks) == len(col_labels), ( "'col_ticks' should have the " "same length as 'col_labels'") ax.set_xticklabels(col_labels) if row_labels is not None: assert len(row_ticks) == len(row_labels), ( "'row_ticks' should have the " "same length as 'row_labels'") ax.set_yticklabels(row_labels) # Let the horizontal axes labeling appear on top. ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False) # Rotate the tick labels and set their alignment. plt.setp( ax.get_xticklabels(), rotation=-30, ha="right", rotation_mode="anchor") # Turn spines off and create white grid. ax.spines[:].set_visible(False) ax.grid(which="minor", color="w", linestyle='-', linewidth=3) ax.tick_params(which="minor", bottom=False, left=False) return im, cbar
a3b2117c5d42a0882083673e93505cb6ab2a8225
19,401
def html_colour_to_rgba(html_colour: str) -> tuple:
    """Converts an HTML colour to its RGBA values"""
    html_colour = html_colour.strip()
    if html_colour[0] == '#':
        html_colour = html_colour[1:]
    return tuple([int(x, 16) for x in
                  (html_colour[:2], html_colour[2:4], html_colour[4:], '0')])
461aaf837a4f99acbb167e98be3642799f425671
19,402
def index(request): """Displays form.""" data = {"menu": "index", "max_characters": settings.PASTE["max_characters"]} if request.method == "POST": paste = Paste(slug=random_id(Paste)) if request.FILES: for language_name, any_file in request.FILES.items(): break language = Language.by_name(language_name) form = PasteForm( { "language": language.id, "title": any_file.name, "private": settings.PASTE["private_by_default"], "lifetime": settings.PASTE["default_lifetime"], "content": any_file.read().decode(), }, instance=paste, ) else: form = PasteForm(request.POST, instance=paste) if not form.is_valid(): data["form"] = form return render(request, "paste/index.html", data) form.save() # Some logic added to overrided method, see forms.py location = request.build_absolute_uri( reverse("short_paste", kwargs={"slug": paste.slug}) ) return HttpResponseRedirect( location, content=location + "\n", content_type="text/plain" ) data["form"] = PasteForm( initial={ "private": settings.PASTE["private_by_default"], "lifetime": settings.PASTE["default_lifetime"], "language": Language.by_name(settings.PASTE["default_language"]).id, } ) data["absolute_index_url"] = request.build_absolute_uri(reverse("index")) return render(request, "paste/index.html", data)
dece4cc7e0d4d0138be1bb0b333189989cd1244c
19,403
def trafficking_service(): """Connect to Google's DFA Reporting service with oauth2 using the discovery service.""" return google_service(DDM_TRAFFICKING_SCOPE)
be94a35558178be9ad697edc89d6ca55ca3ca08b
19,404
def deduplicate(s, ch):
    """
    Collapses consecutive occurrences of `ch` in `s` into one.
    From http://stackoverflow.com/q/42216559/610569

        s = 'this  is   an  irritating string with  random   spacing  .'
        deduplicate(s, ' ')
        'this is an irritating string with random spacing .'
    """
    return ch.join([substring for substring in s.strip().split(ch) if substring])
5b2bb10376143a1597ddfab1711716c802cdf113
19,405
def optimize(mod): """Optimize all the functions in a module. Modules are the only mutable piece of Relay. We write an optimization pass over the module which destructively updates each function while optimizing. """ return pass_set(mod)
8065bf95d802c95fbad985dbaa1a0835ac27d20f
19,406
def sum_series(n): """Calculate sum of n+(n-2)+(n-4)...""" return n if n < 2 else n + sum_series(n - 2)
317317fc6a7f14a9cbd564266b73ac087b2bdbd2
19,407
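Worked example for the sum_series recursion above (added for illustration):

# sum_series(10) == 10 + 8 + 6 + 4 + 2 == 30
# sum_series(9)  == 9 + 7 + 5 + 3 + 1  == 25
assert sum_series(10) == 30
assert sum_series(9) == 25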
def get_preconditioner(): """Compute the preconditioner M""" diags_x = zeros((3, nx)) diags_x[0,:] = 1/hx/hx diags_x[1,:] = -2/hx/hx diags_x[2,:] = 1/hx/hx Lx = spdiags(diags_x, [-1,0,1], nx, nx) diags_y = zeros((3, ny)) diags_y[0,:] = 1/hy/hy diags_y[1,:] = -2/hy/hy diags_y[2,:] = 1/hy/hy Ly = spdiags(diags_y, [-1,0,1], ny, ny) J1 = kron(Lx, eye(ny)) + kron(eye(nx), Ly) # Now we have the matrix `J_1`. We need to find its inverse `M` -- # however, since an approximate inverse is enough, we can use # the *incomplete LU* decomposition J1_ilu = spilu(J1) # This returns an object with a method .solve() that evaluates # the corresponding matrix-vector product. We need to wrap it into # a LinearOperator before it can be passed to the Krylov methods: M = LinearOperator(shape=(nx*ny, nx*ny), matvec=J1_ilu.solve) return M
9992f894b0bc1141a461ddd52f1b38671cd42986
19,408
def compute_noise_ceiling(y, scoring=roc_auc_score, K=None, soft=True, return_pred=False, doubles_only=False): """ Computes the noise ceiling for data with repetitions. Parameters ---------- y : pd.Series Series with numeric values and index corresponding to stim ID. K : int Number of classes (if categorical; does not work for regression yet) soft : bool Whether to use a probabilistic estimate (True) or "hard" label (False) Returns ------- ceiling : ndarray Numpy ndarray (shape: K,) with ceiling estimates """ labels = convert_doubles_to_single_labels(y, K, soft, keepdims=True, doubles_only=doubles_only) labels = labels.sort_index() y_flat = y.loc[labels.index.unique()].sort_index() # Needed to convert 1D to 2D ohe = OneHotEncoder(categories='auto', sparse=False) y_ohe = ohe.fit_transform(y_flat.values[:, np.newaxis]) # Compute best possible score ("ceiling") ceiling = scoring(y_ohe, labels.values, average=None) if return_pred: return ceiling, labels else: return ceiling
e10c02bca26342eff97b1e5e29c96ed2429c429e
19,409
def get_all_contained_items(item, stoptest=None):
    """
    Recursively retrieve all items contained in another item

    :param text_game_maker.game_objects.items.Item item: item to retrieve items\
        from
    :param stoptest: callback to call on each sub-item to test whether\
        recursion should continue. If stoptest(item) == True, the contents of\
        that item will not be searched
    :return: list of retrieved items
    :rtype: [text_game_maker.game_objects.items.Item]
    """
    ret = []

    if not item.is_container:
        return ret

    stack = [item]
    while stack:
        subitem = stack.pop(0)

        for i in subitem.items:
            ret.append(i)
            if i.is_container:
                if stoptest and stoptest(i):
                    continue

                stack.append(i)

    return ret
d04e5c297dddb70db83637e748281f04b08b6a25
19,411
def convert(string): """ the convert() function takes simple-formatted string and returns a 'lingtree description string' suitable for pasting into the SIL LingTree program directly the string should contain multiple lines, one line per 'node relationship'. E.G. : S = NP VP NP = \L Juan Juan = \G John VP = V V = \L duerme duerme = \G sleeps The left and right side are separated by an equals sign ( = ). ( => ) and ( -> ) also work fine. The right side may contain special backslash codes, but the left side should not contain any special codes. """ ref = {} top = 0 for line in string.split('\n'): line.strip() if line == '': continue if line == '\r': continue if line.startswith('#'): continue # ignore comment lines # normalize 'equals' syntax line = line.replace('->','=') line = line.replace('=>','=') try: leftside,rightside = line.split('=') leftside = leftside.strip() rightside = rightside.strip() if top == 0: # remember the top node ref[leftside] = Node(leftside) top = ref[leftside] # the leftside must always already exist in the ref if not leftside in ref: raise NameError # right side contains a special code # support multiple codes on right side, like '\L lexical \G gloss' if rightside.find('\\') != -1: c_last = '' for c in rightside.split('\\'): if c == '': continue code = c[0] c = c[1:].strip() ref[c] = Node(c) ref[c].setCode(code) if c_last == '': ref[leftside].addChild(ref[c]) else: ref[c_last].addChild(ref[c]) c_last = c # normal right side else: for r in rightside.split(): r = r.strip() if r == '': continue ref[r] = Node(r) ref[leftside].addChild(ref[r]) except NameError: return "Line may not be in top-down order: %s" % line except: return "Error occurred processing line: %s" % line # tell the top node to print itself return top.tell()
0afd987c9da9579b2a80e7d6a3428533df75ef3e
19,414
def Hij_to_cijkl(H): """Convert a Hooke's matrix to the corresponding rigidity tensor Parameters ---------- H: 6x6 iterable or dict The Hooke's matrix to be converted. If not already a np.ndarray, an iterable must be castable to one. Returns ------- c: 3x3x3x3 np.ndarray The corresponding 4th order rigidity tensor """ if type(H) == dict: c = np.empty((3,3,3,3), dtype=type(H[1][1])) c[0,0,0,0] = H[1][1] c[0,0,1,1] = H[1][2] c[0,0,2,2] = H[1][3] c[0,0,1,2] = H[1][4]; c[0,0,2,1] = H[1][4] c[0,0,0,2] = H[1][5]; c[0,0,2,0] = H[1][5] c[0,0,0,1] = H[1][6]; c[0,0,1,0] = H[1][6] c[1,1,0,0] = H[1][2] c[1,1,1,1] = H[2][2] c[1,1,2,2] = H[2][3] c[1,1,1,2] = H[2][4]; c[1,1,2,1] = H[2][4] c[1,1,0,2] = H[2][5]; c[1,1,2,0] = H[2][5] c[1,1,0,1] = H[2][6]; c[1,1,1,0] = H[2][6] c[2,2,0,0] = H[1][3] c[2,2,1,1] = H[2][3] c[2,2,2,2] = H[3][3] c[2,2,1,2] = H[3][4]; c[2,2,2,1] = H[3][4] c[2,2,0,2] = H[3][5]; c[2,2,2,0] = H[3][5] c[2,2,0,1] = H[3][6]; c[2,2,1,0] = H[3][6] c[2,1,0,0] = H[1][4] c[2,1,1,1] = H[2][4] c[2,1,2,2] = H[3][4] c[2,1,1,2] = H[4][4]; c[2,1,2,1] = H[4][4] c[2,1,0,2] = H[4][5]; c[2,1,2,0] = H[4][5] c[2,1,0,1] = H[4][6]; c[2,1,1,0] = H[4][6] c[1,2,0,0] = H[1][4] c[1,2,1,1] = H[2][4] c[1,2,2,2] = H[3][4] c[1,2,1,2] = H[4][4]; c[1,2,2,1] = H[4][4] c[1,2,0,2] = H[4][5]; c[1,2,2,0] = H[4][5] c[1,2,0,1] = H[4][6]; c[1,2,1,0] = H[4][6] c[2,0,0,0] = H[1][5] c[2,0,1,1] = H[2][5] c[2,0,2,2] = H[3][5] c[2,0,1,2] = H[4][5]; c[2,0,2,1] = H[4][5] c[2,0,0,2] = H[5][5]; c[2,0,2,0] = H[5][5] c[2,0,0,1] = H[5][6]; c[2,0,1,0] = H[5][6] c[0,2,0,0] = H[1][5] c[0,2,1,1] = H[2][5] c[0,2,2,2] = H[3][5] c[0,2,1,2] = H[4][5]; c[0,2,2,1] = H[4][5] c[0,2,0,2] = H[5][5]; c[0,2,2,0] = H[5][5] c[0,2,0,1] = H[5][6]; c[0,2,1,0] = H[5][6] c[1,0,0,0] = H[1][6] c[1,0,1,1] = H[2][6] c[1,0,2,2] = H[3][6] c[1,0,1,2] = H[4][6]; c[1,0,2,1] = H[4][6] c[1,0,0,2] = H[5][6]; c[1,0,2,0] = H[5][6] c[1,0,0,1] = H[6][6]; c[1,0,1,0] = H[6][6] c[0,1,0,0] = H[1][6] c[0,1,1,1] = H[2][6] c[0,1,2,2] = H[3][6] c[0,1,1,2] = H[4][6]; c[0,1,2,1] = H[4][6] c[0,1,0,2] = H[5][6]; c[0,1,2,0] = H[5][6] c[0,1,0,1] = H[6][6]; c[0,1,1,0] = H[6][6] else: if not type(H) == np.ndarray: H = np.array(H) if H.shape[0] != 6 or H.shape[1] != 6: raise ValueError('H must be a 6x6 iterable and castable to np.ndarray') c = np.empty((3,3,3,3), dtype=H.dtype) c[0,0,0,0] = H[0,0] c[0,0,1,1] = H[0,1] c[0,0,2,2] = H[0,2] c[0,0,1,2] = H[0,3]; c[0,0,2,1] = H[0,3] c[0,0,0,2] = H[0,4]; c[0,0,2,0] = H[0,4] c[0,0,0,1] = H[0,5]; c[0,0,1,0] = H[0,5] c[1,1,0,0] = H[1,0] c[1,1,1,1] = H[1,1] c[1,1,2,2] = H[1,2] c[1,1,1,2] = H[1,3]; c[1,1,2,1] = H[1,3] c[1,1,0,2] = H[1,4]; c[1,1,2,0] = H[1,4] c[1,1,0,1] = H[1,5]; c[1,1,1,0] = H[1,5] c[2,2,0,0] = H[2,0] c[2,2,1,1] = H[2,1] c[2,2,2,2] = H[2,2] c[2,2,1,2] = H[2,3]; c[2,2,2,1] = H[2,3] c[2,2,0,2] = H[2,4]; c[2,2,2,0] = H[2,4] c[2,2,0,1] = H[2,5]; c[2,2,1,0] = H[2,5] c[2,1,0,0] = H[3,0] c[2,1,1,1] = H[3,1] c[2,1,2,2] = H[3,2] c[2,1,1,2] = H[3,3]; c[2,1,2,1] = H[3,3] c[2,1,0,2] = H[3,4]; c[2,1,2,0] = H[3,4] c[2,1,0,1] = H[3,5]; c[2,1,1,0] = H[3,5] c[1,2,0,0] = H[3,0] c[1,2,1,1] = H[3,1] c[1,2,2,2] = H[3,2] c[1,2,1,2] = H[3,3]; c[1,2,2,1] = H[3,3] c[1,2,0,2] = H[3,4]; c[1,2,2,0] = H[3,4] c[1,2,0,1] = H[3,5]; c[1,2,1,0] = H[3,5] c[2,0,0,0] = H[4,0] c[2,0,1,1] = H[4,1] c[2,0,2,2] = H[4,2] c[2,0,1,2] = H[4,3]; c[2,0,2,1] = H[4,3] c[2,0,0,2] = H[4,4]; c[2,0,2,0] = H[4,4] c[2,0,0,1] = H[4,5]; c[2,0,1,0] = H[4,5] c[0,2,0,0] = H[4,0] c[0,2,1,1] = H[4,1] c[0,2,2,2] = H[4,2] c[0,2,1,2] = H[4,3]; c[0,2,2,1] = H[4,3] c[0,2,0,2] = H[4,4]; c[0,2,2,0] = H[4,4] 
c[0,2,0,1] = H[4,5]; c[0,2,1,0] = H[4,5] c[1,0,0,0] = H[5,0] c[1,0,1,1] = H[5,1] c[1,0,2,2] = H[5,2] c[1,0,1,2] = H[5,3]; c[1,0,2,1] = H[5,3] c[1,0,0,2] = H[5,4]; c[1,0,2,0] = H[5,4] c[1,0,0,1] = H[5,5]; c[1,0,1,0] = H[5,5] c[0,1,0,0] = H[5,0] c[0,1,1,1] = H[5,1] c[0,1,2,2] = H[5,2] c[0,1,1,2] = H[5,3]; c[0,1,2,1] = H[5,3] c[0,1,0,2] = H[5,4]; c[0,1,2,0] = H[5,4] c[0,1,0,1] = H[5,5]; c[0,1,1,0] = H[5,5] return c
691c38eabe459c83d69dc8def69b48a9a0f3055e
19,415
def train10(): """ CIFAR-10 training set creator. It returns a reader creator, each sample in the reader is image pixels in [0, 1] and label in [0, 9]. :return: Training reader creator :rtype: callable """ return reader_creator( paddle.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5), 'data_batch', True)
fcfabaa575a8cffa511e579b797f738727c75945
19,417
def _generate_one_direction_LSTM(transformer, X, W, R, B, initial_h, initial_c, P, clip, act, dtype, hidden_size, batch_size): """Generate subgraph for one direction of unrolled LSTM layer Args: transformer (_ModelTransformerHelper): helper for model generation X (list of str): names of tensors in input sequence. Each tensor shape: [batch_size, input_size] W (str): name of concatenated weight tensor: [input, output, forget, cell] R (str): name of concatenated recurrence weights tensor: [input, output, forget, cell] B (str): name of concatenated bias tensor: [input, output, forget, cell] initial_h (str or None): name of tensor containing initial hidden state. Shape [batch_size, hidden_size] initial_c (str or None): name of tensor containing initial cell state. Shape [batch_size, hidden_size] P (str or None): name of concatenated peephole tensor: [input, output, forget] clip (float or None): range which clips input of activations act (dict of str): activation functions {'f': 'Sigmoid', 'g': 'Tanh', 'h': 'Tanh'} dtype (numpy dtype): data type used in created LSTM operation hidden_size (int): hidden dimension batch_size (int): batch dimension """ # one direction LSTM: # # For details see: # https://github.com/onnx/onnx/blob/5cf5feef5ec3fd5527b2fdb6c29780e3b705059f/docs/Changelog.md#LSTM-7 # # it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi) # ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) # ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) # Ct = ft (.) Ct-1 + it (.) ct # ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) # Ht = ot (.) h(Ct) # # X - input tensor # i - input gate # o - output gate # f - forget gate # c - cell gate # t - time step (t-1 means previous time step) # W[iofc] - W parameter weight matrix for input, output, forget, and cell gates # R[iofc] - R recurrence weight matrix for input, output, forget, and cell gates # Wb[iofc] - W bias vectors for input, output, forget, and cell gates # Rb[iofc] - R bias vectors for input, output, forget, and cell gates # P[iof] - P peephole weight vector for input, output, and forget gates # WB[iofc] - W parameter weight matrix for backward input, output, forget, and cell gates # RB[iofc] - R recurrence weight matrix for backward input, output, forget, and cell gates # WBb[iofc] - W bias vectors for backward input, output, forget, and cell gates # RBb[iofc] - R bias vectors for backward input, output, forget, and cell gates # PB[iof] - P peephole weight vector for backward input, output, and forget gates # H - Hidden state seq_length = len(X) state_h_tensors = [] w_tensors = transformer.make_split(W, split_sizes=[hidden_size] * 4, axis=0) W = {'i': w_tensors[0], 'o': w_tensors[1], 'f': w_tensors[2], 'c': w_tensors[3]} r_tensors = transformer.make_split(R, split_sizes=[hidden_size] * 4, axis=0) R = {'i': r_tensors[0], 'o': r_tensors[1], 'f': r_tensors[2], 'c': r_tensors[3]} if B is not None: separate_b_tensors = transformer.make_split( B, split_sizes=[hidden_size] * 8, axis=0) b_tensors = [] for i in range(4): b_tensors += [ transformer.make_add(separate_b_tensors[i], separate_b_tensors[i + 4]) ] else: b_tensors = [ transformer.make_constant_tensor( np.zeros((hidden_size), dtype=dtype), 'zero_b') ] * 4 B = {'i': b_tensors[0], 'o': b_tensors[1], 'f': b_tensors[2], 'c': b_tensors[3]} if initial_h is not None: previous_h_state_tensor = initial_h else: previous_h_state_tensor = transformer.make_constant_tensor( np.zeros((batch_size, hidden_size), dtype=dtype), 'initial_h') if initial_c is not None: previous_c_state_tensor = 
initial_c else: previous_c_state_tensor = transformer.make_constant_tensor( np.zeros((batch_size, hidden_size), dtype=dtype), 'initial_c') if P is not None: p_tensors = transformer.make_split(P, split_sizes=[hidden_size] * 3, axis=0) P = {'i': p_tensors[0], 'o': p_tensors[1], 'f': p_tensors[2]} else: zero = transformer.make_constant_tensor( np.zeros((hidden_size), dtype=dtype), 'zero_peephole') P = {'i': zero, 'o': zero, 'f': zero} for i in range(seq_length): # it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi) it = transformer.make_gemm(X[i], W['i'], B['i'], trans_b=True) it = transformer.make_gemm(previous_h_state_tensor, R['i'], it, trans_b=True) peephole_it = transformer.make_mul(P['i'], previous_c_state_tensor) it = transformer.make_add(it, peephole_it) if clip is not None: it = transformer.make_clip(it, min=-clip, max=clip) it = transformer.make_act(it, act['f']) # ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) ft = transformer.make_gemm(X[i], W['f'], B['f'], trans_b=True) ft = transformer.make_gemm(previous_h_state_tensor, R['f'], ft, trans_b=True) peephole_ft = transformer.make_mul(P['f'], previous_c_state_tensor) ft = transformer.make_add(ft, peephole_ft) if clip is not None: ft = transformer.make_clip(ft, min=-clip, max=clip) ft = transformer.make_act(ft, act['f']) # ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) ct = transformer.make_gemm(X[i], W['c'], B['c'], trans_b=True) ct = transformer.make_gemm(previous_h_state_tensor, R['c'], ct, trans_b=True) if clip is not None: ct = transformer.make_clip(ct, min=-clip, max=clip) ct = transformer.make_act(ct, act['g']) # Ct = ft (.) Ct-1 + it (.) ct ft_Ct = transformer.make_mul(ft, previous_c_state_tensor) it_ct = transformer.make_mul(it, ct) Ct = transformer.make_add(ft_Ct, it_ct) previous_c_state_tensor = Ct # ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) ot = transformer.make_gemm(X[i], W['o'], B['o'], trans_b=True) ot = transformer.make_gemm(previous_h_state_tensor, R['o'], ot, trans_b=True) peephole_ot = transformer.make_mul(P['o'], Ct) ot = transformer.make_add(ot, peephole_ot) if clip is not None: ot = transformer.make_clip(ot, min=-clip, max=clip) ot = transformer.make_act(ot, act['f']) # Ht = ot (.) h(Ct) Ht = transformer.make_act(Ct, act['h']) Ht = transformer.make_mul(ot, Ht) previous_h_state_tensor = Ht state_h_tensors += [Ht] return (state_h_tensors, previous_c_state_tensor)
f0093c872a0ae639e5616e29d9666353e19d5748
19,418
def FindTerms(Filename, NumTerms): """Reorders the first NumTerms of the output of Todd program to find omega breakpoints""" f = open(Filename, 'r') # Get the value of omega Omega = int(f.readline().split()[1]) print "Omega =", Omega # Skip these lines for i in range(3): f.readline() Terms = [] for line in f: s = line.split() if len(s) == 0: break if s[0].isdigit(): Terms.append(int(s[0])) f.close() if NumTerms > len(Terms): print("Requesting more terms than are available in file...exiting.") exit() print "Number of terms in file", Filename, ": ", len(Terms) print "Number of terms to use:", str(NumTerms) print TermsSub = Terms[0:NumTerms] TermsSub.sort() # Create a list of numbers of terms for the full set for omega = 1 through Omega FoundTerms = [] OmegaTerms = [] for i in range(Omega+1): OmegaTerms.append(NumTermsOmega(i)) for i in range(Omega+1): for j in range(len(TermsSub)): if TermsSub[j] == OmegaTerms[i]: print i, ": Found", OmegaTerms[i], "at position", j+1 FoundTerms = FoundTerms + [j+1] break if TermsSub[j] > OmegaTerms[i]: print i, ": Found next term past", OmegaTerms[i], "at position", j+1 FoundTerms = FoundTerms + [j+1] break if TermsSub[len(TermsSub)-1] != OmegaTerms[Omega]: print Omega, ": Last term at", len(TermsSub), "is less than", OmegaTerms[Omega] FoundTerms = FoundTerms + [len(TermsSub)] # Just here to put some extra space after running print return FoundTerms
7ec4d6cb4a1fc698cc951e618d375a06eddc54e7
19,419
import numpy as np

def hessian_power(h):
    """
    Power in the hessian filter band

    Frobenius norm squared
    """
    if len(h) == 3:
        p = np.abs(h[0])**2 + 2*np.abs(h[1])**2 + np.abs(h[2])**2
    elif len(h) == 6:
        p = (np.abs(h[0])**2 + 2*np.abs(h[1])**2 + 2*np.abs(h[2])**2 +
             np.abs(h[3])**2 + 2*np.abs(h[4])**2 + np.abs(h[5])**2)
    else:
        raise RuntimeError('Unsupported number of dimensions {}.'.format(len(h)))
    return p
3ea5b77b55d9a22406c34b5ffb9a244f127df476
19,420
def read_table(source, columns=None, memory_map=True): """ Read a pyarrow.Table from Feather format Parameters ---------- source : str file path, or file-like object columns : sequence, optional Only read a specific set of columns. If not provided, all columns are read. memory_map : boolean, default True Use memory mapping when opening file on disk Returns ------- table : pyarrow.Table """ reader = ext.FeatherReader() reader.open(source, use_memory_map=memory_map) if columns is None: return reader.read() column_types = [type(column) for column in columns] if all(map(lambda t: t == int, column_types)): table = reader.read_indices(columns) elif all(map(lambda t: t == str, column_types)): table = reader.read_names(columns) else: column_type_names = [t.__name__ for t in column_types] raise TypeError("Columns must be indices or names. " "Got columns {} of types {}" .format(columns, column_type_names)) # Feather v1 already respects the column selection if reader.version < 3: return table # Feather v2 reads with sorted / deduplicated selection elif sorted(set(columns)) == columns: return table else: # follow exact order / selection of names new_fields = [table.schema.field(c) for c in columns] new_schema = schema(new_fields, metadata=table.schema.metadata) new_columns = [table.column(c) for c in columns] return Table.from_arrays(new_columns, schema=new_schema)
bcbdfa9b9bde999a4caca7dae9fa76eea04c6978
19,421
def save_classnames_in_image_maxcardinality( rgb_img, label_img, id_to_class_name_map, font_color=(0, 0, 0), save_to_disk: bool = False, save_fpath: str = "" ) -> np.ndarray: """ Args: rgb_img label_img id_to_class_name_map: Mapping[int,str] Returns: rgb_img """ H, W, C = rgb_img.shape class_to_conncomps_dict = scipy_conn_comp(label_img) for class_idx, conncomps_list in class_to_conncomps_dict.items(): mask_idx = find_max_cardinality_mask(conncomps_list) maxsz_conncomp = conncomps_list[mask_idx] text = id_to_class_name_map[class_idx] y, x = get_mean_mask_location(maxsz_conncomp) x -= 55 x = max(0, x) x = min(W - 1, x) # print(f'Class idx: {class_idx}: (x,y)=({x},{y})') rgb_img = add_text_cv2( rgb_img, text, coords_to_plot_at=(x, y), font_color=font_color, font_scale=1, thickness=2 ) if save_to_disk: cv2_write_rgb(save_fpath, rgb_img) return rgb_img
807e4c850226a56cdd41ce5edfec7dc983313e7a
19,422
def yolo2lite_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0): """Create YOLO_V2 Lite MobileNet model CNN body in Keras.""" mobilenet = MobileNet(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha) print('backbone layers number: {}'.format(len(mobilenet.layers))) # input: 416 x 416 x 3 # mobilenet.output : 13 x 13 x (1024*alpha) # conv_pw_11_relu(layers[73]) : 26 x 26 x (512*alpha) # f1: 13 x 13 x (1024*alpha) f1 = mobilenet.output # f2: 26 x 26 x (512*alpha) f2 = mobilenet.get_layer('conv_pw_11_relu').output f1_channel_num = int(1024*alpha) f2_channel_num = int(512*alpha) y = yolo2lite_predictions((f1, f2), (f1_channel_num, f2_channel_num), num_anchors, num_classes) return Model(inputs, y)
650a2f053db1fb8cee7e054a66bdb1cea7a82724
19,423
from typing import List from typing import Tuple def from_pandas( X: pd.DataFrame, max_iter: int = 100, h_tol: float = 1e-8, w_threshold: float = 0.0, tabu_edges: List[Tuple[str, str]] = None, tabu_parent_nodes: List[str] = None, tabu_child_nodes: List[str] = None, ) -> StructureModel: """ Learn the `StructureModel`, the graph structure describing conditional dependencies between variables in data presented as a pandas dataframe. The optimisation is to minimise a score function :math:`F(W)` over the graph's weighted adjacency matrix, :math:`W`, subject to the a constraint function :math:`h(W)`, where :math:`h(W) == 0` characterises an acyclic graph. :math:`h(W) > 0` is a continuous, differentiable function that encapsulated how acyclic the graph is (less == more acyclic). Full details of this approach to structure learning are provided in the publication: Based on DAGs with NO TEARS. @inproceedings{zheng2018dags, author = {Zheng, Xun and Aragam, Bryon and Ravikumar, Pradeep and Xing, Eric P.}, booktitle = {Advances in Neural Information Processing Systems}, title = {{DAGs with NO TEARS: Continuous Optimization for Structure Learning}}, year = {2018}, codebase = {https://github.com/xunzheng/notears} } Args: X: input data. max_iter: max number of dual ascent steps during optimisation. h_tol: exit if h(W) < h_tol (as opposed to strict definition of 0). w_threshold: fixed threshold for absolute edge weights. tabu_edges: list of edges(from, to) not to be included in the graph. tabu_parent_nodes: list of nodes banned from being a parent of any other nodes. tabu_child_nodes: list of nodes banned from being a child of any other nodes. Returns: StructureModel: graph of conditional dependencies between data variables. Raises: ValueError: If X does not contain data. """ data = deepcopy(X) non_numeric_cols = data.select_dtypes(exclude="number").columns if len(non_numeric_cols) > 0: raise ValueError( "All columns must have numeric data. " "Consider mapping the following columns to int {non_numeric_cols}".format( non_numeric_cols=non_numeric_cols ) ) col_idx = {c: i for i, c in enumerate(data.columns)} idx_col = {i: c for c, i in col_idx.items()} if tabu_edges: tabu_edges = [(col_idx[u], col_idx[v]) for u, v in tabu_edges] if tabu_parent_nodes: tabu_parent_nodes = [col_idx[n] for n in tabu_parent_nodes] if tabu_child_nodes: tabu_child_nodes = [col_idx[n] for n in tabu_child_nodes] g = from_numpy( data.values, max_iter, h_tol, w_threshold, tabu_edges, tabu_parent_nodes, tabu_child_nodes, ) sm = StructureModel() sm.add_nodes_from(data.columns) sm.add_weighted_edges_from( [(idx_col[u], idx_col[v], w) for u, v, w in g.edges.data("weight")], origin="learned", ) return sm
1c7c645b60a23e9fc5dca3f63b8216aa82445ff0
19,424
def conv_seq_to_sent_symbols(seq, excl_symbols=None, end_symbol='.', remove_end_symbol=True): """ Converts sequences of tokens/ids into a list of sentences (tokens/ids). :param seq: list of tokens/ids. :param excl_symbols: tokens/ids which should be excluded from the final result. :param end_symbol: self-explanatory. :param remove_end_symbol: whether to remove from each sentence the end symbol. :return: list of lists, where each sub-list contains tokens/ids. """ excl_symbols = excl_symbols if excl_symbols else {} assert end_symbol not in excl_symbols coll = [] curr_sent = [] for symbol in seq: if symbol in excl_symbols: continue if symbol == end_symbol: if not remove_end_symbol: curr_sent.append(symbol) coll.append(curr_sent) curr_sent = [] else: curr_sent.append(symbol) if curr_sent: coll.append(curr_sent) return coll
a87da4bb5c34882d882832380f3831929ad41415
19,425
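Usage sketch for the conv_seq_to_sent_symbols snippet above (added for illustration; the tokens are arbitrary assumptions):

seq = ['hello', 'world', '.', '<pad>', 'how', 'are', 'you', '.']
print(conv_seq_to_sent_symbols(seq, excl_symbols={'<pad>'}))
# [['hello', 'world'], ['how', 'are', 'you']]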
def emphasize_match(seq, line, fmt='__{}__'): """ Emphasize the matched portion of string. """ indices = substr_ind(seq.lower(), line.lower(), skip_spaces=True) if indices: matched = line[indices[0]:indices[1]] line = line.replace(matched, fmt.format(matched)) return line
c33c3d5cc52fb7b34bf4ae68495f78afcc1a051d
19,426
def param_value(memory, position, mode): """Get the value of a param according to its mode""" if mode == 0: # position mode return memory[memory[position]] elif mode == 1: # immediate mode return memory[position] else: raise ValueError("Unknown mode : ", mode)
e02ed7e1baea57af4b08c408b6decadee9c72162
19,427
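Usage sketch for the param_value snippet above (added for illustration; the Intcode-style memory is an arbitrary assumption):

memory = [1002, 4, 3, 4, 33]
print(param_value(memory, 1, 0))  # position mode:  memory[memory[1]] == memory[4] == 33
print(param_value(memory, 2, 1))  # immediate mode: memory[2] == 3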
def check_array_shape(inp: np.ndarray, dims: tuple, shape_m1: int, msg: str): """check if inp shape is allowed inp: test object dims: list, list of allowed dims shape_m1: shape of lowest level, if 'any' allow any shape msg: str, error msg """ if inp.ndim in dims: if inp.shape[-1] == shape_m1: return None if shape_m1 == "any": return None raise MagpylibBadUserInput(msg)
2ca900df0ea13e4b41f8bc9c7ff1e727aea27713
19,428
def Implies(p, q, simplify=True, factor=False): """Factory function for Boolean implication expression.""" p = Expression.box(p) q = Expression.box(q) expr = ExprImplies(p, q) if factor: expr = expr.factor() elif simplify: expr = expr.simplify() return expr
e5b57ccffdb413ad1de46fef65b670dd20bb53c4
19,429
def r_power(r_amp): """Return the fraction of reflected power. Parameters ---------- r_amp : float The net reflection amplitude after calculating the transfer matrix. Returns ------- R : numpy array The model reflectance """ return np.abs(r_amp)**2
2d7250f2e447c6a85d7ace63cf087733b920444e
19,430
def wrap_get_server(layer_name, func): """ Wrapper for memcache._get_server, to read remote host on all ops. This relies on the module internals, and just sends an info event when this function is called. """ @wraps(func) # XXX Not Python2.4-friendly def wrapper(*f_args, **f_kwargs): ret = func(*f_args, **f_kwargs) try: args = {'KVKey' : f_args[1]} (host, _) = ret if host: if host.family == socket.AF_INET: args['RemoteHost'] = host.ip elif host.family == socket.AF_UNIX: args['RemoteHost'] = 'localhost' oboe.log('info', layer_name, keys=args, store_backtrace=oboe._collect_backtraces('memcache')) except Exception, e: print >> sys.stderr, "Oboe error: %s" % e return ret return wrapper
fc8f65480b3be4e8cb04ac1a42e1b969ce5b4ccc
19,431
def shape(batch) -> (int, int): """Get count of machine/tasks of a batch""" return len(batch), len(batch[0])
43119e21e5f9034cb4c733f2855254c77a452e9e
19,432
from typing import Union
from typing import List
import requests
from io import StringIO

import pandas as pd

def uniprot_mappings(query: Union[str, List[str]],
                     map_from: str = 'ID',
                     map_to: str = 'PDB_ID',
                     ) -> pd.DataFrame:
    """Map identifiers using the UniProt identifier mapping tool.

    :param query: list or space delimited string of identifiers
    :param map_from: type of input identifiers (default: accession)
    :param map_to: type of desired output identifiers (default: PDB identifiers)

    See: https://www.uniprot.org/help/api_idmapping
    """
    url = 'https://www.uniprot.org/uploadlists/'
    if isinstance(query, list):
        query = ' '.join(query)
    params = {'from': map_from,
              'to': map_to,
              'format': 'tab',
              'query': query,
              }
    response = requests.post(url, params)
    if not response.ok:
        raise ValueError("query is wrongly formatted and resulted in a server failure")
    data = StringIO(response.text)
    df = pd.read_csv(data, sep='\t')
    df = df.rename(columns={'To': map_to, 'From': map_from})
    return df
79fb3b75c2724bee51242dfcbba78a7efdc11667
19,433
def build_property_filter_spec(client_factory, property_specs, object_specs): """Builds the property filter spec. :param client_factory: factory to get API input specs :param property_specs: property specs to be collected for filtered objects :param object_specs: object specs to identify objects to be filtered :returns: property filter spec """ property_filter_spec = client_factory.create('ns0:PropertyFilterSpec') property_filter_spec.propSet = property_specs property_filter_spec.objectSet = object_specs return property_filter_spec
43822b966766fe5922b0d9ff45f6e5835ae6500e
19,434
def get_user_name_from_token():
    """Extract user name, groups and preferred user name from ID token

    returns: a tuple of user name, groups and preferred user name
    """
    curl = _Curl()
    token_info = curl.get_token_info()
    try:
        return token_info['name'], token_info['groups'], token_info['preferred_username']
    except Exception:
        return None, None, None
cd4b4931fdbc646bde0d555d8914cf0fac9fca89
19,435
def prob17(limit=1000): """ If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total. If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used? NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20 letters. The use of "and" when writing out numbers is in compliance with British usage. """ digits = {1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine'} exceptions = {10: 'ten', 11: 'eleven', 12: 'twelve', 14: 'fourteen'} bases = {2: 'twen', 3: 'thir', 4: 'for', 5: 'fif', 6: 'six', 7: 'seven', 8: 'eigh', 9: 'nine'} powers = {1: 'teen', 10: 'ty', 100: 'hundred', 1000: 'thousand'} count = 0 for num in range(1, limit + 1): right = str(num)[-2:] #print right if int(right) == 0: pass elif int(right) in exceptions: count += len(exceptions[int(right)]) elif 10 < int(right) < 20: count += len(bases[int(right[1])]) + len(powers[1]) else: if right[-1] != '0': count += len(digits[int(right[-1])]) if len(right) == 2 and right[0] != '0': count += len(bases[int(right[0])]) + len(powers[10]) if len(str(num)) > 2: left = str(num)[:-2] #print left if right != '00': count += 3 if left[-1] != '0': count += len(digits[int(left[-1])]) + len(powers[100]) if len(left) == 2 and left[0] != '0': count += len(digits[int(left[0])]) + len(powers[1000]) return count
586f60fa4411a5818102a903286aa97095faeffb
19,436
def mask_orbit_start_and_end(time, flux, orbitgap=1, expected_norbits=2, orbitpadding=6/(24), raise_error=True): """ Ignore the times near the edges of orbits. args: time, flux returns: time, flux: with `orbitpadding` days trimmed out """ norbits, groups = lcmath.find_lc_timegroups(time, mingap=orbitgap) if norbits != expected_norbits: errmsg = 'got {} orbits, expected {}. groups are {}'.format( norbits, expected_norbits, repr(groups)) if raise_error: raise AssertionError(errmsg) else: print(errmsg) print('returning what was passed') return time, flux sel = np.zeros_like(time).astype(bool) for group in groups: tg_time = time[group] start_mask = (np.min(tg_time), np.min(tg_time) + orbitpadding) end_mask = (np.max(tg_time) - orbitpadding, np.max(tg_time)) sel |= ( (time > max(start_mask)) & (time < min(end_mask)) ) return_time = time[sel] return_flux = flux[sel] return return_time, return_flux
c8da4b3394424acd8ec1b9ccb1af03ba2add2043
19,437
def to_cnf(expr): """ Convert a propositional logical sentence s to conjunctive normal form. That is, of the form ((A | ~B | ...) & (B | C | ...) & ...) Examples ======== >>> from sympy.logic.boolalg import to_cnf >>> from sympy.abc import A, B, D >>> to_cnf(~(A | B) | D) And(Or(D, Not(A)), Or(D, Not(B))) """ # Don't convert unless we have to if is_cnf(expr): return expr expr = sympify(expr) expr = eliminate_implications(expr) return distribute_and_over_or(expr)
b718d26ba3669d88b2d4bc368be48307cd7334bf
19,438
from typing import List from pathlib import Path from typing import Optional def plot_contours_for_all_classes(sample: Sample, segmentation: np.ndarray, foreground_class_names: List[str], result_folder: Path, result_prefix: str = "", image_range: Optional[TupleFloat2] = None, channel_index: int = 0) -> List[Path]: """ Creates a plot with the image, the ground truth, and the predicted segmentation overlaid. One plot is created for each class, each plotting the Z slice where the ground truth has most pixels. :param sample: The image sample, with the photonormalized image and the ground truth labels. :param segmentation: The predicted segmentation: multi-value, size Z x Y x X. :param foreground_class_names: The names of all classes, excluding the background class. :param result_folder: The folder into which the resulting plot PNG files should be written. :param result_prefix: A string prefix that will be used for all plots. :param image_range: The minimum and maximum image values that will be mapped to the color map ranges. If None, use the actual min and max values. :param channel_index: The index of the image channel that should be plotted. :return: The paths to all generated PNG files. """ check_size_matches(sample.labels[0], segmentation) num_classes = sample.labels.shape[0] if len(foreground_class_names) != num_classes - 1: raise ValueError( f"Labels tensor indicates {num_classes} classes, but got {len(foreground_class_names)} foreground " f"class names: {foreground_class_names}") plot_names: List[Path] = [] image = sample.image[channel_index, ...] contour_arguments = [{'colors': 'r'}, {'colors': 'b', 'linestyles': 'dashed'}] binaries = binaries_from_multi_label_array(segmentation, num_classes) for class_index, binary in enumerate(binaries): if class_index == 0: continue ground_truth = sample.labels[class_index, ...] if is_missing_ground_truth(ground_truth): continue largest_gt_slice = get_largest_z_slice(ground_truth) labels_at_largest_gt = ground_truth[largest_gt_slice] segmentation_at_largest_gt = binary[largest_gt_slice, ...] class_name = foreground_class_names[class_index - 1] patient_id = sample.patient_id if isinstance(patient_id, str): patient_id_str = patient_id else: patient_id_str = f"{patient_id:03d}" filename_stem = f"{result_prefix}{patient_id_str}_{class_name}_slice_{largest_gt_slice:03d}" plot_file = plot_image_and_label_contour(image=image[largest_gt_slice, ...], labels=[labels_at_largest_gt, segmentation_at_largest_gt], contour_arguments=contour_arguments, image_range=image_range, plot_file_name=result_folder / filename_stem) plot_names.append(plot_file) return plot_names
d3f7b1e246bd2eb19272279f946af4218f2eb65b
19,439
from typing import VT from typing import List from typing import Dict from typing import Set from typing import FrozenSet from typing import Tuple import itertools def match_gadgets_phasepoly(g: BaseGraph[VT,ET]) -> List[MatchPhasePolyType[VT]]: """Finds groups of phase-gadgets that act on the same set of 4 vertices in order to apply a rewrite based on rule R_13 of the paper *A Finite Presentation of CNOT-Dihedral Operators*.""" targets: Dict[VT,Set[FrozenSet[VT]]] = {} gadgets: Dict[FrozenSet[VT], Tuple[VT,VT]] = {} inputs = g.inputs() outputs = g.outputs() for v in g.vertices(): if v not in inputs and v not in outputs and len(list(g.neighbors(v)))==1: if g.phase(v) != 0 and g.phase(v).denominator != 4: continue n = list(g.neighbors(v))[0] tgts = frozenset(set(g.neighbors(n)).difference({v})) if len(tgts)>4: continue gadgets[tgts] = (n,v) for t in tgts: if t in targets: targets[t].add(tgts) else: targets[t] = {tgts} if g.phase(v) != 0 and g.phase(v).denominator == 4: if v in targets: targets[v].add(frozenset([v])) else: targets[v] = {frozenset([v])} targets = {t:s for t,s in targets.items() if len(s)>1} matches: Dict[FrozenSet[VT], Set[FrozenSet[VT]]] = {} for v1,t1 in targets.items(): s = t1.difference(frozenset([v1])) if len(s) == 1: c = s.pop() if any(len(targets[v2])==2 for v2 in c): continue s = t1.difference({frozenset({v1})}) for c in [d for d in s if not any(d.issuperset(e) for e in s if e!=d)]: if not all(v2 in targets for v2 in c): continue if any(v2<v1 for v2 in c): continue # type: ignore a = set() for t in c: a.update([i for s in targets[t] for i in s if i in targets]) for group in itertools.combinations(a.difference(c),4-len(c)): gr = list(group)+list(c) b: Set[FrozenSet[VT]] = set() for t in gr: b.update([s for s in targets[t] if s.issubset(gr)]) if len(b)>7: matches[frozenset(gr)] = b m: List[MatchPhasePolyType[VT]] = [] taken: Set[VT] = set() for groupp, gad in sorted(matches.items(), key=lambda x: len(x[1]), reverse=True): if taken.intersection(groupp): continue m.append((list(groupp), {s:(gadgets[s] if len(s)>1 else list(s)[0]) for s in gad})) taken.update(groupp) return m
740957f18d5ad223d9b4c64f3c61e6f1bc4bea9e
19,440
import re

def get_replaceid(fragment):
    """get replace id for shared content"""
    replaceid = re.findall(r":[A-Za-z]+:\s(.+)", fragment)[0]
    return replaceid
25e1a940904d86c5e57d2d36dbd91247c6e08bb3
19,441
def _tower_fn(is_training, weight_decay, feature, label, data_format, num_layers, batch_norm_decay, batch_norm_epsilon): """Build computation tower (Resnet). Args: is_training: true if is training graph. weight_decay: weight regularization strength, a float. feature: a Tensor. label: a Tensor. data_format: channels_last (NHWC) or channels_first (NCHW). num_layers: number of layers, an int. batch_norm_decay: decay for batch normalization, a float. batch_norm_epsilon: epsilon for batch normalization, a float. Returns: A tuple with the loss for the tower, the gradients and parameters, and predictions. """ model = cifar10_model.ResNetCifar10( num_layers, batch_norm_decay=batch_norm_decay, batch_norm_epsilon=batch_norm_epsilon, is_training=is_training, data_format=data_format) logits = model.forward_pass(feature, input_data_format='channels_last') tower_pred = { 'classes': tf.argmax(input=logits, axis=1), 'probabilities': tf.nn.softmax(logits) } tower_loss = tf.losses.sparse_softmax_cross_entropy( logits=logits, labels=label) tower_loss = tf.reduce_mean(tower_loss) model_params = tf.trainable_variables() tower_loss += weight_decay * tf.add_n( [tf.nn.l2_loss(v) for v in model_params]) tower_grad = tf.gradients(tower_loss, model_params) return tower_loss, zip(tower_grad, model_params), tower_pred
8f15447ef605990c5b596afe11e5bcd8f84164b1
19,442
def DiscRate(t, dur): """Discount rates for the outer projection""" return scen.DiscRate(dur) + DiscRateAdj(t)
07e6af30738531b349fac62bfbfe9e58ba0bee51
19,443
def outlierDivergence(dist1, dist2, alpha): """Defines difference between how distributions classify outliers. Choose uniformly from Distribution 1 and Distribution 2, and then choose an outlier point according to the induced probability distribution. Returns the probability that said point would be classified differently by the other distribution. Parameters: -dist1 (list or tuple of numbers): Distrubution 1 -dist2 (list of tuple of numbers): Distribution 2 -alpha: 100*alpha and 100*(1-alpha) are the percentile cutoffs of each distribution for classifying values as outliers """ return (probDiffClass(dist1, dist2, alpha) + probDiffClass(dist2, dist1, alpha)) / 2
4c631f30dd32d35dd8ced7dbcbc0c93b4ee6c89b
19,444
def getCP2KBasisFromPlatoOrbitalGauPolyBasisExpansion(gauPolyBasisObjs, angMomVals, eleName, basisNames=None, shareExp=True, nVals=None): """ Gets a BasisSetCP2K object, with coefficients normalised, from an iter of GauPolyBasis objects in plato format Args: gauPolyBasisObjs: (iter plato_pylib GauPolyBasis object) Each element is the Plato representation of a basis function angMomVals: (iter of int) Angular momentum values for each orbital eleName: (str) Label for the element used in this basis set basisNames: (iter of str) Names used to specify this basis set in the CP2K input file (more than one allowed) shareExp: (Bool, Optional) If True, will try to exploit shared exponents when generating the basis set Returns outBasis: (BasisSetCP2K Object) Convenient representation of a basis set for CP2K; this is the object that would be parsed from a CP2K basis file """ if basisNames is None: basisNames = ["basis_set_a"] if nVals is None: nVals = [1 for x in gauPolyBasisObjs] splitExponentSets = [getCP2KExponentSetFromGauPolyBasis(gaus, angMom, nVal) for gaus,angMom,nVal in it.zip_longest(gauPolyBasisObjs, angMomVals, nVals)] if shareExp: outExponentSets = _getExponentSetsWithSharedExponentPartsMerged(splitExponentSets) else: outExponentSets = splitExponentSets outObj = BasisSetCP2K(eleName, basisNames, outExponentSets) return outObj
1a23922122dd0d32c69e8829d891811ec0ad37b1
19,446
import math def longitude_to_utm_epsg(longitude): """ Return Proj4 EPSG for a given longitude in degrees """ zone = int(math.floor((longitude + 180) / 6) + 1) epsg = '+init=EPSG:326%02d' % (zone) return epsg
f17a03514cc9caf99e1307c0382d7b9fa0289330
19,447
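Usage sketch for the longitude_to_utm_epsg snippet above (added for illustration; the example longitudes are arbitrary assumptions):

print(longitude_to_utm_epsg(-122.4))  # San Francisco -> UTM zone 10 -> '+init=EPSG:32610'
print(longitude_to_utm_epsg(13.4))    # Berlin        -> UTM zone 33 -> '+init=EPSG:32633'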
def SupportVectorRegression(X_train, X_test, y_train, y_test, search, save=False):
    """
    Support Vector Regression.

    Can run a grid search to look for the best parameters (search=True)
    and save the model to a file (save=True).
    """
    if search:
        # parameter values over which we will search
        parameters = {'C': [0.1, 0.5, 1., 1.5, 2.],
                      'kernel': ['rbf', 'sigmoid', 'poly'],
                      'degree': [3, 5]}

        s = SVR()
        clf = grid_search.GridSearchCV(s, parameters, scoring='r2',
                                       n_jobs=-1, verbose=1, cv=3)
    else:
        clf = SVR(verbose=1)

    print '\nTraining...'
    clf.fit(X_train, y_train)
    print 'Done'

    if search:
        print 'The best score and estimator:'
        print(clf.best_score_)
        print(clf.best_estimator_)
        print 'Best hyperparameters:'
        print clf.best_params_
        clf = clf.best_estimator_

    if save:
        print 'Save the SVR model to a pickled file...'
        fp = open('model/SVR.pkl', 'w')
        cPickle.dump(clf, fp)
        fp.close()

    print '\nPredicting...'
    predicted = clf.predict(X_test)
    expected = y_test.copy()
    print 'Done'

    return predicted, expected
e51e2aa3a706fb2308b9e693bef67950f4eb8a0b
19,448
import tensorflow as tf

def conv_relu_pool(X, conv_params, pool_params):
    """ Initializes weights and biases and does a 2d conv-relu-pool """
    # Initialize weights and biases (bias has one entry per output channel)
    W = tf.Variable(tf.truncated_normal(conv_params['shape'], stddev=0.1))
    b = tf.Variable(tf.constant(0.1, shape=[conv_params['shape'][-1]]))

    conv = tf.nn.conv2d(X, W, strides=conv_params['strides'],
                        padding=conv_params['padding']) + b

    # Simple ReLU activation function
    conv = tf.nn.relu(conv)

    # 2 by 2 max pooling with a stride of 2
    out = tf.nn.max_pool(conv, ksize=pool_params['shape'],
                         strides=pool_params['strides'],
                         padding=pool_params['padding'])
    return out
d3bba15fbba220790eaf2e74cec0595b3d327479
19,449
def compute_node_depths(tree): """Returns a dictionary of node depths for each node with a label.""" res = {} for leaf in tree.leaf_node_iter(): cnt = 0 for anc in leaf.ancestor_iter(): if anc.label: cnt += 1 res[leaf.taxon.label] = cnt return res
a633f77d0fff1f29fe95108f96ccc59817179ddd
19,450
def create_device(hostname, address, username="root", password=""): """Create and return a DeviceInfo struct.""" return DeviceInfo(hostname, address, username, password)
af5d722ae1a7f2383bc274340313bfcc61c56c18
19,451
def max_box(box1, box2): """ return the maximum of two bounding boxes """ ext = lambda values: min(values) if sum(values) <= 0 else max(values) return tuple(tuple(ext(offs) for offs in zip(dim[0], dim[1])) for dim in zip(box1, box2))
5323e927999e613ac709bca57373e1985c65946b
19,452
def homogenize(a, w=1.0): """ Example: a=[ [a00, a01], [a10, a11], [a20, a21] ], w=1 -> result=[ [a00, a01, w], [a10, a11, w], [a20, a21, w] ] """ return np.hstack([a, np.full((len(a), 1), w, a.dtype)])
dd8dec2d96a6c6aa04d6754ee66c93230b91a309
19,453
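Usage sketch for the homogenize snippet above (added for illustration; assumes NumPy and arbitrary example points):

import numpy as np

pts = np.array([[0.0, 0.0], [2.0, 1.0], [3.0, 5.0]])
print(homogenize(pts))         # appends a column of 1.0 (homogeneous points)
print(homogenize(pts, w=0.0))  # appends a column of 0.0 (directions rather than points)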
def find_similar_term(term, dictionary): """ Returns a list of terms similar to the given one according to the Damerau-Levenshtein distance https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance """ return list(filter(lambda t: textdistance.damerau_levenshtein.distance(t, term) <= 2, dictionary))
a4fca3fbfee5597ae4a333f6e8856fc75aa31a2a
19,454
import hashlib def calc_sign(string): """str/any->str return MD5. From: Biligrab, https://github.com/cnbeining/Biligrab MIT License""" return str(hashlib.md5(str(string).encode('utf-8')).hexdigest())
3052e18991b084b3a220b0f3096d9c065cf4661c
19,455
from typing import Callable, Dict, Optional, Tuple


def compile_circuit(
        circuit: cirq.Circuit,
        *,
        device: cirq.google.xmon_device.XmonDevice,
        compiler: Callable[[cirq.Circuit], cirq.Circuit] = None,
        routing_algo_name: Optional[str] = None,
        router: Optional[Callable[..., ccr.SwapNetwork]] = None,
) -> Tuple[cirq.Circuit, Dict[cirq.ops.Qid, cirq.ops.Qid]]:
    """Compile the given model circuit onto the given device.

    This uses a different compilation method than described in
    https://arxiv.org/pdf/1811.12926.pdf Appendix A. The latter goes through a
    7-step process involving various decompositions, routing, and optimization
    steps. We route the model circuit and then run a series of optimizers on
    it (which can be passed into this function).

    Args:
        circuit: The model circuit to compile.
        device: The device to compile onto.
        compiler: An optional function to deconstruct the model circuit's
            gates down to the target devices gate set and then optimize it.

    Returns:
        A tuple where the first value is the compiled circuit and the second
        value is the final mapping from the model circuit to the compiled
        circuit. The latter is necessary in order to preserve the measurement
        order.
    """
    compiled_circuit = circuit.copy()

    # Swap Mapping (Routing). Ensure the gates can actually operate on the
    # target qubits given our topology.
    if router is None and routing_algo_name is None:
        routing_algo_name = 'greedy'
    swap_network = ccr.route_circuit(compiled_circuit,
                                     ccr.xmon_device_to_graph(device),
                                     router=router,
                                     algo_name=routing_algo_name)
    compiled_circuit = swap_network.circuit

    # Compile. This should decompose the routed circuit down to a gate set
    # that our device supports, and then optimize. The paper uses various
    # compiling techniques - because Quantum Volume is intended to test those
    # as well, we allow this to be passed in. This compiler is not allowed to
    # change the order of the qubits.
    if compiler:
        compiled_circuit = compiler(compiled_circuit)

    return compiled_circuit, swap_network.final_mapping()
f0e64da1aa7b87d1459c8708493ac5ec59b74c33
19,456
def remove_prefix(utt, prefix):
    """
    Check that utt begins with prefix+" ", and then remove.

    Inputs:
    utt: string
    prefix: string

    Returns:
    new utt: utt with the prefix+" " removed.
    """
    try:
        assert utt[: len(prefix) + 1] == prefix + " "
    except AssertionError as e:
        print("ERROR: utterance '%s' does not start with '%s '" % (utt, prefix))
        print(repr(utt[: len(prefix) + 1]))
        print(repr(prefix + " "))
        raise e
    return utt[len(prefix) + 1:]
fa6717e34c6d72944636f6b319b98574f2b41a69
19,457
def format_ucx(name, idx):
    """ Formats a name and index as a collider """
    # one digit of zero padding
    idxstr = str(idx).zfill(2)
    return "UCX_%s_%s" % (name, idxstr)
c3365bf66bca5fe7ab22bd642ae59dfb618be251
19,458
import mariadb
from flask import current_app, g


def get_connection():
    """
    Return a connection to the database and cache it on the `g` object.

    Generally speaking, each app context has its own connection to the
    database; these are destroyed when the app context goes away (ie, when
    the server is done handling that request).
    """
    if 'db_connection' not in g:
        # mariadb.connect might throw an error if it can't connect to the
        # database, but that's okay--Flask will just turn that into an HTTP
        # 500 response, which is the correct behavior in this case.
        g.db_connection = mariadb.connect(
            user=current_app.config['DB_USER'],
            password=current_app.config['DB_PASSWORD'],
            host=current_app.config['DB_HOST'],
            port=current_app.config['DB_PORT'],
            database=current_app.config['DB_NAME'],
        )
    return g.db_connection
33d4f75aeead593a0521797ac1df07324d05cb16
19,459
import numpy as np


def first_location_of_minimum(x):
    """
    Returns the first location of the minimal value of x.
    The position is calculated relatively to the length of x.

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :return: the value of this feature
    :return type: float
    """
    x = np.asarray(x)
    return np.argmin(x) / len(x) if len(x) > 0 else np.NaN
88f7630931215f656bc99d31eea88a4f106a5faa
19,460
def _positive_int(integer_string, strict=False, cutoff=None):
    """
    Cast a string to a strictly positive integer.
    """
    ret = int(integer_string)
    if ret == -1:
        return -1
    if ret < 0 or (ret == 0 and strict):
        raise ValueError()
    if cutoff:
        return min(ret, cutoff)
    return ret
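# Usage sketch, added to illustrate the strict/cutoff behaviour.
print(_positive_int("5"))                # 5
print(_positive_int("300", cutoff=100))  # 100 (clamped to the cutoff)
print(_positive_int("-1"))               # -1 (explicitly allowed special case)
# _positive_int("0", strict=True) raises ValueError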
ecb4908fdb5223dc7301f68792c1540154931dfe
19,461
def x1_0_mult(guess, slip):
    """
    Compute x1_0 element-wise
    :param guess: (np.array) [odds]
    :param slip: (np.array) [odds]
    :return: np.array
    """
    return ((1.0 + guess) / (guess * (1.0 + slip))) / x0_mult(guess, slip)
2c00f210c89b2a85ef40fe0adabd89c2034722e3
19,462
def get_xblock_app_config():
    """
    Get whichever of the above AppConfig subclasses is active.
    """
    return apps.get_app_config(XBlockAppConfig.label)
8b937738a0c1c2c1131a37975808d249d0b06899
19,463
def remove_experiment_requirement(request, object_id, object_type):
    """Removes the requirement from the experiment, expects requirement_id
    (PK of req object) to be present in request.POST"""
    if request.POST:
        assert 'requirement_id' in request.POST
        requirement_id = request.POST['requirement_id']
        exp_or_package = get_package_or_experiment(request, object_type, object_id)
        requirement = exp_or_package.requirements.filter(pk=requirement_id)
        if requirement:
            requirement = requirement[0]
            with transaction.atomic():
                exp_or_package.requirements.remove(requirement)
                exp_or_package.save()
                logger.info("deleted dependency %s from experiment %s",
                            requirement, exp_or_package)
                requirement.delete()
            return JsonResponse({'deleted': True})
    return JsonResponse({'deleted': False})
3cd0b2919bd47038cbc7f3431d552a3864395645
19,464
from typing import List


def _k_hot_from_label_names(labels: List[str], symbols: List[str]) -> List[int]:
    """Converts text labels into symbol list index as k-hot."""
    k_hot = [0] * len(symbols)
    for label in labels:
        try:
            k_hot[symbols.index(label)] = 1
        except ValueError:
            # list.index raises ValueError (not IndexError) for a missing label
            raise ValueError(
                'Label %s did not appear in the list of defined symbols %r' %
                (label, symbols))
    return k_hot
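# Usage sketch, added for illustration with made-up symbols: labels ['dog', 'bird']
# against symbols ['cat', 'dog', 'bird'] should yield the k-hot vector [0, 1, 1].
print(_k_hot_from_label_names(['dog', 'bird'], ['cat', 'dog', 'bird']))  # [0, 1, 1]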
e074b55e9dae2f8aec6beb14d863f7356035705d
19,465
import logging
import resource


def upload(training_dir, algorithm_id=None, writeup=None, api_key=None,
           ignore_open_monitors=False):
    """Upload the results of training (as automatically recorded by your
    env's monitor) to OpenAI Gym.

    Args:
        training_dir (Optional[str]): A directory containing the results of a
            training run.
        algorithm_id (Optional[str]): An algorithm id indicating the
            particular version of the algorithm (including choices of
            parameters) you are running (visit
            https://gym.openai.com/algorithms to create an id)
        writeup (Optional[str]): A Gist URL (of the form
            https://gist.github.com/<user>/<id>) containing your writeup for
            this evaluation.
        api_key (Optional[str]): Your OpenAI API key. Can also be provided as
            an environment variable (OPENAI_GYM_API_KEY).
    """
    if not ignore_open_monitors:
        open_monitors = monitoring._open_monitors()
        if len(open_monitors) > 0:
            envs = [m.env.spec.id if m.env.spec else '(unknown)' for m in open_monitors]
            raise error.Error(
                "Still have an open monitor on {}. You must run "
                "'env.monitor.close()' before uploading.".format(', '.join(envs)))

    env_info, training_episode_batch, training_video = upload_training_data(
        training_dir, api_key=api_key)
    env_id = env_info['env_id']
    training_episode_batch_id = training_video_id = None
    if training_episode_batch:
        training_episode_batch_id = training_episode_batch.id
    if training_video:
        training_video_id = training_video.id

    if logger.level <= logging.INFO:
        if training_episode_batch_id is not None and training_video_id is not None:
            logger.info('[%s] Creating evaluation object from %s with learning curve and training video',
                        env_id, training_dir)
        elif training_episode_batch_id is not None:
            logger.info('[%s] Creating evaluation object from %s with learning curve',
                        env_id, training_dir)
        elif training_video_id is not None:
            logger.info('[%s] Creating evaluation object from %s with training video',
                        env_id, training_dir)
        else:
            raise error.Error(
                "[{}] You didn't have any recorded training data in {}. "
                "Once you've used 'env.monitor.start(training_dir)' to start recording, "
                "you need to actually run some rollouts. Please join the community chat "
                "on https://gym.openai.com if you have any issues.".format(env_id, training_dir))

    evaluation = resource.Evaluation.create(
        training_episode_batch=training_episode_batch_id,
        training_video=training_video_id,
        env=env_info['env_id'],
        algorithm={
            'id': algorithm_id,
        },
        writeup=writeup,
        gym_version=env_info['gym_version'],
        api_key=api_key,
    )

    logger.info("""
****************************************************
You successfully uploaded your evaluation on %s to
OpenAI Gym! You can find it at:

    %s

****************************************************
    """.rstrip(), env_id, evaluation.web_url())

    return evaluation
fbb07310bffb82b0831b08334014a33831a8714d
19,466
from typing import List, Tuple


def partition_analysis(analysis: str) -> Tuple[List[FSTTag], FSTLemma, List[FSTTag]]:
    """
    :return: the tags before the lemma, the lemma itself, the tags after the lemma
    :raise ValueError: when the analysis is not parsable.

    >>> partition_analysis('PV/e+fakeword+N+I')
    (['PV/e'], 'fakeword', ['N', 'I'])
    >>> partition_analysis('fakeword+N+I')
    ([], 'fakeword', ['N', 'I'])
    >>> partition_analysis('PV/e+PV/ki+atamihêw+V+TA+Cnj+1Pl+2SgO')
    (['PV/e', 'PV/ki'], 'atamihêw', ['V', 'TA', 'Cnj', '1Pl', '2SgO'])
    """
    match = partition_pattern.match(analysis)
    if not match:
        raise ValueError(f"analysis not parsable: {analysis}")
    pre, lemma, post = match.groups()
    return (
        [FSTTag(t) for t in pre.split("+") if t],
        FSTLemma(lemma),
        [FSTTag(t) for t in post.split("+") if t],
    )
e071bc5a9d624ef42ea55bb1a4ce5e612715a06c
19,467
def dummy_dictionary(
    dummy_tokens=3,
    additional_token_list=None,
    dictionary_cls=pytorch_translate_dictionary.Dictionary,
):
    """First adds the amount of dummy_tokens that you specify, then finally
    the additional_token_list, which is a list of string token values"""
    d = dictionary_cls()
    for i in range(dummy_tokens):
        token = f"token_{i}"
        d.add_symbol(token)
    if additional_token_list is not None:
        for token in additional_token_list:
            d.add_symbol(token)
    d.finalize(padding_factor=-1)
    return d
870456e86ddf4f98643387945f9823c0bd7cd532
19,468
def lies_in_epsilon(x: Num, c: Num, e: Num) -> bool:
    """
    Checks whether the value x lies in the interval [c - e, c + e].

    :param x: the value to check
    :param c: the value whose epsilon-neighbourhood is checked for containing x
    :param e: the epsilon-neighbourhood around the value c
    :return: True if the point lies in the interval, otherwise False.
    """
    if (x >= (c - e)) and (x <= (c + e)):
        return True
    return False
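# Usage sketch, added for illustration: 2.05 lies in [1.9, 2.1], 2.2 does not.
print(lies_in_epsilon(2.05, 2.0, 0.1))  # True
print(lies_in_epsilon(2.2, 2.0, 0.1))   # False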
d7e2b4dec95859d1c5992a64d689eab4ec543001
19,469
import numpy as np


def decimal_to_digits(decimal, min_digits=None):
    """
    Return the number of digits to the first nonzero decimal.

    Parameters
    -----------
    decimal: float
    min_digits: int, minimum number of digits to return

    Returns
    -----------
    digits: int, number of digits to the first nonzero decimal
    """
    digits = abs(int(np.log10(decimal)))
    if min_digits is not None:
        digits = np.clip(digits, min_digits, 20)
    return digits
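# Usage sketch, added for illustration: 0.004 has its first nonzero digit two
# places after the decimal point; min_digits raises the floor of the result.
print(decimal_to_digits(0.004))                # 2
print(decimal_to_digits(0.004, min_digits=5))  # 5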
773cc534208fd08b66f86f8bf4fb465b9ac0a15d
19,470
def lcs(a, b):
    """
    Compute the length of the longest common subsequence between two sequences.

    Time complexity: O(len(a) * len(b))
    Space complexity: O(min(len(a), len(b)))
    """
    # This is an adaptation of the standard LCS dynamic programming algorithm
    # tweaked for lower memory consumption.
    # Sequence a is laid out along the rows, b along the columns.
    # Minimize number of columns to minimize required memory
    if len(a) < len(b):
        a, b = b, a
    # Sequence b now has the minimum length
    # Quit early if one sequence is empty
    if len(b) == 0:
        return 0
    # Use a single buffer to store the counts for the current row, and
    # overwrite it on each pass
    row = [0] * len(b)
    for ai in a:
        left = 0
        diag = 0
        for j, bj in enumerate(b):
            up = row[j]
            if ai == bj:
                value = diag + 1
            else:
                value = max(left, up)
            row[j] = value
            left = value
            diag = up
    # Return the last cell of the last row
    return left
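# Usage sketch, added for illustration: the textbook pair "ABCBDAB" / "BDCABA"
# has a longest common subsequence of length 4 (e.g. "BCBA").
print(lcs("ABCBDAB", "BDCABA"))  # 4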
0201e9efade98aece854e05d0910192251e5f63c
19,471
def paginate_years(year):
    """Return a list of years for pagination"""
    START_YEAR = 2020  # first year that budgets were submitted using this system
    y = int(year)
    return (START_YEAR, False, y - 1, y, y + 1, False, settings.CURRENT_YEAR)
37ed9fdd6e1af69a3e404e64f6d42634ea6a0d2e
19,472
def wait_for_service_tasks_state(
        service_name,
        expected_task_count,
        expected_task_states,
        timeout_sec=120
):
    """ Returns once the service has at least N tasks in one of the specified state(s)

        :param service_name: the service name
        :type service_name: str
        :param expected_task_count: the expected number of tasks in the specified state(s)
        :type expected_task_count: int
        :param expected_task_states: the expected state(s) for tasks to be in, e.g. 'TASK_RUNNING'
        :type expected_task_states: [str]
        :param timeout_sec: duration to wait
        :type timeout_sec: int

        :return: the duration waited in seconds
        :rtype: int
    """
    return time_wait(
        lambda: task_states_predicate(service_name, expected_task_count, expected_task_states),
        timeout_seconds=timeout_sec)
a19ed21a7996dcd08148727c8120596dc6bdac18
19,473
def eval_add(lst):
    """Evaluate an addition expression.

    For addition rules, the parser will return
    [number, [[op, number], [op, number], ...]]
    To evaluate that, we start with the first element of the list as result
    value, and then we iterate over the pairs that make up the rest of the
    list, adding or subtracting depending on the operator.
    """
    first = lst[0]
    result = first
    for n in lst[1]:
        if n[0] == '+':
            result += n[1]
        else:
            result -= n[1]
    return result
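# Usage sketch, added for illustration: the parse tree for "5 + 3 - 2" would be
# [5, [['+', 3], ['-', 2]]], which evaluates to 6.
print(eval_add([5, [['+', 3], ['-', 2]]]))  # 6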
5d6972ccc7a0857da224e30d579b159e89fb8dce
19,474
import bcrypt


def is_password_valid(plaintextpw: str, storedhash: str) -> bool:
    """
    Checks if a plaintext password matches a stored hash.

    Uses ``bcrypt``. The stored hash includes its own incorporated salt.
    """
    # Upon moving CamCOPS from MySQL 5.5.34 (Ubuntu) to 5.1.71 (CentOS 6.5),
    # the VARCHAR was retrieved as Unicode. We needed to convert that to a str.
    # For Python 3 compatibility, we just str-convert everything, avoiding the
    # unicode keyword, which no longer exists.
    if storedhash is None:
        storedhash = ""
    storedhash = str(storedhash)
    if plaintextpw is None:
        plaintextpw = ""
    plaintextpw = str(plaintextpw)
    try:
        h = bcrypt.hashpw(plaintextpw, storedhash)
    except ValueError:  # e.g. ValueError: invalid salt
        return False
    return h == storedhash
8c2bc03bd4d0445fd157823b0e0303761d6638ea
19,476
def find_item(item_to_find, items_list):
    """
    Returns True if an item is found in the item list.

    :param item_to_find: item to be found
    :param items_list: list of items to search in
    :return boolean
    """
    is_found = False
    for item in items_list:
        if (item[1] == item_to_find[1]) and mention_match(item[0], item_to_find[0]):
            is_found = True
    return is_found
acf02fbe8b1599105d86ecc6aaa98023901561c0
19,477
def validate_target_types(target_type):
    """
    Target types validation rule.
    Property: SecretTargetAttachment.TargetType
    """
    VALID_TARGET_TYPES = (
        "AWS::RDS::DBInstance",
        "AWS::RDS::DBCluster",
        "AWS::Redshift::Cluster",
        "AWS::DocDB::DBInstance",
        "AWS::DocDB::DBCluster",
    )
    if target_type not in VALID_TARGET_TYPES:
        raise ValueError(
            "Target type must be one of : %s" % ", ".join(VALID_TARGET_TYPES)
        )
    return target_type
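# Usage sketch, added for illustration: a known target type is returned unchanged,
# anything else raises ValueError.
print(validate_target_types("AWS::RDS::DBInstance"))  # "AWS::RDS::DBInstance"
# validate_target_types("AWS::S3::Bucket") raises ValueError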
db33903e36849fb8f97efecb95f1bbfa8150ed6f
19,478
def cprint(*objects, **kwargs):
    """Apply Color formatting to output in terminal.

    Same as builtin print function with added 'color' keyword argument.
    eg: cprint("data to print", color="red", sep="|")

    available colors:
        black, red, green, yellow, blue, pink, cyan, white, no-color
    """
    colors = {
        "black": "\033[0;30m",
        "red": "\033[0;31m",
        "green": "\033[0;92m",
        "yellow": "\033[0;93m",
        "blue": "\033[0;34m",
        "pink": "\033[0;95m",
        "cyan": "\033[0;36m",
        "white": "\033[0;37m",
        "no-color": "\033[0m"
    }
    color = kwargs.pop('color', 'no-color')
    return print(colors[color], *objects, colors['no-color'], **kwargs)
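# Usage sketch, added for illustration: prints the message wrapped in the yellow
# ANSI escape code and resets the colour afterwards.
cprint("disk space is running low", color="yellow")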
42d0f2357da7f84404a888cf717a737d86609aa4
19,479
from typing import List


def clean(tokens: List[str]) -> List[str]:
    """
    Returns a list of unique tokens without any stopwords.

    Input(s):
    1) tokens - List containing all tokens.

    Output(s):
    1) unique_tokens - List of unique tokens with all stopwords removed.
    """
    # handle alphanumeric strings; filter instead of mutating the list while
    # iterating over it (list.pop expects an index, not a value)
    unique_tokens = [word for word in set(tokens) if word not in hindi_stopwords]
    return unique_tokens
ccc0fdba6ffd3cf699ac39013631880061812f9f
19,481
def validate_file_submission():
    """Validate the uploaded file, returning the file if so."""
    if "sourceFile" not in flask.request.files:
        raise util.APIError(400, message="file not provided (must "
                                         "provide as sourceFile).")
    # Save to GCloud
    uploaded_file = flask.request.files["sourceFile"]
    uploaded_file.seek(0)
    return uploaded_file
f3636851448e26b814196a56fe56061fb9acffcf
19,482
def oauth_redirect(request, consumer_key=None, secret_key=None,
                   request_token_url=None, access_token_url=None,
                   authorization_url=None, callback_url=None, parameters=None):
    """
    View to handle the OAuth based authentication redirect to the service provider
    """
    request.session['next'] = get_login_redirect_url(request)
    client = OAuthClient(request, consumer_key, secret_key, request_token_url,
                         access_token_url, authorization_url, callback_url,
                         parameters)
    return client.get_redirect()
7e1491d6643d9610c207a2c92164c9f75526cf95
19,483
def render_template(content, context):
    """Render templates in content."""
    # Fix None issues
    if context.last_release_object is not None:
        prerelease = context.last_release_object.prerelease
    else:
        prerelease = False

    # Render the template
    try:
        render = Template(content)
        render = render.render(
            installed=context.installed,
            pending_update=context.pending_update,
            prerelease=prerelease,
            selected_tag=context.selected_tag,
            version_available=context.last_release_tag,
            version_installed=context.version_installed
        )
        return render
    except Exception as exception:
        context.logger.warning(
            "Error rendering info template {}".format(exception), "template")
    return content
e72a574b39888202e11ca51a279ff9306e4c2da3
19,485
def isJacobianOnS256Curve(x, y, z):
    """
    isJacobianOnS256Curve returns boolean if the point (x,y,z) is on the
    secp256k1 curve.

    Elliptic curve equation for secp256k1 is: y^2 = x^3 + 7
    In Jacobian coordinates, Y = y/z^3 and X = x/z^2
    Thus:
    (y/z^3)^2 = (x/z^2)^3 + 7
    y^2/z^6 = x^3/z^6 + 7
    y^2 = x^3 + 7*z^6
    """
    fv = FieldVal
    y2, z2, x3, result = fv(), fv(), fv(), fv()
    y2.squareVal(y).normalize()
    z2.squareVal(z)
    x3.squareVal(x).mul(x)
    result.squareVal(z2).mul(z2).mulInt(7).add(x3).normalize()
    return y2.equals(result)
87ecd5d9c42d9a27cd40c49a6034c73a5f784855
19,486
def decode_base85(encoded_str):
    """Decodes a base85 string.

    The input string length must be a multiple of 5, and the resultant
    binary length is always a multiple of 4.
    """
    if len(encoded_str) % 5 != 0:
        raise ValueError('Input string length is not a multiple of 5; ' +
                         str(len(encoded_str)))

    if not _char_to_value:
        for i, ch in enumerate(_BASE85_CHARACTERS):
            _char_to_value[ch] = i

    result = ''
    i = 0
    while i < len(encoded_str):
        acc = 0
        for _ in range(5):
            ch = encoded_str[i]
            if ch not in _char_to_value:
                raise ValueError('Invalid base85 character; "{}"'.format(ch))
            new_acc = acc * 85 + _char_to_value[ch]
            assert new_acc >= acc
            acc = new_acc
            i += 1
        for _ in range(4):
            result += chr(acc >> 24)
            acc = (acc & 0x00ffffff) << 8
            assert acc >= 0
    return result
0cae86ac35cce55afdbbb6d48b86af3fb05eaf8f
19,487
def process_doc(doc: PDFDocument):
    """Process PDF Document, return info and metadata.

    Some PDFs store information such as the title in the info field, while
    some newer PDFs store it in the metadata field. The processor reads raw
    XMP data and converts it to a dictionary.

    Parameters
    ----------
    doc : PDFDocument
        PDF Document object to process.

    Returns
    -------
    info : dict
        Field info of the doc, return {} if no info field.
    metadata : dict
        Field metadata of the doc, return {} if no metadata field.
    """
    # if info is a list, resolve it
    info = doc.info if doc.info else {}
    if isinstance(info, list):
        info = info[0]

    # try to get metadata
    if 'Metadata' in doc.catalog:
        # resolve1 will resolve object recursively
        # result of resolve1(doc.catalog['Metadata']) is PDFStream
        metadata = resolve1(doc.catalog['Metadata']).get_data()
        # use xmp_to_dict to resolve XMP and get metadata
        metadata = xmp_to_dict(metadata)
    else:
        metadata = {}
    return info, metadata
fc8e4df6d00a4afb23a2693a4222b1809e7a8d6c
19,488
from sklearn.metrics import r2_score


def score_lin_reg(est, X, y, sample_weight=None, level=1):
    """
    Scores a fitted linear regression model.

    Parameters
    -----------
    est:
        The fitted estimator.

    X: array-like, shape (n_samples, n_features)
        The test X data.

    y_true: array-like, shape (n_samples, )
        The true responses.

    sample_weight: array-like shape (n_samples,)
        Sample weights.

    level: int
        How much data to return.

    Output
    ------
    scores: dict
        Contains the scores.
    """
    # get predictions
    y_pred = est.predict(X)

    out = {}
    out['r2'] = r2_score(y_true=y, y_pred=y_pred, sample_weight=sample_weight)

    if level >= 1:
        to_add = additional_regression_data(y_true=y, y_pred=y_pred,
                                            coef=est.coef_,
                                            sample_weight=sample_weight)
        out.update(to_add)

    return out
855c93e03dbcd30929e08e3b00670c9ba1755fb8
19,489
from typing import List


def select_questions(db: Database) -> List[Row]:
    """
    Selects a list of 20 questions from the database using a
    spaced-repetition algorithm.

    The questions are drawn from 3 quizzes in a set ratio: 10 from the first
    quiz, 7 from the second, and 3 from the third. The quizzes and questions
    are selected based on the strength of memorization and the time since
    they were last asked.
    """
    quizzes = select_quizzes(db)
    questions = []
    questions.extend(select_questions_from_quizzes(db, quizzes[0], 10))
    questions.extend(select_questions_from_quizzes(db, quizzes[1], 7))
    questions.extend(select_questions_from_quizzes(db, quizzes[2], 3))
    return questions
b4c624e3fc7c3e1dc9962a77fb2238d97737fa43
19,490
def process_ona_webhook(instance_data: dict):
    """
    Custom Method that takes instance data and creates or Updates an Instance
    Then Returns True if Instance was created or updated
    """
    instance_obj = process_instance(instance_data)
    if instance_obj is None:
        return False
    return True
dabb1d81cbb91e0484ec66c101d7f1019fe08edc
19,491
import torch


def test_quantized_conv2d_nonfunctional():
    """Basic test of the PyTorch quantized conv2d Node with external
    quantized input on Glow."""

    def test_f(a):
        q = torch.nn.quantized.Quantize(1/16, 0, torch.quint8)
        dq = torch.nn.quantized.DeQuantize()
        conv = torch.nn.quantized.Conv2d(1, 1, [2, 2])
        return dq(conv(q(a)))

    x = torch.tensor([[[[5., 6.], [7., 8.]]]])

    jitVsGlow(test_f, x, expected_fused_ops={"aten::quantize_per_tensor",
                                             "glow::unpacked_quantized_conv2d",
                                             "aten::dequantize"})
316cf6e8abbea810ec02a3dd94eccbc050e3915d
19,492
import math


def distance(v, w):
    """the distance between two vectors"""
    return math.sqrt(squared_distance(v, w))
301e98d6c2bb8b5e10ae7efd9fa65abd375d048f
19,493
def QA_SU_save_stock_min_5(file_dir, client=DATABASE):
    """save stock_min5

    Arguments:
        file_dir {[type]} -- [description]

    Keyword Arguments:
        client {[type]} -- [description] (default: {DATABASE})

    Returns:
        [type] -- [description]
    """
    return tdx_file.QA_save_tdx_to_mongo(file_dir, client)
85cfeeebf7673cfb6ee6957f29e8a3185ec14da2
19,495
def get_course_by_name(name):
    """ Return a course dict for the given name, or None
        { 'id':id, 'name':name, 'title':title }
    """
    ret = run_sql(
        """SELECT course, title, description, owner, active, type,
                  practice_visibility, assess_visibility
           FROM courses
           WHERE lower(title) LIKE lower(%s);""",
        [name, ])
    course = None
    if ret:
        row = ret[0]
        course = {
            'id': int(row[0]),
            'name': row[1],
            'title': row[2],
            'owner': row[3],
            'active': row[4],
            'type': row[5],
            'practice_visibility': row[6],
            'assess_visibility': row[7]
        }
        if not course['practice_visibility']:
            course['practice_visibility'] = "all"
        if not course['assess_visibility']:
            course['assess_visibility'] = "all"
    return course
78485e616f42aabe095eaeff687ad9dab94c1dad
19,496
import datetime
import re


def timedelta(string):
    """
    Parse :param string: into :class:`datetime.timedelta`, you can use any
    (logical) combination of Nw, Nd, Nh and Nm, e.g. `1h30m` for 1 hour, 30
    minutes or `3w` for 3 weeks.

    Raises a ValueError if the input is invalid/unparseable.

    >>> print(timedelta("3w"))
    21 days, 0:00:00

    >>> print(timedelta("3w 12h 57m"))
    21 days, 12:57:00

    >>> print(timedelta("1h30m37s"))
    1:30:37

    >>> print(timedelta("1asdf3w"))
    Traceback (most recent call last):
        ...
    ValueError: invalid human-readable timedelta
    """
    keys = ["weeks", "days", "hours", "minutes", "seconds"]
    regex = "".join(["((?P<%s>\\d+)%s ?)?" % (k, k[0]) for k in keys])
    kwargs = {}
    for k, v in re.match(regex, string).groupdict(default="0").items():
        kwargs[k] = int(v)
    rv = datetime.timedelta(**kwargs)
    if rv == datetime.timedelta():
        raise ValueError("invalid human-readable timedelta")
    return datetime.timedelta(**kwargs)
664dfb9e46017b507d04174da94d6da8a542fb31
19,498
def englishTextNull(englishInputNull):
    """
    This function returns True if the input is empty, otherwise False.
    """
    return englishInputNull == ''
2ad18e38f474a164a21bdc790bee05c2a8e06e89
19,499
def authority_b(request, configuration, authority_a):
    """
    Intermediate authority_a

    valid_authority -> authority_a -> authority_b
    """
    authority = configuration.manager.get_or_create_ca(
        "authority_b", hosts=["*.com"], certificate_authority=authority_a)
    request.addfinalizer(authority.delete_files)
    return authority
ac9ec3c33e06ba506efe51f4ceafa96cfe91f761
19,500
import numpy as np


def derive(pattern):
    """
    Calculate the first derivative of the pattern.

    Smoothes the input first, so noisy patterns shouldn't be much of a problem.
    """
    return np.gradient(smooth_pattern(pattern))
4f2bdbb34f4d36427b3d9c1effff4d13a7d41552
19,501