Dataset columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def create_xml_content(
    segmentation: list[dict],
    lang_text: list[str],
    split: str,
    src_lang: str,
    tgt_lang: str,
    is_src: bool,
) -> list[str]:
    """
    Args:
        segmentation (list): content of the yaml file
        lang_text (list): content of the transcription or translation txt file
        split (str): the split name
        src_lang (str): source language id
        tgt_lang (str): target language id
        is_src (bool): whether lang_text is transcriptions

    Returns:
        xml_content (list)
    """
    xml_content = []
    xml_content.append('<?xml version="1.0" encoding="UTF-8"?>')
    xml_content.append("<mteval>")
    if is_src:
        xml_content.append(f'<srcset setid="{split}" srclang="{src_lang}">')
    else:
        xml_content.append(
            f'<refset setid="{split}" srclang="{src_lang}" trglang="{tgt_lang}" refid="ref">'
        )
    prev_talk_id = -1
    for sgm, txt in zip(segmentation, lang_text):
        talk_id = sgm["wav"].split(".wav")[0]
        if prev_talk_id != talk_id:
            if prev_talk_id != -1:
                xml_content.append("</doc>")
            # add content (some does not matter, but is added to replicate the required format)
            xml_content.append(f'<doc docid="{talk_id}" genre="lectures">')
            xml_content.append("<keywords>does, not, matter</keywords>")
            xml_content.append("<speaker>Someone Someoneson</speaker>")
            xml_content.append(f"<talkid>{talk_id}</talkid>")
            xml_content.append("<description>Blah blah blah.</description>")
            xml_content.append("<title>Title</title>")
            seg_id = 0
            prev_talk_id = talk_id
        seg_id += 1
        xml_content.append(f'<seg id="{seg_id}">{txt}</seg>')
    xml_content.append("</doc>")
    if is_src:
        xml_content.append("</srcset>")
    else:
        xml_content.append("</refset>")
    xml_content.append("</mteval>")  # fixed: closing tag was missing '>'
    return xml_content
6af6b5fcdaccd5bd81ad202bdb22fad3910afc2b
9,333
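A minimal usage sketch for the create_xml_content snippet above; the segment dicts and text lines are hypothetical stand-ins for the yaml/txt inputs the docstring describes:

# Hypothetical inputs: two segments from the same talk wav.
segmentation = [
    {"wav": "talk_0001.wav", "offset": 0.0, "duration": 2.5},
    {"wav": "talk_0001.wav", "offset": 2.5, "duration": 3.1},
]
transcripts = ["Hello everyone.", "Welcome to the talk."]
xml_lines = create_xml_content(
    segmentation, transcripts, split="tst-COMMON",
    src_lang="en", tgt_lang="de", is_src=True,
)
print("\n".join(xml_lines))  # a <srcset> wrapping one <doc> with two <seg> entries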
def style_string(string: str, fg=None, stylename=None, bg=None) -> str:
    """Apply styles to text.

    It is able to change style (like bold, underline etc), foreground and
    background colors of text string.
    """
    ascii_str = _names2ascii(fg, stylename, bg)
    return "".join((ascii_str, string, _style_dict["reset"]))
6d61c33a632c88609cb551ae0a1d55d8ee836937
9,334
def select_all_genes():
    """ Select all genes from SQLite database """
    query = """
        SELECT GENE_SYMBOL, HGNC_ID, ENTREZ_GENE_ID, ENSEMBL_GENE, MIM_NUMBER
        FROM GENE
    """
    cur = connection.cursor()
    cur.execute(query)
    rows = cur.fetchall()
    genes = []
    for row in rows:
        omim = row[4].split(';') if row[4] != "None" else []
        gene = Gene(gene_symbol=row[0],
                    hgnc_id=row[1],
                    entrez_gene_id=row[2],
                    ensembl_gene=row[3],
                    omim=omim)
        genes.append(gene)
    cur.close()
    return genes
fb73e890d62f247939c1aa9a1e16a8e5f5a75866
9,335
def test_enum_handler(params):
    """Test enum validation."""
    return json_resp(params)
c3a4a9589b5d06813d6afaa55c8f6d9fafa80252
9,336
def get_staff_timetable(url, staff_name):
    """
    Get Staff timetable via staff name
    :param url: base url
    :param staff_name: staff name string
    :return: a list of dicts
    """
    url = url + 'TextSpreadsheet;Staff;name;{}?template=SWSCUST+Staff+TextSpreadsheet&weeks=1-52' \
                '&days=1-7&periods=1-32&Width=0&Height=0'.format(staff_name)
    course_list, name = extract_text_spread_sheet(url, lambda _: False)
    for course in course_list:
        course['Name of Type'] = course['Module']
        course['Module'] = course['Description']
    return course_list, name
0e52604c08bef70d5cfc1fc889c8ced766f49ae5
9,337
def find_ccs(unmerged):
    """
    Find connected components of a list of sets.

    E.g.

    x = [{'a','b'}, {'a','c'}, {'d'}]
    find_ccs(x)
    [{'a','b','c'}, {'d'}]
    """
    merged = set()
    while unmerged:
        elem = unmerged.pop()
        shares_elements = False
        for s in merged.copy():
            if not elem.isdisjoint(s):
                merged.remove(s)
                merged.add(frozenset(s.union(elem)))
                shares_elements = True
        if not shares_elements:
            merged.add(frozenset(elem))
    return [list(x) for x in merged]
4bff4cc32237dacac7737ff509b4a68143a03914
9,338
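A quick check of find_ccs on its docstring example. Note the function consumes its input via pop(), so pass a copy if the original list is still needed:

x = [{'a', 'b'}, {'a', 'c'}, {'d'}]
print(find_ccs(list(x)))  # e.g. [['a', 'c', 'b'], ['d']] (order within a component is unspecified)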
def read_match_df(_url: str, matches_in_section: int = None) -> pd.DataFrame:
    """Return the match list of each group in our own DataFrame format.

    In the JFA-format JSON, a single match looks like the following
    (fields marked "unused" are not consumed here):
    {'matchTypeName': '第1節',
     'matchNumber': '1',  # apparently a running number across the competition
     'matchDate': '2021/07/22',  # unused
     'matchDateJpn': '2021/07/22',
     'matchDateWeek': '木',  # unused
     'matchTime': '20:00',  # unused
     'matchTimeJpn': '20:00',
     'venue': '東京スタジアム',
     'venueFullName': '東京/東京スタジアム',  # unused
     'homeTeamName': '日本',
     'homeTeamQualificationDescription': '',  # unused
     'awayTeamName': '南アフリカ',
     'awayTeamQualificationDescription': '',  # unused
     'score': {
         'homeWinFlag': False,  # unused
         'awayWinFlag': False,  # unused
         'homeScore': '',
         'awayScore': '',
         'homeTeamScore1st': '',  # unused: first-half score
         'awayTeamScore1st': '',  # unused: first-half score
         'homeTeamScore2nd': '',  # unused: second-half score
         'awayTeamScore2nd': '',  # unused: second-half score
         'exMatch': False,
         'homeTeamScore1ex': '',  # unused: first extra-time score
         'awayTeamScore1ex': '',  # unused: first extra-time score
         'homeTeamScore2ex': '',  # unused: second extra-time score
         'awayTeamScore2ex': '',  # unused: second extra-time score
         'homePKScore': '',  # unused: penalty shoot-out score
         'awayPKScore': ''   # unused: penalty shoot-out score
     },
     'scorer': {
         'homeScorer': [],  # unused
         'awayScorer': []   # unused
     },
     'matchStatus': '',
     'officialReportURL': ''  # unused
    }
    """
    match_list = read_match_json(_url)[SCHEDULE_CONTAINER_NAME][SCHEDULE_LIST_NAME]
    # print(match_list)
    result_list = []
    match_index_dict = {}
    for (_count, _match_data) in enumerate(match_list):
        _row = {}
        for (target_key, org_key) in REPLACE_KEY_DICT.items():
            _row[target_key] = _match_data[org_key]
        for (target_key, org_key) in SCORE_DATA_KEY_LIST.items():
            _row[target_key] = _match_data['score'][org_key]
        _regexp_result = SECTION_NO.match(_row['section_no'])
        if _regexp_result:
            section_no = _regexp_result[1]
        elif matches_in_section is not None:
            # No section number is given; compute it when the number of
            # matches per section is known.
            section_no = int(_count / matches_in_section) + 1
        else:
            # Section number unknown
            section_no = 0
        _row['section_no'] = section_no
        if section_no not in match_index_dict:
            match_index_dict[section_no] = 1
        else:
            match_index_dict[section_no] += 1
        _row['match_index_in_section'] = match_index_dict[section_no]
        # In the U18 Prince Takamado Trophy Kanto league, cancellation info
        # unexpectedly arrives in 'venueFullName', so handle it provisionally.
        if '【中止】' in _match_data['venueFullName']:
            print('Cancel Game## ' + _match_data['venueFullName'])
            _row['status'] = '試合中止'
        else:
            print('No Cancel## ' + _match_data['venueFullName'])
        result_list.append(_row)

    return pd.DataFrame(result_list)
0dae5f1669c3e1a1a280967bc75780a7b1aa91a0
9,339
import re

from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer


def tokenize(text):
    """Tokenise text with lemmatizer and case normalisation.

    Args:
        text (str): text required to be tokenized

    Returns:
        list: tokenised list of strings
    """
    url_regex = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    detected_urls = re.findall(url_regex, text)
    for url in detected_urls:
        text = text.replace(url, "urlplaceholder")

    tokens = word_tokenize(text)
    lemmatizer = WordNetLemmatizer()

    clean_tokens = []
    for tok in tokens:
        clean_tok = lemmatizer.lemmatize(tok).lower().strip()
        clean_tokens.append(clean_tok)

    return clean_tokens
56c7dc6ce557257f8716bd502958093eb01a8c50
9,340
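A usage sketch for tokenize, assuming the NLTK resources it relies on have been downloaded:

import nltk
nltk.download("punkt")
nltk.download("wordnet")

print(tokenize("Check https://example.com for DETAILS!"))
# ['check', 'urlplaceholder', 'for', 'details', '!']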
import tensorflow as tf


def reinforce_loss_discrete(classification_logits_t, classification_labels_t,
                            locations_logits_t, locations_labels_t,
                            use_punishment=False):
    """Computes REINFORCE loss for contentious discrete action spaces.

    Args:
        classification_logits_t: List of classification logits at each time point.
        classification_labels_t: List of classification labels at each time point.
        locations_logits_t: List of location logits at each time point.
        locations_labels_t: List of location labels at each time point.
        use_punishment: (Boolean) Reward {-1, 1} if true else {0, 1}.

    Returns:
        reinforce_loss: REINFORCE loss.
    """
    classification_logits = tf.concat(classification_logits_t, axis=0)
    classification_labels = tf.concat(classification_labels_t, axis=0)
    locations_logits = tf.concat(locations_logits_t, axis=0)
    locations_labels = tf.concat(locations_labels_t, axis=0)
    rewards = tf.cast(
        tf.equal(
            tf.argmax(classification_logits, axis=1,
                      output_type=classification_labels.dtype),
            classification_labels),
        dtype=tf.float32)  # size (batch_size) each
    if use_punishment:
        # Rewards is \in {-1 and 1} instead of {0, 1}.
        rewards = 2. * rewards - 1.
    neg_advs = tf.stop_gradient(rewards - tf.reduce_mean(rewards))
    log_prob = -tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=locations_logits, labels=locations_labels)
    loss = -tf.reduce_mean(neg_advs * log_prob)
    return loss
7296f0647d792ce0698cd48d2b56e30941ca1afb
9,341
import itertools

import numpy as np


def distances(spike_times, ii_spike_times, epoch_length=1.0, metric='SPOTD_xcorr'):
    """Compute temporal distances based on various versions of the SPOTDis,
    using CPU parallelization.

    Parameters
    ----------
    spike_times : numpy.ndarray
        1 dimensional matrix containing all spike times
    ii_spike_times : numpy.ndarray
        MxNx2 dimensional matrix containing the start and end index for the
        spike_times array for any given epoch and channel combination
    metric : str
        Pick the specific metric by combining the metric ID with either
        'xcorr' to compute it on pairwise xcorr histograms or 'times' to
        compute it directly on spike times.
        Currently available:
        * SPOTD_xcorr
        * SPOTD_xcorr_pooled
        * SPOTD_spikes

    Returns
    -------
    distances : numpy.ndarray
        MxM distance matrix with numpy.nan for unknown distances
    """
    n_epochs = ii_spike_times.shape[0]
    epoch_index_pairs = np.array(
        list(itertools.combinations(range(n_epochs), 2)),
        dtype=int)

    # SPOTDis comparing the pairwise xcorrs of channels
    if metric == 'SPOTD_xcorr':
        distances, percent_nan = xcorr_spotdis_cpu_(
            spike_times, ii_spike_times, epoch_index_pairs)
        distances = distances / (2*epoch_length)

    # SPOTDis comparing the xcorr of a channel with all other channels pooled
    elif metric == 'SPOTD_xcorr_pooled':
        distances, percent_nan = xcorr_pooled_spotdis_cpu_(
            spike_times, ii_spike_times, epoch_index_pairs)
        distances = distances / (2*epoch_length)

    # SPOTDis comparing raw spike trains
    elif metric == 'SPOTD_spikes':
        distances, percent_nan = spike_spotdis_cpu_(
            spike_times, ii_spike_times, epoch_index_pairs)
        distances = distances / epoch_length

    # Otherwise, raise exception
    else:
        raise NotImplementedError(
            'Metric "{}" unavailable, check doc-string for alternatives.'.format(metric))

    np.fill_diagonal(distances, 0)
    return distances
3696f33929150ac2f002aa6a78822654eeb50581
9,344
def format_object_translation(object_translation, typ):
    """
    Formats the [poi/event/page]-translation as json

    :param object_translation: A translation object which has a title and a permalink
    :type object_translation: ~cms.models.events.event.Event or ~cms.models.pages.page.Page or ~cms.models.pois.poi.POI

    :param typ: The type of this object
    :type typ: str

    :return: A dictionary with the title, url and type of the translation object
    :rtype: dict
    """
    return {
        "title": object_translation.title,
        "url": f"{WEBAPP_URL}/{object_translation.permalink}",
        "type": typ,
    }
11499d53d72e071d59176a00543daa0e8246f89a
9,345
def _FormatKeyValuePairsToLabelsMessage(labels):
    """Converts the list of (k, v) pairs into labels API message."""
    sorted_labels = sorted(labels, key=lambda x: x[0] + x[1])
    return [
        api_utils.GetMessage().KeyValue(key=k, value=v)
        for k, v in sorted_labels
    ]
3f2dd78951f8f696c398ab906acf790d7923eb75
9,346
def gen_unique(func):
    """Given a function returning a generator, return a function returning
    a generator of unique elements"""
    return lambda *args: unique(func(*args))
703dc6f80553fc534ca1390eb2c0c3d7d81b26eb
9,347
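A sketch of gen_unique in use, with a minimal stand-in for the unique helper it assumes (order-preserving de-duplication):

def unique(iterable):
    # Hypothetical helper: yield each element at most once, preserving order.
    seen = set()
    for item in iterable:
        if item not in seen:
            seen.add(item)
            yield item

@gen_unique
def cycling(n):
    for i in range(n):
        yield i % 3

print(list(cycling(7)))  # [0, 1, 2]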
def admin_inventory(request):
    """
    View to handle stocking up inventory, adding products...
    """
    context = dict(product_form=ProductForm(),
                   products=Product.objects.all(),
                   categories=Category.objects.all(),
                   transactions=request.user.account.transaction_set.all()
                   )
    return render(request, 'namubufferiapp/admin_handleinventory.html', context)
ec8f38947ab95f82a26fc6c6949d569a5ec83f7d
9,348
def snippet_list(request):
    """
    List all code snippets, or create a new snippet.
    """
    print(f'METHOD @ snippet_list= {request.method}')
    if request.method == 'GET':
        snippets = Snippet.objects.all()
        serializer = SnippetSerializer(snippets, many=True)
        return JsonResponse(serializer.data, safe=False)

    elif request.method == 'POST':
        data = JSONParser().parse(request)
        serializer = SnippetSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
            return JsonResponse(serializer.data, status=201)
        return JsonResponse(serializer.errors, status=400)
959245f7d194470c4bccef338ead8d0b35abe1bc
9,349
def generate_submission(args: ArgumentParser, submission: pd.DataFrame) -> pd.DataFrame:
    """Take Test Predictions for 4 classes to Generate Submission File"""
    image, kind = args.shared_indices
    df = submission.reset_index()[[image, args.labels[0]]]
    df.columns = ["Id", "Label"]
    df.set_index("Id", inplace=True)
    df["Label"] = 1. - df["Label"]
    print(f"\nSubmission Stats:\n{df.describe()}\nSubmission Head:\n{df.head()}")
    return df
e5b3f1c65adbe1436d638667cbc7bae9fb8a6a1e
9,350
import numba
import numpy as np


# Note: numba.prange only parallelizes when the function is compiled with
# numba.njit(parallel=True); without it, prange falls back to range.
def nearest1d(vari, yi, yo, extrap="no"):
    """Nearest interpolation of nD data along an axis with varying coordinates

    Warning
    -------
    `nxi` must be either a multiple or a divisor of `nxo`, and multiple of `nxiy`.

    Parameters
    ----------
    vari: array_like(nxi, nyi)
    yi: array_like(nxiy, nyi)
    yo: array_like(nxo, nyo)

    Return
    ------
    array_like(nx, nyo): varo
        With `nx=max(nxi, nxo)`
    """
    # Shapes
    nxi, nyi = vari.shape
    nxiy = yi.shape[0]
    nxo, nyo = yo.shape
    nx = max(nxi, nxo)

    # Init output
    varo = np.full((nx, nyo), np.nan, dtype=vari.dtype)

    # Loop on the varying dimension
    for ix in numba.prange(nx):

        # Index along x for coordinate arrays
        ixi = min(nxi-1, ix % nxi)
        ixiy = min(nxiy-1, ix % nxiy)
        ixoy = min(nxo-1, ix % nxo)

        # Loop on input grid
        iyimin, iyimax = get_iminmax(yi[ixiy])
        iyomin, iyomax = get_iminmax(yo[ixoy])
        for iyi in range(iyimin, iyimax):

            # Out of bounds
            if yi[ixiy, iyi+1] < yo[ixoy, iyomin]:
                continue
            if yi[ixiy, iyi] > yo[ixoy, iyomax]:
                break

            # Loop on output grid
            for iyo in range(iyomin, iyomax+1):

                dy0 = yo[ixoy, iyo] - yi[ixiy, iyi]
                dy1 = yi[ixiy, iyi+1] - yo[ixoy, iyo]

                # Above
                if dy1 < 0:
                    break

                # Below
                if dy0 < 0:
                    iyomin = iyo + 1

                # Interpolations
                elif dy0 <= dy1:
                    varo[ix, iyo] = vari[ixi, iyi]
                else:
                    varo[ix, iyo] = vari[ixi, iyi+1]

    # Extrapolation
    if extrap != "no":
        varo = extrap1d(varo, extrap)

    return varo
f7a9c03b1cca3844a9aad3d954fa2a189134a69f
9,351
def registros():
    """Records page."""
    return render_template('records.html')
b72cffbdf966f8c94831da76fd901ce9cba60aac
9,352
import numpy as np


def cal_evar(rss, matrix_v):
    """Compute explained variance.

    Args:
        rss: residual sum of squares.
        matrix_v: the original data matrix.

    Returns:
        evar: fraction of the variance in `matrix_v` explained by the model.
    """
    evar = 1 - (rss / np.sum(matrix_v ** 2))
    return evar
21f1d71ba98dafe948a5a24e4101968531ec1e30
9,353
def split_path(path):
    """
    public static List<String> splitPath(String path)

    Converts a path expression into a list of keys, by splitting on period
    and unquoting the individual path elements. A path expression is usable
    with a {@link Config}, while individual path elements are usable with a
    {@link ConfigObject}.

    See the overview documentation for {@link Config} for more detail on path
    expressions vs. keys.

    @param path
        a path expression
    @return the individual keys in the path
    @throws ConfigException
        if the path expression is invalid
    """
    return impl_util.split_path(path)
9e102d7f7b512331165f51e6055daeaf4f56b61a
9,354
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_d():
    """Dilated hparams."""
    hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated()
    hparams.gap_sizes = [0, 16, 64, 16, 64, 128, 256, 0]
    return hparams
b0a56031e06cff42df4cdeab55e01322be8e439d
9,356
import numpy as np


def leaveOneOut_Input_v4(leaveOut):
    """
    Generate observation matrix and vectors Y, F

    Those observations are trimmed for the leave-one-out evaluation. Therefore,
    the leaveOut indicates the CA id to be left out, ranging from 1-77
    """
    des, X = generate_corina_features('ca')
    X = np.delete(X, leaveOut-1, 0)
    popul = X[:, 0].reshape(X.shape[0], 1)
    pvt = X[:, 2]  # poverty index of each CA
    # poi_cnt = getFourSquareCount(leaveOut)
    # poi_cnt = np.divide(poi_cnt, popul) * 10000
    poi_dist = getFourSquarePOIDistribution(leaveOut)
    poi_dist = np.divide(poi_dist, popul) * 10000
    F_dist = generate_geographical_SpatialLag_ca(leaveOut=leaveOut)
    F_flow = generate_transition_SocialLag(year=2010, lehd_type=0, region='ca', leaveOut=leaveOut)
    F_taxi = getTaxiFlow(leaveOut=leaveOut)
    Y = retrieve_crime_count(year=2010, col=['total'], region='ca')
    Y = np.delete(Y, leaveOut-1, 0)
    Y = np.divide(Y, popul) * 10000

    F = []
    n = Y.size
    Yd = []
    for i in range(n):
        for j in range(n):
            if i != j:
                wij = np.array([F_dist[i, j],
                                actualFlowInteraction(pvt[i], pvt[j]) * F_flow[i, j],
                                F_taxi[i, j]])
                # fij = np.concatenate((X[i], poi_dist[i], wij * Y[j][0]), 0)
                fij = np.concatenate((X[i], wij * Y[j][0]), 0)
                F.append(fij)
                Yd.append(Y[i])
    F = np.array(F)
    # fixed: np.append returns a new array, so the bias column must be assigned back
    F = np.append(F, np.ones((F.shape[0], 1)), axis=1)
    Yd = np.array(Yd)
    Yd.resize((Yd.size, 1))

    return Yd, F
0078bda71345d31cf24f4d1c4ceeafa768357ad4
9,357
import logging


def common_inroom_auth_response(name, request, operate, op_args):
    """Generic handler for operations that require the user to exist, be
    logged in, and currently be in a Room.

    Parameters:
        - name: operation name, used in log output;
        - request: the request passed in by Flask;
        - operate: the concrete operation function; its parameters are the
          values that need to be extracted from request.form, and it returns
          the response json on success;
        - op_args: a list of parameter-name strings for the operate function.

    Returns: response json

    Notes:
        This function extracts from_uid and all values named in op_args from
        request.form; if any value is missing, it returns unexpected.
        It then checks that the user exists, is logged in and is in a room;
        if any check fails, it returns from_not_exist, from_not_login or
        from_not_in_room.
        Once all checks pass, it calls operate, passing the parsed args via
        argument unpacking.
    """
    try:
        assert request.method == 'POST', "method should be POST"
        assert isinstance(op_args, (tuple, list)), "op_args should be tuple or list"
        from_uid = None
        args = {}
        try:
            from_uid = request.form["from_uid"]
            for i in op_args:
                args[i] = request.form[i]
        except KeyError:
            raise RequestError("not enough param")

        # Verify the requesting user
        if not au.byUid.exist(from_uid):
            logging.critical('<{name}>: from_not_exist. from_uid = {from_uid}'.format(name=name, from_uid=from_uid))
            return response_error(get_simple_error_content(ResponseError.from_not_exist))
        if not au.byUid.logined(from_uid):
            logging.error('<{name}>: from_not_login. from_uid = {from_uid}'.format(name=name, from_uid=from_uid))
            return response_error(get_simple_error_content(ResponseError.from_not_login))
        if not au.byUid.inroom(from_uid):
            logging.error('<{name}>: from_not_in_room. from_uid = {from_uid}'.format(name=name, from_uid=from_uid))
            return response_error(get_simple_error_content(ResponseError.from_not_in_room))

        # All checks passed; perform the operation
        return operate(**args)
    except Exception as e:
        logging.error('<{name}>: unexpected. request = {request}, request.form = {form}'.format(
            name=name, request=request, form=request.form))
        return response_unexpected(e)
b11607f2d0a6a656c65cf464010f10634389f0bf
9,358
import numpy as np


def get_pca(acts, compute_dirns=False):
    """
    Takes in neuron activations acts and returns principal components
    and associated eigenvalues.

    Args:
        acts: numpy array, shape=(num neurons, num datapoints)
        compute_dirns: boolean, whether to also return the pca directions
    """
    assert acts.shape[0] < acts.shape[1], ("input must be number of neurons"
                                           " by datapoints")

    # center activations
    means = np.mean(acts, axis=1, keepdims=True)
    cacts = acts - means

    # compute PCA using SVD
    U, S, V = np.linalg.svd(cacts, full_matrices=False)

    return_dict = {}
    return_dict["eigenvals"] = S
    return_dict["neuron_coefs"] = U.T
    if compute_dirns:
        return_dict["pca_dirns"] = np.dot(U.T, cacts) + means

    return return_dict
25620178e340f58b3d13ed0de4ee6d324abcb3ef
9,359
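A usage sketch with random activations; note the assert requires more datapoints than neurons:

import numpy as np

acts = np.random.randn(20, 500)  # 20 neurons, 500 datapoints
result = get_pca(acts, compute_dirns=True)
print(result["eigenvals"].shape)  # (20,)
print(result["pca_dirns"].shape)  # (20, 500)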
import cv2


def canny(img, low_threshold, high_threshold):
    """Applies the Canny transform"""
    # imgCopy = np.uint8(img)
    return cv2.Canny(img, low_threshold, high_threshold)
80e8d4ad99c769887e85577b46f6028ceea0b9f6
9,362
def pairwise_two_tables(left_table, right_table, allow_no_right=True):
    """
    >>> pairwise_two_tables(
    ...     [("tag1", "L1"), ("tag2", "L2"), ("tag3", "L3")],
    ...     [("tag1", "R1"), ("tag3", "R3"), ("tag2", "R2")],
    ... )
    [('L1', 'R1'), ('L2', 'R2'), ('L3', 'R3')]
    >>> pairwise_two_tables(
    ...     [("tag1", "L1"), ("tag2", "L2")],
    ...     [("tag1", "R1"), ("tag3", "R3"), ("tag2", "R2")],
    ... )
    Traceback (most recent call last):
    vrename.NoLeftValueError: ('tag3', 'R3')
    >>> pairwise_two_tables(
    ...     [("tag1", "L1"), ("tag2", "L2"), ("tag3", "L3")],
    ...     [("tag1", "R1"), ("tag3", "R3")],
    ...     False,
    ... )
    Traceback (most recent call last):
    vrename.NoRightValueError: ('tag2', 'L2')
    >>> pairwise_two_tables(
    ...     [("tag1", "L1"), ("tag2", "L2"), ("tag3", "L3")],
    ...     [("tag1", "R1"), ("tag3", "R3")],
    ... )
    [('L1', 'R1'), ('L2', None), ('L3', 'R3')]
    >>> pairwise_two_tables(
    ...     [("tag1", "L1"), ("tag1", "L1-B")],
    ...     []
    ... )
    Traceback (most recent call last):
    vrename.DuplicateTagError: ('tag1', ['L1', 'L1-B'])
    >>> pairwise_two_tables(
    ...     [("tag1", "L1"), ("tag2", "L2"), ("tag3", "L3")],
    ...     [("tag1", "R1"), ("tag3", "R3"), ("tag2", "R2"), ("tag1", "R1-B")],
    ... )
    Traceback (most recent call last):
    vrename.MultipleRightValueError: ('tag1', 'L1', ['R1', 'R1-B'])
    """
    pairs = []
    for tag, (left, rights) in _confront_two_tables(left_table, right_table):
        if len(rights) > 1:
            raise MultipleRightValueError(tag, left, rights)
        if not rights:
            if allow_no_right:
                pairs.append((left, None))
            else:
                raise NoRightValueError(tag, left)
        else:
            pairs.append((left, rights[0]))
    return pairs
aabcccc2ade9b00ed5bdac32f9cc4a7a4cc718c3
9,363
import tensorflow as tf


def augment_stochastic_shifts(seq, augment_shifts):
    """Apply a stochastic shift augmentation.

    Args:
        seq: input sequence of size [batch_size, length, depth]
        augment_shifts: list of int offsets to sample from

    Returns:
        shifted and padded sequence of size [batch_size, length, depth]
    """
    shift_index = tf.random.uniform(shape=[], minval=0,
                                    maxval=len(augment_shifts), dtype=tf.int64)
    shift_value = tf.gather(tf.constant(augment_shifts), shift_index)

    seq = tf.cond(tf.not_equal(shift_value, 0),
                  lambda: shift_sequence(seq, shift_value),
                  lambda: seq)

    return seq
1afd682e1f665d4d0786e729e6789a6459b4457c
9,364
def _SourceArgs(parser):
    """Add mutually exclusive source args."""
    source_group = parser.add_mutually_exclusive_group()

    def AddImageHelp():
        """Returns detailed help for `--image` argument."""
        template = """\
        An image to apply to the disks being created. When using
        this option, the size of the disks must be at least as large as
        the image size. Use ``--size'' to adjust the size of the disks.

        {alias_table}

        This flag is mutually exclusive with ``--source-snapshot''.
        """
        indent = template.find(template.lstrip()[0])
        return template.format(
            alias_table=image_utils.GetImageAliasTable(indent=indent))

    image = source_group.add_argument(
        '--image',
        help='An image to apply to the disks being created.')
    image.detailed_help = AddImageHelp

    image_utils.AddImageProjectFlag(parser)

    source_group.add_argument(
        '--image-family',
        help=('The family of the image that the boot disk will be initialized '
              'with. When a family is used instead of an image, the latest '
              'non-deprecated image associated with that family is used.')
    )

    source_snapshot = source_group.add_argument(
        '--source-snapshot',
        help='A source snapshot used to create the disks.')
    source_snapshot.detailed_help = """\
        A source snapshot used to create the disks. It is safe to
        delete a snapshot after a disk has been created from the
        snapshot. In such cases, the disks will no longer reference
        the deleted snapshot. To get a list of snapshots in your
        current project, run `gcloud compute snapshots list`. A
        snapshot from an existing disk can be created using the
        'gcloud compute disks snapshot' command. This flag is mutually
        exclusive with ``--image''.

        When using this option, the size of the disks must be at least
        as large as the snapshot size. Use ``--size'' to adjust the
        size of the disks.
        """
dfa44ed54c4efba666f19c850a0eacffe85cafa0
9,365
def get_all_species_links_on_page(url):
    """Get all the species list on the main page."""
    data, dom = get_dom(url)
    table = dom.find('.tableguides.table-responsive > table a')
    links = []
    for link in table:
        if link is None or link.text is None:
            continue
        links.append(dict(
            name=link.text.strip().lower(),
            url=DAVES_URL_BY_SPECIES + link.get('href')
        ))
    return links
4a63d78b699150c37ccc9aa30d9fa6dae39d801b
9,366
def gen_image_name(reference: str) -> str:
    """
    Generate the image name as a signing input, based on the docker reference.

    Args:
        reference: Docker reference for the signed content,
            e.g. registry.redhat.io/redhat/community-operator-index:v4.9
    """
    no_tag = reference.split(":")[0]
    image_parts = no_tag.split("/")
    return "/".join(image_parts[1:])
ccaecfe91b5b16a85e3a3c87b83bbc91e54080b1
9,367
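Tracing the docstring's own example through gen_image_name:

ref = "registry.redhat.io/redhat/community-operator-index:v4.9"
print(gen_image_name(ref))  # redhat/community-operator-index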
import numpy as np


def adaptive_confidence_interval(values, max_iterations=1000, alpha=0.05, trials=5,
                                 variance_threshold=0.5):
    """Compute confidence interval using as few iterations as possible"""
    try_iterations = 10
    while True:
        intervals = [confidence_interval(values, try_iterations, alpha) for _ in range(trials)]
        band_variance = variance([upper_bound - lower_bound
                                  for lower_bound, upper_bound in intervals])
        print(try_iterations, band_variance)
        if band_variance < variance_threshold or try_iterations > max_iterations:
            return intervals[np.random.randint(0, trials)], try_iterations
        try_iterations *= 2
47c1861384d94a13beaf86eed5ad88a2ad2fb80f
9,368
def get_chat_id(update):
    """
    Get chat ID from update.

    Args:
        update (instance): Incoming update.

    Returns:
        (int, None): Chat ID.
    """
    # Simple messages
    if update.message:
        return update.message.chat_id

    # Menu callbacks
    if update.callback_query:
        return update.callback_query.message.chat_id

    return None
1669382fd430b445ea9e3a1306c1e68bf2ec0013
9,369
def chooseCommertialCity(commercial_cities):
    """
    Parameters
    ----------
    commercial_cities : list[dict]

    Returns
    -------
    commercial_city : dict
    """
    print(_('From which city do you want to buy resources?\n'))
    for i, city in enumerate(commercial_cities):
        print('({:d}) {}'.format(i + 1, city['name']))
    selected_city_index = read(min=1, max=len(commercial_cities))
    return commercial_cities[selected_city_index - 1]
6e39c1922a1560f6d3d442cf5d14b764f2c08437
9,371
import tensorflow as tf


def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    """Reusable code for making a simple neural net layer.

    It does a matrix multiply, bias add, and then uses relu to nonlinearize.
    It also sets up name scoping so that the resultant graph is easy to read,
    and adds a number of summary ops.
    """
    # Adding a name scope ensures logical grouping of the layers in the graph.
    with tf.name_scope(layer_name):
        # This Variable will hold the state of the weights for the layer
        with tf.name_scope('weights'):
            weights = weight_variable([input_dim, output_dim])
            variable_summaries(weights)
        with tf.name_scope('biases'):
            biases = bias_variable([output_dim])
            variable_summaries(biases)
        with tf.name_scope('Wx_plus_b'):
            preactivate = tf.matmul(input_tensor, weights) + biases
            tf.summary.histogram('pre_activations', preactivate)
        activations = act(preactivate, name="activation")
        tf.summary.histogram('activations', activations)
        return activations
38976aa68de06e131f0e2fd8056216ce9bfcba77
9,372
def get_validate_platform(cmd, platform):
    """Gets and validates the Platform from both flags

    :param str platform: The name of Platform passed by user in --platform flag
    """
    OS, Architecture = cmd.get_models('OS', 'Architecture', operation_group='runs')
    # Defaults
    platform_os = OS.linux.value
    platform_arch = Architecture.amd64.value
    platform_variant = None

    if platform:
        platform_split = platform.split('/')
        platform_os = platform_split[0]
        platform_arch = platform_split[1] if len(platform_split) > 1 else Architecture.amd64.value
        platform_variant = platform_split[2] if len(platform_split) > 2 else None

    platform_os = platform_os.lower()
    platform_arch = platform_arch.lower()

    valid_os = get_valid_os(cmd)
    valid_arch = get_valid_architecture(cmd)
    valid_variant = get_valid_variant(cmd)

    if platform_os not in valid_os:
        raise CLIError(
            "'{0}' is not a valid value for OS specified in --platform. "
            "Valid options are {1}.".format(platform_os, ','.join(valid_os))
        )
    if platform_arch not in valid_arch:
        raise CLIError(
            "'{0}' is not a valid value for Architecture specified in "
            "--platform. Valid options are {1}.".format(
                platform_arch, ','.join(valid_arch))
        )
    if platform_variant and (platform_variant not in valid_variant):
        raise CLIError(
            "'{0}' is not a valid value for Variant specified in --platform. "
            "Valid options are {1}.".format(
                platform_variant, ','.join(valid_variant))
        )

    return platform_os, platform_arch, platform_variant
3b9150c400ed28e322108ba531c7f4c5ac450da1
9,374
import numpy as np


def get_path_cost(slice, offset, parameters):
    """
    part of the aggregation step, finds the minimum costs in a D x M slice
    (where M = the number of pixels in the given direction)

    :param slice: M x D array from the cost volume.
    :param offset: ignore the pixels on the border.
    :param parameters: structure containing parameters of the algorithm.
    :return: M x D array of the minimum costs for a given slice in a given
        direction.
    """
    other_dim = slice.shape[0]
    disparity_dim = slice.shape[1]

    disparities = [d for d in range(disparity_dim)] * disparity_dim
    disparities = np.array(disparities).reshape(disparity_dim, disparity_dim)

    penalties = np.zeros(shape=(disparity_dim, disparity_dim), dtype=slice.dtype)
    penalties[np.abs(disparities - disparities.T) == 1] = parameters.P1
    penalties[np.abs(disparities - disparities.T) > 1] = parameters.P2

    minimum_cost_path = np.zeros(shape=(other_dim, disparity_dim), dtype=slice.dtype)
    minimum_cost_path[offset - 1, :] = slice[offset - 1, :]

    for i in range(offset, other_dim):
        previous_cost = minimum_cost_path[i - 1, :]
        current_cost = slice[i, :]
        costs = np.repeat(previous_cost, repeats=disparity_dim,
                          axis=0).reshape(disparity_dim, disparity_dim)
        costs = np.amin(costs + penalties, axis=0)
        minimum_cost_path[i, :] = current_cost + costs - np.amin(previous_cost)

    return minimum_cost_path
06348e483cd7cba012354ecdcadcd0381b0b7dfb
9,375
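A shape-level sketch of get_path_cost on a random cost-volume slice; the parameter object is a hypothetical stand-in for the algorithm's P1/P2 smoothness penalties:

import numpy as np

class Params:
    P1 = 5    # penalty for a disparity jump of 1
    P2 = 150  # penalty for larger disparity jumps

slice_ = np.random.randint(0, 255, (10, 32)).astype(np.float32)  # 10 pixels, 32 disparities
costs = get_path_cost(slice_, offset=1, parameters=Params())
print(costs.shape)  # (10, 32)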
def generate_cyclic_group(order, identity_name="e", elem_name="a", name=None, description=None):
    """Generates a cyclic group with the given order.

    Parameters
    ----------
    order : int
        A positive integer
    identity_name : str
        The name of the group's identity element. Defaults to 'e'
    elem_name : str
        Prefix for all non-identity elements. Default is a1, a2, a3, ...
    name : str
        The group's name. Defaults to 'Zn', where n is the order.
    description : str
        A description of the group. Defaults to 'Autogenerated cyclic group
        of order n', where n is the group's order.

    Returns
    -------
    Group
        A cyclic group of the given order
    """
    if name:
        nm = name
    else:
        nm = "Z" + str(order)
    if description:
        desc = description
    else:
        desc = f"Autogenerated cyclic group of order {order}"
    elements = [identity_name, elem_name] + [f"{elem_name}^" + str(i) for i in range(2, order)]
    table = [[((a + b) % order) for b in range(order)] for a in range(order)]
    return Group(nm, desc, elements, table)
ed79547dfde64ece136456a8c5d7ce00c4317176
9,376
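A usage sketch, assuming the Group class named above accepts (name, description, elements, table):

z4 = generate_cyclic_group(4)
# elements: ['e', 'a', 'a^2', 'a^3']
# table row a, column b holds (a + b) % 4, so table[1][3] == 0, i.e. a * a^3 = e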
import numpy as np
from OpenGL.GL import *
from PIL import Image


def loadTextureBMP(filepath):
    """
    Loads the BMP file given in filepath, creates an OpenGL texture from it
    and returns the texture ID.
    """
    data = np.array(Image.open(filepath))
    # fixed: numpy image arrays are (rows, cols) = (height, width); the
    # original assignments were swapped, which breaks non-square images
    height = data.shape[0]
    width = data.shape[1]
    textureID = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, textureID)
    glTexImage2D(
        GL_TEXTURE_2D,
        0,
        GL_RGB,
        width,
        height,
        0,
        GL_BGR,
        GL_UNSIGNED_BYTE,
        data,
    )
    # default parameters for now. Can be parameterized in the future
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)
    glGenerateMipmap(GL_TEXTURE_2D)
    return textureID
dd80584afc644fa23c2aef919a24152ea5b3696e
9,377
import numpy as np


def get_pixeldata(ds: "Dataset") -> "np.ndarray":
    """Return a :class:`numpy.ndarray` of the pixel data.

    .. versionadded:: 2.1

    Parameters
    ----------
    ds : pydicom.dataset.Dataset
        The :class:`Dataset` containing an :dcm:`Image Pixel
        <part03/sect_C.7.6.3.html>` module and the *Pixel Data* to be
        converted.

    Returns
    -------
    numpy.ndarray
        The contents of (7FE0,0010) *Pixel Data* as a 1D array.
    """
    expected_len = get_expected_length(ds, 'pixels')
    frame_len = expected_len // getattr(ds, "NumberOfFrames", 1)
    # Empty destination array for our decoded pixel data
    arr = np.empty(expected_len, pixel_dtype(ds))
    generate_offsets = range(0, expected_len, frame_len)
    for frame, offset in zip(generate_frames(ds, False), generate_offsets):
        arr[offset:offset + frame_len] = frame

    return arr
418603d30bf272affc0e63615e94d4cce11b1bf2
9,378
import time


def timeit(method):
    """Timing Decorator Function
    Written by Fahim Sakri of PythonHive (https://medium.com/pthonhive)
    """
    def timed(*args, **kwargs):
        time_start = time.time()
        result = method(*args, **kwargs)
        time_end = time.time()  # fixed: must be captured after the call, not before
        if 'log_time' in kwargs:
            name = kwargs.get('log_name', method.__name__.upper())
            kwargs['log_time'][name] = int((time_end - time_start) * 1000)
        else:
            print('\n{} {:5f} ms'.format(method.__name__, (time_end - time_start) * 1000))
        return result
    return timed
598667950bc707b72239af9f4e5a3248dbe64d96
9,379
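Applying the decorator (with the fix above, time_end is captured after the wrapped call):

@timeit
def slow_sum(n):
    return sum(range(n))

slow_sum(10_000_000)  # prints something like: slow_sum 250.000000 ms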
import pandas as pd


def allot_projects():
    """
    The primary function that allots the projects to the employees.
    It generates a maximum match for a bipartite graph of employees and
    projects.

    :return: A tuple having the allotments, count of employees allotted and
        total project headcount (a project where two people need to work will
        have a headcount of two).
    """
    allotments = []
    try:
        emp_data = pd.read_pickle(EMPLOYEE_PICKLE_FILE)
        project_data = pd.read_pickle(PROJECT_PICKLE_FILE)
    except IOError:
        print("Either employee or project data is not present. No allocation done.")
        return [], 0, 0

    employees = []
    for _, emp_row in emp_data.iterrows():
        transposed = emp_row.T
        transposed = transposed[transposed == 1]
        skills = set(transposed.index)
        employees.append(
            {
                'name': emp_row['name'],
                'value': skills
            }
        )

    projects = []
    for _, project_row in project_data.iterrows():
        n = int(project_row['emp_count'])
        for i in range(n):
            projects.append(
                {
                    'absolute_name': project_row['name'],
                    'name': project_row['name'] + str(i),
                    'value': set(project_row[['domain', 'language', 'type']].values)
                }
            )

    matrix = []
    for e in employees:
        row = []
        for p in projects:
            if len(e['value'].intersection(p['value'])) >= 2:
                row.append(1)
            else:
                row.append(0)
        matrix.append(row)

    employee_count = len(employees)
    project_count = len(projects)

    # An array to keep track of the employees assigned to projects.
    # The value of emp_project_match[i] is the employee number
    # assigned to project i.
    # A value of -1 indicates nobody is allocated that project.
    emp_project_match = [-1] * project_count

    def bipartite_matching(employee, match, seen):
        """
        A recursive solution that returns true if a project mapping
        for employee is possible.

        :param employee: The employee for whom we are searching a project.
        :param match: Stores the assigned employees to projects.
        :param seen: An array to tell the projects available to employee.
        :return: `True` if match for employee is possible else `False`.
        """
        # Try every project one by one.
        for project in range(project_count):
            # If employee is fit for the project and the project has not yet
            # been checked by the employee.
            if matrix[employee][project] and seen[project] is False:
                # Mark the project as checked by employee.
                seen[project] = True
                # If project is not assigned to anyone or previously assigned
                # to someone else (match[project]) but that employee could find
                # an alternate project. Note that since the project has been
                # seen by the employee above, it will not be available to
                # match[project].
                if match[project] == -1 or bipartite_matching(match[project], match, seen):
                    match[project] = employee
                    return True
        return False

    emp_allotted = 0
    for emp in range(employee_count):
        # Mark all projects as not seen for next applicant.
        projects_seen = [False] * project_count
        # Find if the employee can be assigned a project
        if bipartite_matching(emp, emp_project_match, projects_seen):
            emp_allotted += 1

    for p, e in enumerate(emp_project_match):
        if e != -1:
            allotments.append((employees[e]['name'], projects[p]['absolute_name']))

    return allotments, emp_allotted, project_count
774df8714cd47eb2a7affe34480dfec682010341
9,380
import requests


def upload_record(data, headers, rdr_project_id):
    """Upload a supplied record to the research data repository"""
    request_url = f"https://api.figsh.com/v2/account/projects/{rdr_project_id}/articles"
    response = requests.post(request_url, headers=headers, json=data)
    return response.json()
7431234757668f9157f90aa8a9c335ee0e2a043b
9,381
def datetime_to_ts(str_datetime):
    """
    Transform datetime representation to unix epoch.
    :return:
    """
    if '1969-12-31' in str_datetime:
        # ignore default values
        return None
    else:
        # convert to timestamp
        if '.' in str_datetime:
            # check whether it has milliseconds or not
            dt = tutil.strff_to_date(str_datetime)
        else:
            dt = tutil.strf_to_date(str_datetime)
        ts = tutil.date_to_ts(dt)
        return ts
83b40abc6c5ce027cf04cd2335b2f35e235451d0
9,382
import functools


def is_codenames_player(funct):
    """
    Decorator that ensures the method is called only by a codenames player.

    Args:
        funct (function): Function being decorated

    Returns:
        function: Decorated function which calls the original function if the
        user is a codenames player, and returns otherwise
    """
    @functools.wraps(funct)
    def wrapper(*args, **kwargs):
        if not current_user.is_authenticated or current_user.codenames_player is None:
            return None
        return funct(*args, **kwargs)
    return wrapper
814bc929bbd20e8c527bd5c922a25823a4bdbefc
9,383
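A sketch of the decorator guarding a handler; current_user and its codenames_player attribute come from the surrounding Flask-Login app, so this is illustrative only:

@is_codenames_player
def handle_guess(word):
    # Reached only when current_user is authenticated and is a codenames player.
    return {"guessed": word}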
def get(args) -> str:
    """Creates manifest in XML format.

    @param args: Arguments provided by the user from command line
    @return: Generated xml manifest string
    """
    arguments = {
        'target': args.target,
        'targetType': None if args.nohddl else args.targettype,
        'path': args.path,
        'nohddl': args.nohddl
    }
    manifest = ('<?xml version="1.0" encoding="utf-8"?>' +
                '<manifest>' +
                '<type>config</type>' +
                '<config>' +
                '<cmd>get_element</cmd>' +
                '{0}' +
                '<configtype>' +
                '{1}' +
                '<get>' +
                '{2}' +
                '</get>' +
                '</configtype>' +
                '</config>' +
                '</manifest>').format(
        create_xml_tag(arguments, "targetType"),
        create_xml_tag(arguments, "target"),
        create_xml_tag(arguments, "path")
    )
    print("manifest {0}".format(manifest))
    return manifest
7b859952d7eda9d6dedd916bb3534d225c3d9593
9,385
from typing import Callable


def elementwise(op: Callable[..., float], *ds: D) -> NumDict:
    """
    Apply op elementwise to a sequence of numdicts.

    If any numdict in ds has None default, then default is None, otherwise
    the new default is calculated by running op on all defaults.
    """
    keys: set = set()
    keys.update(*ds)

    grouped: dict = {}
    defaults: list = []
    for d in ds:
        defaults.append(d.default)
        for k in keys:
            grouped.setdefault(k, []).append(d[k])

    if any([d is None for d in defaults]):
        default = None
    else:
        default = op(defaults)

    return NumDict({k: op(grouped[k]) for k in grouped}, default)
4e7dce60d01e8bcec722a5a6d60d15920a6a91c5
9,386
import torch
from torch.nn import functional as F


def sigmoid_focal_loss(
    inputs: torch.Tensor,
    targets: torch.Tensor,
    alpha: float = -1,
    gamma: float = 2,
    reduction: str = "none",
) -> torch.Tensor:
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.

    Args:
        inputs: A float tensor of arbitrary shape.
            The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the
            binary classification label for each element in inputs
            (0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
            positive vs negative examples. Default = -1 (no weighting).
        gamma: Exponent of the modulating factor (1 - p_t) to
            balance easy vs hard examples.
        reduction: 'none' | 'mean' | 'sum'
            'none': No reduction will be applied to the output.
            'mean': The output will be averaged.
            'sum': The output will be summed.

    Returns:
        Loss tensor with the reduction option applied.
    """
    p = torch.sigmoid(inputs)
    ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    p_t = p * targets + (1 - p) * (1 - targets)
    loss = ce_loss * ((1 - p_t) ** gamma)

    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss

    if reduction == "mean":
        loss = loss.mean()
    elif reduction == "sum":
        loss = loss.sum()

    return loss
e792c1bea37bcc26ff323a764fc56e0f4bbd0bc5
9,387
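A quick check of the focal loss on toy logits:

import torch

inputs = torch.tensor([2.0, -1.0, 0.5, -3.0])  # raw logits
targets = torch.tensor([1.0, 0.0, 1.0, 0.0])   # binary labels
print(sigmoid_focal_loss(inputs, targets, alpha=0.25, reduction="mean"))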
from collections import defaultdict

import numpy as np


def arcsin(x):
    """Return the inverse sine or the arcsin.

    INPUTS
        x (Variable object or real number)

    RETURNS
        if x is a Variable, then return a Variable with val and der.
        if x is a real number, then return the value of arcsin(x).

    EXAMPLES
    >>> x = Variable(0, name='x')
    >>> t = arcsin(x)
    >>> print(t.val, t.der['x'])
    0.0 1.0
    """
    try:
        val = np.arcsin(x.val)
        ders = defaultdict(float)
        sec_ders = defaultdict(float)
        for key in x.der:
            ders[key] += 1/((1 - x.val**2)**0.5) * (x.der[key])
            sec_ders[key] += (x.val*x.der[key]**2 - x.sec_der[key]*(x.val**2 - 1))/((1 - x.val**2)**1.5)
        return Variable(val, ders, sec_ders)
    except AttributeError:
        return np.arcsin(x)
a5d899dae9b4fc33b6ddf2e2786ec6eee8508541
9,388
from copy import deepcopy
from itertools import chain

from tqdm import tqdm  # fixed: the bare `import tqdm` would make tqdm(...) fail


def preprocessing(texts, words, label, coef=0.3, all_tasks=False,
                  include_repeat=True, progressbar=True):
    """The function returns the processed array for the Spacy standard."""
    train = []
    enit = {}
    assert 0 < coef <= 1, f"The argument must be in the range (0 < coef <= 1) --> {coef}"
    if all_tasks:
        words_f = unique(flatten(words, coef))
        if coef == 1:
            include_repeat = False
    else:
        assert len(texts) == len(words), f"Data must be same length: ({len(texts)}, {len(words)})"
        print("\n\033[31mcoef is ignored because you are using all_tasks=False")
    for i in tqdm(range(len(texts)), disable=not progressbar):
        if all_tasks:
            if include_repeat:
                words_f = unique(chain(words_f, words[i]))
            enit['entities'] = to_format(texts[i], words_f, label)
        else:
            enit['entities'] = to_format(texts[i], words[i], label)
        train.append((texts[i], deepcopy(enit)))
    return train
f10c27f8ed686d45a1c778bdf557f88ad3f3bdfa
9,389
import math

import cupy
import numpy


def rotate(
    input,
    angle,
    axes=(1, 0),
    reshape=True,
    output=None,
    order=3,
    mode="constant",
    cval=0.0,
    prefilter=True,
    *,
    allow_float32=True,
):
    """Rotate an array.

    The array is rotated in the plane defined by the two axes given by the
    ``axes`` parameter using spline interpolation of the requested order.

    Args:
        input (cupy.ndarray): The input array.
        angle (float): The rotation angle in degrees.
        axes (tuple of 2 ints): The two axes that define the plane of
            rotation. Default is the first two axes.
        reshape (bool): If ``reshape`` is True, the output shape is adapted
            so that the input array is contained completely in the output.
            Default is True.
        output (cupy.ndarray or ~cupy.dtype): The array in which to place the
            output, or the dtype of the returned array.
        order (int): The order of the spline interpolation, default is 3.
            The order has to be in the range 0-5.
        mode (str): Points outside the boundaries of the input are filled
            according to the given mode (``'constant'``, ``'nearest'``,
            ``'mirror'`` or ``'opencv'``). Default is ``'constant'``.
        cval (scalar): Value used for points outside the boundaries of the
            input if ``mode='constant'`` or ``mode='opencv'``. Default is 0.0
        prefilter (bool): It is not used yet. It just exists for compatibility
            with :mod:`scipy.ndimage`.

    Returns:
        cupy.ndarray or None: The rotated input.

    Notes
    -----
    This implementation handles boundary modes 'wrap' and 'reflect' correctly,
    while SciPy prior to release 1.6.0 does not. So, if comparing to older
    SciPy, some disagreement near the borders may occur.

    For ``order > 1`` with ``prefilter == True``, the spline prefilter
    boundary conditions are implemented correctly only for modes 'mirror',
    'reflect' and 'grid-wrap'.

    .. seealso:: :func:`scipy.ndimage.rotate`
    """
    _check_parameter("rotate", order, mode)

    if mode == "opencv":
        mode = "_opencv_edge"

    input_arr = input
    axes = list(axes)
    if axes[0] < 0:
        axes[0] += input_arr.ndim
    if axes[1] < 0:
        axes[1] += input_arr.ndim
    if axes[0] > axes[1]:
        axes = [axes[1], axes[0]]
    if axes[0] < 0 or input_arr.ndim <= axes[1]:
        raise ValueError("invalid rotation plane specified")

    ndim = input_arr.ndim
    rad = numpy.deg2rad(angle)
    sin = math.sin(rad)
    cos = math.cos(rad)

    # determine offsets and output shape as in scipy.ndimage.rotate
    rot_matrix = numpy.array([[cos, sin], [-sin, cos]])

    img_shape = numpy.asarray(input_arr.shape)
    in_plane_shape = img_shape[axes]
    if reshape:
        # Compute transformed input bounds
        iy, ix = in_plane_shape
        out_bounds = rot_matrix @ [[0, 0, iy, iy], [0, ix, 0, ix]]
        # Compute the shape of the transformed input plane
        out_plane_shape = (out_bounds.ptp(axis=1) + 0.5).astype(int)
    else:
        out_plane_shape = img_shape[axes]

    out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
    in_center = (in_plane_shape - 1) / 2

    output_shape = img_shape
    output_shape[axes] = out_plane_shape
    output_shape = tuple(output_shape)

    matrix = numpy.identity(ndim)
    matrix[axes[0], axes[0]] = cos
    matrix[axes[0], axes[1]] = sin
    matrix[axes[1], axes[0]] = -sin
    matrix[axes[1], axes[1]] = cos

    offset = numpy.zeros(ndim, dtype=float)
    offset[axes] = in_center - out_center

    matrix = cupy.asarray(matrix)
    offset = cupy.asarray(offset)

    return affine_transform(
        input,
        matrix,
        offset,
        output_shape,
        output,
        order,
        mode,
        cval,
        prefilter,
        allow_float32=allow_float32,
    )
04b7f3dc66d09c0b69ba97579972e131cc96b375
9,390
def generate_url_fragment(title, blog_post_id):
    """Generates the url fragment for a blog post from the title of the blog
    post.

    Args:
        title: str. The title of the blog post.
        blog_post_id: str. The unique blog post ID.

    Returns:
        str. The url fragment of the blog post.
    """
    lower_title = title.lower()
    hyphenated_title = lower_title.replace(' ', '-')
    lower_id = blog_post_id.lower()
    return hyphenated_title + '-' + lower_id
c846e6203fa4782c6dc92c892b9e0b6c7a0077b5
9,391
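Tracing generate_url_fragment on a hypothetical title and id:

print(generate_url_fragment("My First Post", "AbC123"))  # my-first-post-abc123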
def update_cluster(cluster, cluster_args, args, api=None, path=None, session_file=None):
    """Updates cluster properties"""
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating cluster. %s\n" % get_url(cluster))
    log_message(message, log_file=session_file, console=args.verbosity)
    cluster = api.update_cluster(cluster, cluster_args)
    check_resource_error(cluster, "Failed to update cluster: %s" % cluster['resource'])
    cluster = check_resource(cluster, api.get_cluster, query_string=FIELDS_QS)
    if is_shared(cluster):
        message = dated("Shared cluster link. %s\n" % get_url(cluster, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, cluster)

    return cluster
d07e3969e90cbc84f5329845e540c3b1a03d86b5
9,392
def get_post_by_user(user_id: int, database: Session) -> Post:
    """Return all posts by the given user, newest first."""
    post = database.query(Post).filter(
        Post.user == user_id).order_by(Post.id.desc()).all()
    logger.info("The following posts were returned from the database: %s", post)
    return post
9274caf4d484e68bdc7c852aff6360d9674b2957
9,393
import yaml


def unformat_bundle(formattedBundle):
    """
    Converts a push-ready bundle into a structured object by changing
    stringified yaml of 'customResourceDefinitions', 'clusterServiceVersions',
    and 'packages' into lists of objects.
    Undoing the format helps simplify bundle validation.

    :param formattedBundle: A push-ready bundle
    """
    bundle = BuildCmd()._get_empty_bundle()
    if 'data' not in formattedBundle:
        return bundle
    if 'customResourceDefinitions' in formattedBundle['data']:
        customResourceDefinitions = yaml.safe_load(
            formattedBundle['data']['customResourceDefinitions'])
        if customResourceDefinitions:
            bundle['data']['customResourceDefinitions'] = customResourceDefinitions
    if 'clusterServiceVersions' in formattedBundle['data']:
        clusterServiceVersions = yaml.safe_load(
            formattedBundle['data']['clusterServiceVersions'])
        if clusterServiceVersions:
            bundle['data']['clusterServiceVersions'] = clusterServiceVersions
    if 'packages' in formattedBundle['data']:
        packages = yaml.safe_load(formattedBundle['data']['packages'])
        if packages:
            bundle['data']['packages'] = packages
    return bundle
fcc6067fab89dffa8e31e47da42060ca11a48478
9,394
def supports_box_chars() -> bool:
    """Check if the encoding supports Unicode box characters."""
    return all(map(can_encode, "│─└┘┌┐"))
82a3f57429d99dc2b16055d2b7103656ec2e05e5
9,395
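A minimal stand-in for the can_encode helper this check assumes, testing against the active stdout encoding:

import sys

def can_encode(ch: str) -> bool:
    # Hypothetical helper: True if ch survives stdout's encoding.
    try:
        ch.encode(sys.stdout.encoding or "ascii")
        return True
    except UnicodeEncodeError:
        return False

print(supports_box_chars())  # True on a UTF-8 terminal, False under e.g. ascii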
import numpy as np


def calculate_intersection_over_union(box_data, prior_boxes):
    """Calculate intersection over union of box_data with respect to
    prior_boxes.

    Arguments:
        ground_truth_data: numpy array with shape (4) indicating
            x_min, y_min, x_max and y_max coordinates of the bounding box.
        prior_boxes: numpy array with shape (num_boxes, 4).

    Returns:
        intersections_over_unions: numpy array with shape (num_boxes) which
            corresponds to the intersection over unions of box_data with
            respect to all prior_boxes.
    """
    x_min = box_data[0]
    y_min = box_data[1]
    x_max = box_data[2]
    y_max = box_data[3]
    prior_boxes_x_min = prior_boxes[:, 0]
    prior_boxes_y_min = prior_boxes[:, 1]
    prior_boxes_x_max = prior_boxes[:, 2]
    prior_boxes_y_max = prior_boxes[:, 3]
    # calculating the intersection
    intersections_x_min = np.maximum(prior_boxes_x_min, x_min)
    intersections_y_min = np.maximum(prior_boxes_y_min, y_min)
    intersections_x_max = np.minimum(prior_boxes_x_max, x_max)
    intersections_y_max = np.minimum(prior_boxes_y_max, y_max)
    intersected_widths = intersections_x_max - intersections_x_min
    intersected_heights = intersections_y_max - intersections_y_min
    intersected_widths = np.maximum(intersected_widths, 0)
    intersected_heights = np.maximum(intersected_heights, 0)
    intersections = intersected_widths * intersected_heights
    # calculating the union
    prior_box_widths = prior_boxes_x_max - prior_boxes_x_min
    prior_box_heights = prior_boxes_y_max - prior_boxes_y_min
    prior_box_areas = prior_box_widths * prior_box_heights
    box_width = x_max - x_min
    box_height = y_max - y_min
    ground_truth_area = box_width * box_height
    unions = prior_box_areas + ground_truth_area - intersections
    intersection_over_union = intersections / unions
    return intersection_over_union
6ac634953a92f1b81096f72209ae5d25d46aa4e6
9,396
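A worked example: one box against three priors, one identical (IoU 1.0), one disjoint (IoU 0.0), and one partially overlapping (intersection 1, union 4 + 4 - 1 = 7):

import numpy as np

box = np.array([0.0, 0.0, 2.0, 2.0])
priors = np.array([
    [0.0, 0.0, 2.0, 2.0],  # identical    -> 1.0
    [3.0, 3.0, 4.0, 4.0],  # disjoint     -> 0.0
    [1.0, 1.0, 3.0, 3.0],  # overlapping  -> 1/7 ~= 0.143
])
print(calculate_intersection_over_union(box, priors))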
def get_report(analytics, start_date, end_date='today'):
    """Queries the Analytics Reporting API V4.

    Args:
        analytics: An authorized Analytics Reporting API V4 service object.
    Returns:
        The Analytics Reporting API V4 response.
    """
    return analytics.reports().batchGet(
        body={
            'reportRequests': [
                {
                    'viewId': VIEW_ID,
                    'dateRanges': [{'startDate': start_date, 'endDate': end_date}],
                    'metrics': [{'expression': 'ga:userTimingValue'}],
                    'dimensions': [{'name': 'ga:userTimingVariable'}]
                }]
        }
    ).execute()
cac0b27a40f6a648a4d3f41aa9615dc114700f84
9,397
from xml.dom import minidom

from dicttoxml import dicttoxml


def write_pinout_xml(pinout, out_xml=None):
    """
    write the pinout dict to xml format with no attributes. this is verbose
    but is the preferred xml format
    """
    ar = []
    for k in sort_alpha_num(pinout.keys()):
        d = pinout[k]
        d['number'] = k
        # ar.append({'pin': d})
        ar.append(d)

    # x = dicttoxml(pinout, custom_root='pin_map', attr_type=True)
    my_item_func = lambda x: 'pin'
    # x = dicttoxml(ar, custom_root='pin_map', attr_type=False)
    x = dicttoxml(ar, custom_root='pin_map', item_func=my_item_func, attr_type=False)

    reparsed = minidom.parseString(x)
    xml_pretty = reparsed.toprettyxml(indent="  ")

    if out_xml is not None:
        with open(out_xml, "w") as fo:
            fo.write(xml_pretty)

    return xml_pretty
7f2fff341b11eb29bf672a4f78b0fc0971a26cbc
9,398
import json


def get_solution(request, level=1):
    """Returns a render of answers.html"""
    context = RequestContext(request)
    cheat_message = '\\text{Ulovlig tegn har blitt brukt i svar}'
    required_message = '\\text{Svaret ditt har ikke utfylt alle krav}'
    render_to = 'game/answer.html'
    if request.method == 'POST':
        form = QuestionForm(request.POST)
        if form.is_valid():
            form_values = form.process()
            template = Template.objects.get(pk=form_values['primary_key'])
            user_answer = form_values['user_answer']
            try:
                disallowed = json.loads(template.disallowed)
            except ValueError:
                disallowed = []
            try:
                required = json.loads(template.required)
            except ValueError:
                required = []
            context_dict = make_answer_context_dict(form_values)
            if (cheat_check(user_answer, disallowed, form_values['variable_dictionary'].split('§'))) and \
                    (form_values['template_type'] == 'normal') and (context_dict['user_won']):
                context_dict['answer'] = cheat_message
                return render_to_response(render_to, context_dict, context)
            elif (required_check(user_answer, required, form_values['variable_dictionary'].split('§'))) and \
                    (form_values['template_type'] == 'normal') and (context_dict['user_won']):
                context_dict['answer'] = required_message
                return render_to_response(render_to, context_dict, context)
            if request.is_ajax():
                new_user_rating, new_star = change_level_rating(template, request.user,
                                                                context_dict['user_won'],
                                                                form_values['template_type'], level)
                context_dict['chapter_id'] = request.POST['chapter_id']
                context_dict['ulp'] = int(new_user_rating)
                context_dict['new_star'] = new_star
                context_dict['stars'] = get_user_stars_for_level(request.user,
                                                                 Level.objects.get(pk=level))
                return render_to_response(render_to, context_dict, context)
            else:
                change_elo(template, request.user, context_dict['user_won'], form_values['template_type'])
                # fixed: the response was built but never returned
                return render_to_response(render_to, context_dict, context)
        else:
            print(form.errors)
f6d5b7c90b656d2302c1aaf2935fc39bcf882a03
9,399
def get_work_log_queue():
    """The json format is::

        {'func': 'transform',
         'kw': {
             ...  # same as the task_queue described earlier
         },
         "runtime": {  # queue runtime information
             'created': 12323423,   # time the job entered the original queue
             'queue': 'q01',        # which atomic queue it is in
             'start': 123213123,    # transform start time
             'end': 123213123,      # transform end time
             'worker': 'w01',       # transformer (worker) name
             'thread': '131231',
             'return': -1,          # returned error code, 0 means success
             'reason': '失败原因'    # detailed failure reason
         }
        }
    """
    work__log_queue = "ztq:queue:worker_log"
    return get_limit_queue(work__log_queue, 200)
26b2e3c73f7dd05b44659d3a02ca8d2b8205057e
9,400
def is_first_buy(ka, ka1, ka2=None, pf=False):
    """Identify a first buy (一买) at a given level.

    Note: if `ka1`, the KlineAnalyze one level above this one, does not exist,
    a first buy at this level cannot be identified and "no action" is
    returned!!!

    First-buy identification logic:
    1) required: the last segment mark and the last stroke mark at the upper
       level coincide and form a bottom fractal;
    2) required: the last downward segment at the upper level contains at
       least 6 stroke marks, and the last segment mark at this level is a
       bottom fractal;
    3) required: the downward segment at this level diverges, or the downward
       stroke at this level diverges;
    4) auxiliary: the downward segment at the lower level diverges, or the
       downward stroke at the lower level diverges.

    :param ka: KlineAnalyze for this level
    :param ka1: KlineAnalyze one level above
    :param ka2: KlineAnalyze one level below, defaults to None
    :param pf: bool
        pf is short for "precision first"; it controls whether to use
        precision-first mode. Defaults to False, i.e. recall-first mode.
        In precision-first mode, the auxiliary condition is fully used to
        improve identification accuracy.
    :return: dict
    """
    detail = {
        "标的代码": ka.symbol,  # symbol code
        "操作提示": "无操作",    # action hint: "no action"
        "出现时间": None,       # occurrence time
        "基准价格": None,       # reference price
        "其他信息": None        # other info
    }
    if not isinstance(ka1, KlineAnalyze):
        return detail

    # The last segment mark and the last stroke mark at the upper level
    # coincide and form a bottom fractal.
    if len(ka1.xd) >= 2 and ka1.xd[-1]['xd'] == ka1.bi[-1]['bi'] \
            and ka1.xd[-1]['fx_mark'] == ka1.bi[-1]['fx_mark'] == 'd':
        bi_inside = [x for x in ka1.bi if ka1.xd[-2]['dt'] <= x['dt'] <= ka1.xd[-1]['dt']]
        # The last downward segment at the upper level contains at least 6
        # stroke marks, and the last segment mark at this level is a bottom
        # fractal.
        if len(bi_inside) >= 6 and ka.xd[-1]['fx_mark'] == 'd':
            # The downward segment at this level diverges, or the downward
            # stroke at this level diverges.
            if (ka.xd_bei_chi() or (ka.bi[-1]['fx_mark'] == 'd' and ka.bi_bei_chi())):
                detail['操作提示'] = "一买"  # first buy
                detail['出现时间'] = ka.xd[-1]['dt']
                detail['基准价格'] = ka.xd[-1]['xd']

    if pf and detail["操作提示"] == "一买" and isinstance(ka2, KlineAnalyze):
        # The segment at the lower level diverges, or the stroke at the lower
        # level diverges.
        if not ((ka2.xd[-1]['fx_mark'] == 'd' and ka2.xd_bei_chi()) or
                (ka2.bi[-1]['fx_mark'] == 'd' and ka2.bi_bei_chi())):
            detail['操作提示'] = "无操作"

    return detail
5ea35d728f3ddfaa5cff09a2e735c480f1e3c622
9,401
# Assumed import locations for the helpers used below (mne / mne-nirs / icecream):
import mne
import mne_nirs
from icecream import ic
from mne.preprocessing.nirs import beer_lambert_law
from mne_nirs.channels import get_long_channels, get_short_channels


def preprocess(path, l_pass=0.7, h_pass=0.01, bandpass=True, short_ch_reg=False,
               tddr=True, negative_correlation=False, verbose=False, return_all=False):
    """
    Load raw data and preprocess

    :param str path: path to the raw data
    :param float l_pass: low pass frequency
    :param float h_pass: high pass frequency
    :param bool bandpass: apply bandpass filter
    :param bool short_ch_reg: apply short channel regression
    :param bool tddr: apply tddr
    :param bool negative_correlation: apply negative correlation
    :param bool verbose: print progress
    :return: preprocessed data
    """
    if verbose:
        ic("Loading ", path)
    raw_intensity = mne.io.read_raw_snirf(path, preload=True)
    step_od = mne.preprocessing.nirs.optical_density(raw_intensity)

    # sci = mne.preprocessing.nirs.scalp_coupling_index(raw_od, l_freq=0.7, h_freq=1.5)
    # raw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5))

    if verbose:
        ic("Apply short channel regression.")
    if short_ch_reg:
        step_od = mne_nirs.signal_enhancement.short_channel_regression(step_od)

    if verbose:
        ic("Do temporal derivative distribution repair on:", step_od)
    if tddr:
        step_od = mne.preprocessing.nirs.tddr(step_od)

    if verbose:
        ic("Convert to haemoglobin with the modified beer-lambert law.")
    step_haemo = beer_lambert_law(step_od, ppf=6)

    if verbose:
        ic("Apply further data cleaning techniques and extract epochs.")
    if negative_correlation:
        step_haemo = mne_nirs.signal_enhancement.enhance_negative_correlation(
            step_haemo)

    if not return_all:
        if verbose:
            ic("Separate the long channels and short channels.")
        short_chs = get_short_channels(step_haemo)
        step_haemo = get_long_channels(step_haemo)

    if verbose:
        ic("Bandpass filter on:", step_haemo)
    if bandpass:
        step_haemo = step_haemo.filter(
            h_pass, l_pass, h_trans_bandwidth=0.3, l_trans_bandwidth=h_pass * 0.25)

    return step_haemo
01d508de322fa007886e34838911d2cccea79aab
9,402
def geomapi_To2d(*args):
    """
    To intersect a curve and a surface. This function builds (in the
    parametric space of the plane P) a 2D curve equivalent to the 3D curve C.
    The 3D curve C is considered to be located in the plane P.

    Warning: The 3D curve C must be of one of the following types:
    - a line
    - a circle
    - an ellipse
    - a hyperbola
    - a parabola
    - a Bezier curve
    - a BSpline curve

    Exceptions: Standard_NoSuchObject if C is not a defined type curve.

    :param C:
    :type C: Handle_Geom_Curve &
    :param P:
    :type P: gp_Pln
    :rtype: Handle_Geom2d_Curve
    """
    return _GeomAPI.geomapi_To2d(*args)
7a8a6436f364e933d71ba8fb47617f01b0e13b47
9,403
import os.path as osp

import rospkg
import yaml


def get_object_list():
    """Returns the object name list for APC2015.

    Args:
        None.

    Returns:
        objects (list): List of object names.
    """
    pkg_path = rospkg.RosPack().get_path(PKG)
    yaml_file = osp.join(pkg_path, 'data/object_list.yml')
    with open(yaml_file) as f:
        # yaml.load without an explicit Loader is deprecated and unsafe;
        # safe_load is sufficient for a plain list of names.
        objects = yaml.safe_load(f)
    return objects
7fd1268ef8804eb394a42a6b2fdc9fc223cd4316
9,404
from base64 import b64encode


def gtMakeTAKBlobMsg(callsign, text, aesKey=False):
    """ Assemble an ATAK plugin compatible chat message blob
        (suitable for feeding to gtMakeAPIMsg() )
        With optional AES encryption, if a key is provided
    """
    body = (callsign + b': ' + text)[:230]

    # Apply optional encryption (and base64 encoding only for chats)
    if aesKey:
        body = b64encode(aesEncrypt(body, aesKey))

    return gtMakeGTABlobMsg(body, 'A')
ecc562e92a72a0a6e0d5cc45563d1c89962d931b
9,405
import re


def validate_json_with_extensions(value, rule_obj, path):
    """ Performs the above match, but also matches a dict or a list. This is
    just because it seems like you can't match a dict OR a list in pykwalify
    """
    validate_extensions(value, rule_obj, path)

    if not isinstance(value, (list, dict)):
        raise BadSchemaError("Error at {} - expected a list or dict".format(path))

    def nested_values(d):
        if isinstance(d, dict):
            for v in d.values():
                if isinstance(v, dict):
                    for v_s in v.values():
                        yield v_s
                else:
                    yield v
        else:
            yield d

    if any(isinstance(i, ApproxScalar) for i in nested_values(value)):
        # If this is a request data block
        if not re.search(r"^/stages/\d/(response/body|mqtt_response/json)", path):
            raise BadSchemaError(
                "Error at {} - Cannot use a '!approx' in anything other than an expected http response body or mqtt response json".format(
                    path
                )
            )

    return True
ef4d5744adf0c2d3ca326da66cbe608b306a2ca3
9,406
def artists_by_rating(formatter, albums):
    """Returns the artists sorted by decreasing mean album rating.

    Only artists with more than one reviewed album are considered.
    """
    artist_tags = {album["artist_tag"] for album in albums}
    artists = []

    # build the list of artists and compute their ratings
    for artist_tag in artist_tags:
        specific_albums = [x for x in albums if x["artist_tag"] == artist_tag]
        if len(specific_albums) > 1:
            rating = compute_artist_rating([x["rating"] for x in specific_albums])
            artists.append(
                {
                    "artist_tag": artist_tag,
                    "artist": specific_albums[0]["artist"],
                    "rating": rating,
                }
            )
    sorted_artists = sorted(
        artists, key=lambda x: (x["rating"], x["artist"]), reverse=True
    )
    return formatter.parse_list(sorted_artists, formatter.format_artist_rating)
fdf443973b4187650d95f76f8cde2a61ea7a1a3f
9,407
def st_max(*args): """Max function. Parameters ---------- x : float, int, MissingValue instance, or None (2 or more such inputs allowed) Returns ------- max(x1, x2, ...) if any x is non-missing (with missing values ignored). Otherwise, MISSING (".") returned. """ if len(args) <= 1: raise TypeError("need at least 2 arguments") vectors = [a for a in args if isinstance(a, StataVarVals)] scalars = [ a for a in args if not isinstance(a, StataVarVals) and not _is_missing(a) ] if len(vectors) != 0: sca_max = max(scalars) if not len(scalars) == 0 else None return StataVarVals([_max(*v, sub_max=sca_max) for v in zip(*vectors)]) elif len(scalars) == 0: return mv return max(scalars)
978cab7522250541890c723fcf33d2ded9539293
9,408
def is_button_controller(device: Device) -> bool: """Return true if the device is a stateless button controller.""" return ( CAP_PUSHABLE_BUTTON in device.capabilities or CAP_HOLDABLE_BUTTON in device.capabilities or CAP_DOUBLE_TAPABLE_BUTTON in device.capabilities )
aa16170469f6a65d2ed94ab251817e722082ef16
9,409
import logging

import boto3

logger = logging.getLogger(__name__)  # module-level logger assumed by the original snippet


def list_parts(bucket, key, upload_id):
    """Lists the parts that have been uploaded for a specific multipart upload.

    This operation must include the upload ID, which you obtain by sending the
    initiate multipart upload request (see CreateMultipartUpload). This request
    returns a maximum of 1,000 uploaded parts, which is also the default. You can
    restrict the number of parts returned by specifying the max-parts request
    parameter. If your multipart upload consists of more than 1,000 parts, the
    response returns an IsTruncated field with the value of true, and a
    NextPartNumberMarker element. In subsequent ListParts requests you can include
    the part-number-marker query string parameter and set its value to the
    NextPartNumberMarker field value from the previous response.

    See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.list_parts

    Request Syntax
    --------------
    response = client.list_parts(
        Bucket='string',
        Key='string',
        MaxParts=123,
        PartNumberMarker=123,
        UploadId='string',
        RequestPayer='requester',
        ExpectedBucketOwner='string'
    )

    Response Syntax
    ---------------
    {
        'AbortDate': datetime(2015, 1, 1),
        'AbortRuleId': 'string',
        'Bucket': 'string',
        'Key': 'string',
        'UploadId': 'string',
        'PartNumberMarker': 123,
        'NextPartNumberMarker': 123,
        'MaxParts': 123,
        'IsTruncated': True|False,
        'Parts': [
            {
                'PartNumber': 123,
                'LastModified': datetime(2015, 1, 1),
                'ETag': 'string',
                'Size': 123
            },
        ],
        'Initiator': {
            'ID': 'string',
            'DisplayName': 'string'
        },
        'Owner': {
            'DisplayName': 'string',
            'ID': 'string'
        },
        'StorageClass': 'STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'|'GLACIER'|'DEEP_ARCHIVE'|'OUTPOSTS',
        'RequestCharged': 'requester'
    }

    Parameters
    ----------
    bucket : str
        Name of the S3 bucket
    key : str
        Name of the key for the multipart upload
    upload_id : str
        The unique identifier returned on creation of the multipart upload

    Returns
    -------
    response : dict
        The ListParts response dictionary from boto3, or None if the call failed
    """
    client = boto3.client("s3")
    response = None  # ensure a defined return value if the call below raises
    try:
        response = client.list_parts(
            Bucket=bucket,
            Key=key,
            UploadId=upload_id,
        )
        logger.info(
            f"Listed parts for multipart upload {upload_id} for key {key} in bucket {bucket}"
        )
    except Exception as e:
        logger.error(
            f"Could not list parts for multipart upload {upload_id} for key {key} in bucket {bucket}: {e}"
        )
    return response
eb343e071ce72ea326fc479934984fdff425dfec
9,411
def leap_year():
    """Read a year from user input and report whether it is a leap year.

    Rules for a leap year (Gregorian calendar, introduced in 1582):
    As you surely know, due to some astronomical reasons, years may be leap or
    common. The former are 366 days long, while the latter are 365 days long.
    The following rule is used to determine the kind of year:
    --> if the year number isn't divisible by four, it's a common year;
    --> otherwise, if the year number isn't divisible by 100, it's a leap year;
    --> otherwise, if the year number isn't divisible by 400, it's a common year;
    --> otherwise, it's a leap year.
    :return: message string describing the result
    """
    year = int(input("Enter a year: "))
    mess_1 = 'It\'s a common year!'
    mess_2 = 'It\'s a leap year!'
    if year <= 1582:
        return f'{year} does not fall under Gregorian Calendar!!'
    elif year % 4 != 0:
        return mess_1
    elif year % 100 != 0:
        return mess_2
    elif year % 400 != 0:
        return mess_1
    else:
        return mess_2
5cf459514ce768c1cf633fdddab5f986004bc1c8
9,412
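Because leap_year() reads from stdin and returns a message string, the rule itself is easiest to check in isolation. A minimal sketch of the same Gregorian rule as a pure predicate; the helper name is_leap is hypothetical, not part of the original snippet:

def is_leap(year: int) -> bool:
    # Gregorian rule: divisible by 4, except centuries not divisible by 400.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

# Spot checks against well-known years.
assert is_leap(2000) is True    # divisible by 400
assert is_leap(1900) is False   # century not divisible by 400
assert is_leap(2024) is True    # ordinary multiple of 4
assert is_leap(2023) is False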
import math def parse(files, **kwargs): """Parse all BAM files.""" parsed = [] if kwargs["meta"].has_field("base_coverage"): cov_range = kwargs["meta"].field_meta("base_coverage")["range"] else: cov_range = [math.inf, -math.inf] if kwargs["meta"].has_field("read_coverage"): read_cov_range = kwargs["meta"].field_meta("read_coverage")["range"] else: read_cov_range = [math.inf, -math.inf] names = base_names(files) for file in names: if ".json" in file: fields = parse_json_cov( file, **kwargs, cov_range=cov_range, read_cov_range=read_cov_range ) else: fields = parse_bam( file, **kwargs, cov_range=cov_range, read_cov_range=read_cov_range ) if "cov" in fields: parsed.append(fields["cov"]) cov_range = fields["cov_range"] if "y" not in kwargs["meta"].plot: kwargs["meta"].plot.update({"y": fields["cov_id"]}) if "read_cov" in fields: parsed.append(fields["read_cov"]) read_cov_range = fields["read_cov_range"] return parsed
c12b068f2a32052cbaa583a4704f86c25e577947
9,413
def login(request): """Login view for GET requests.""" logged_in = request.authenticated_userid is not None if logged_in: return {'logged_in': True, 'form_enabled': False, 'status': u'Already logged in', 'status_type': u'info'} status = u'' status_type = u'' return { 'form_enabled': True, 'status_type': status_type, 'status': status, 'logged_in': False, 'username': request.params.get('username', u''), }
8cab36d8d059d0683ef2e84a40cca5c99a27c6fc
9,414
def of_type(_type, value_1, *args) -> bool:
    """
    Check if a collection of values are of the same type.

    Parameters:
        _type (any): The type to check for.
        value_1 (any): The first value to check.
        *args (any): Rest of values to check against given type.

    Returns:
        (bool) whether or not all inputs are of the given type.
    """
    # all() short-circuits on the first value that is not an instance of _type.
    return isinstance(value_1, _type) and all(isinstance(a, _type) for a in args)
eab1e70655ff74b1cbfc338a893719b7f0681f4a
9,415
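Quick sanity checks for of_type, assuming the function above is in scope:

assert of_type(int, 1, 2, 3) is True          # all ints
assert of_type(str, "a", "b", "c") is True    # all strings
assert of_type(int, 1, "2", 3) is False       # mixed types fail
assert of_type(bool, True) is True            # a single value works too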
import yaml def read_config(path): """ Reads the Kong config file (YAML). """ if path is None: raise Exception( "empty path provided. please provide a path using `--config=<config.yml>`" ) with open(path, "r") as stream: try: return yaml.safe_load(stream) except yaml.YAMLError as exc: raise exc
343fabb8fa1c4cc78ace63466c864e50cf5dc974
9,417
def generate_grid_world(grid, prob, pos_rew, neg_rew, gamma=.9, horizon=100): """ This Grid World generator requires a .txt file to specify the shape of the grid world and the cells. There are five types of cells: 'S' is the starting position where the agent is; 'G' is the goal state; '.' is a normal cell; '*' is a hole, when the agent steps on a hole, it receives a negative reward and the episode ends; '#' is a wall, when the agent is supposed to step on a wall, it actually remains in its current state. The initial states distribution is uniform among all the initial states provided. The grid is expected to be rectangular. Args: grid (str): the path of the file containing the grid structure; prob (float): probability of success of an action; pos_rew (float): reward obtained in goal states; neg_rew (float): reward obtained in "hole" states; gamma (float, .9): discount factor; horizon (int, 100): the horizon. Returns: A FiniteMDP object built with the provided parameters. """ grid_map, cell_list = parse_grid(grid) p = compute_probabilities(grid_map, cell_list, prob) r = compute_reward(grid_map, cell_list, pos_rew, neg_rew) mu = compute_mu(grid_map, cell_list) return FiniteMDP(p, r, mu, gamma, horizon)
753fa30327f2dddfb4a459fbb40e842b28b0eda8
9,418
def sqrt_quadrature_scheme(N_poly, N_poly_log):
    """ Returns a quadrature rule that is exact on [0, 1] for p(x) + q(x)sqrt(x)
    with deg(p) <= N_poly and deg(q) <= N_poly_log. """
    nodes, weights = sqrt_quadrature_rule(N_poly, N_poly_log)
    return QuadScheme1D(nodes, weights)
c39539604955f473c0a77816090fe180645670ae
9,419
def check_dataset_update(args, dataset): """Checks if the dataset information must be updated. """ return (args.dataset_attributes or args.import_fields or (args.shared_flag and r.shared_changed(args.shared, dataset)) or (((hasattr(args, 'max_categories') and args.max_categories > 0) or (hasattr(args, 'multi_label') and args.multi_label)) and args.objective_field))
005700a0d544333f018ec423a6e3d287ab982553
9,420
from typing import Dict from typing import List import json def get_package_extras(provider_package_id: str) -> Dict[str, List[str]]: """ Finds extras for the package specified. :param provider_package_id: id of the package """ if provider_package_id == 'providers': return {} with open(DEPENDENCIES_JSON_FILE) as dependencies_file: cross_provider_dependencies: Dict[str, List[str]] = json.load(dependencies_file) extras_dict = ( { module: [get_pip_package_name(module)] for module in cross_provider_dependencies[provider_package_id] } if cross_provider_dependencies.get(provider_package_id) else {} ) provider_yaml_dict = get_provider_yaml(provider_package_id) additional_extras = provider_yaml_dict.get('additional-extras') if additional_extras: for key in additional_extras: if key in extras_dict: extras_dict[key].append(additional_extras[key]) else: extras_dict[key] = additional_extras[key] return extras_dict
15ac01740e60d2af73458b7ef46330708831a0ca
9,421
import numpy as np


def e(a: float, b: float) -> float:
    """
    e = sqrt(1 + (b * b) / (a * a))

    Note: as written, this is the eccentricity of a hyperbola with semi-axes
    a and b (always > 1); an ellipse with semi-major a and semi-minor b would
    use sqrt(1 - (b * b) / (a * a)) instead.

    :param a: semi-major axis
    :type a: float
    :param b: semi-minor axis
    :type b: float
    :return: eccentricity
    :rtype: float
    """
    return np.sqrt(1 + (b * b) / (a * a))
f2eec5065d735984daa5197b8401ec3a60914d25
9,422
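A worked check, assuming the function above: with a = 5 and b = 3 the formula gives sqrt(1 + 9/25) = sqrt(34)/5 ≈ 1.166, consistent with a hyperbola (e > 1):

import numpy as np

ecc = e(5.0, 3.0)
assert np.isclose(ecc, np.sqrt(34) / 5)   # sqrt(1 + 9/25) ≈ 1.1662
print(ecc)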
from pathlib import Path import sh def parse_note(path: Path) -> dict: """ convert note in plain text to a dictionary. Line #1 ~ #5 are meta data of the note. Line #9 to end is the body. """ header_line_number = 5 body_start_line = 9 res = {} with open(path) as f: for x in range(header_line_number): the_line = next(f).strip() if the_line.endswith(':'): the_line += ' ' # fix 'Tags: ' striped to 'Tags:' problem header_sections = the_line.split(': ') assert len(header_sections) == 2, f'Please fix header {the_line} of note {path}' res[header_sections[0]] = header_sections[1] body = sh.sed('-n', f'{body_start_line},$p', path).stdout.decode('utf-8') res['body'] = body return res
792f4bace60fa52b1a7cbeeaf0dabd881ffd4a24
9,423
import frappe


def get_previous_sle_for_warehouse(last_sle, exclude_current_voucher=False):
    """get stock ledger entries filtered by specific posting datetime conditions"""
    # NOTE: exclude_current_voucher is accepted for API compatibility but is
    # not used in this query.
    last_sle['time_format'] = '%H:%i:%s'
    if not last_sle.get("posting_date"):
        last_sle["posting_date"] = "1900-01-01"
    if not last_sle.get("posting_time"):
        last_sle["posting_time"] = "00:00"

    sle = frappe.db.sql("""
        select *, timestamp(posting_date, posting_time) as "timestamp"
        from `tabStock Ledger Entry`
        where item_code = %(item_code)s
            and warehouse = %(warehouse)s
            and is_cancelled = 0
            and timestamp(posting_date, time_format(posting_time, %(time_format)s)) < timestamp(%(posting_date)s, time_format(%(posting_time)s, %(time_format)s))
        order by timestamp(posting_date, posting_time) desc, creation desc
        limit 1
        for update""", last_sle, as_dict=1)

    return sle[0] if sle else frappe._dict()
7fdc0db05564cc54555784c474c7bc4cb33e280a
9,424
import networkx as nx


def forest_str(graph, with_labels=True, sources=None, write=None, ascii_only=False):
    """
    Creates a nice utf8 representation of a directed forest

    Parameters
    ----------
    graph : nx.DiGraph | nx.Graph
        Graph to represent (must be a tree, forest, or the empty graph)

    with_labels : bool
        If True will use the "label" attribute of a node to display if it
        exists otherwise it will use the node value itself. Defaults to True.

    sources : List
        Mainly relevant for undirected forests, specifies which nodes to list
        first. If unspecified the root nodes of each tree will be used for
        directed forests; for undirected forests this defaults to the nodes
        with the smallest degree.

    write : callable
        Function to use to write to, if None new lines are appended to a list
        and returned. If set to the `print` function, lines will be written to
        stdout as they are generated. If specified, this function will return
        None. Defaults to None.

    ascii_only : Boolean
        If True only ASCII characters are used to construct the visualization

    Returns
    -------
    str | None :
        utf8 representation of the tree / forest

    Example
    -------
    >>> graph = nx.balanced_tree(r=2, h=3, create_using=nx.DiGraph)
    >>> print(nx.forest_str(graph))
    ╙── 0
        ├─╼ 1
        │   ├─╼ 3
        │   │   ├─╼ 7
        │   │   └─╼ 8
        │   └─╼ 4
        │       ├─╼ 9
        │       └─╼ 10
        └─╼ 2
            ├─╼ 5
            │   ├─╼ 11
            │   └─╼ 12
            └─╼ 6
                ├─╼ 13
                └─╼ 14

    >>> graph = nx.balanced_tree(r=1, h=2, create_using=nx.Graph)
    >>> print(nx.forest_str(graph))
    ╙── 0
        └── 1
            └── 2
    >>> print(nx.forest_str(graph, ascii_only=True))
    +-- 0
        L-- 1
            L-- 2
    """
    printbuf = []
    if write is None:
        _write = printbuf.append
    else:
        _write = write

    # Define glyphs
    # Notes on available box and arrow characters
    # https://en.wikipedia.org/wiki/Box-drawing_character
    # https://stackoverflow.com/questions/2701192/triangle-arrow
    if ascii_only:
        glyph_empty = "+"
        glyph_newtree_last = "+-- "
        glyph_newtree_mid = "+-- "
        glyph_endof_forest = "    "
        glyph_within_forest = ":   "
        glyph_within_tree = "|   "

        glyph_directed_last = "L-> "
        glyph_directed_mid = "|-> "

        glyph_undirected_last = "L-- "
        glyph_undirected_mid = "|-- "
    else:
        glyph_empty = "╙"
        glyph_newtree_last = "╙── "
        glyph_newtree_mid = "╟── "
        glyph_endof_forest = "    "
        glyph_within_forest = "╎   "
        glyph_within_tree = "│   "

        glyph_directed_last = "└─╼ "
        glyph_directed_mid = "├─╼ "

        glyph_undirected_last = "└── "
        glyph_undirected_mid = "├── "

    if len(graph.nodes) == 0:
        _write(glyph_empty)
    else:
        if not nx.is_forest(graph):
            raise nx.NetworkXNotImplemented("input must be a forest or the empty graph")

        is_directed = graph.is_directed()
        succ = graph.succ if is_directed else graph.adj

        if sources is None:
            if is_directed:
                # use real source nodes for directed trees
                sources = [n for n in graph.nodes if graph.in_degree[n] == 0]
            else:
                # use arbitrary sources for undirected trees
                sources = [
                    min(cc, key=lambda n: graph.degree[n])
                    for cc in nx.connected_components(graph)
                ]

        # Populate the stack with each source node, empty indentation, and mark
        # the final node. Reverse the stack so sources are popped in the
        # correct order.
        last_idx = len(sources) - 1
        stack = [(node, "", (idx == last_idx)) for idx, node in enumerate(sources)][
            ::-1
        ]

        seen = set()
        while stack:
            node, indent, islast = stack.pop()
            if node in seen:
                continue
            seen.add(node)

            if not indent:
                # Top level items (i.e. trees in the forest) get different
                # glyphs to indicate they are not actually connected
                if islast:
                    this_prefix = indent + glyph_newtree_last
                    next_prefix = indent + glyph_endof_forest
                else:
                    this_prefix = indent + glyph_newtree_mid
                    next_prefix = indent + glyph_within_forest

            else:
                # For individual tree edges distinguish between directed and
                # undirected cases
                if is_directed:
                    if islast:
                        this_prefix = indent + glyph_directed_last
                        next_prefix = indent + glyph_endof_forest
                    else:
                        this_prefix = indent + glyph_directed_mid
                        next_prefix = indent + glyph_within_tree
                else:
                    if islast:
                        this_prefix = indent + glyph_undirected_last
                        next_prefix = indent + glyph_endof_forest
                    else:
                        this_prefix = indent + glyph_undirected_mid
                        next_prefix = indent + glyph_within_tree

            if with_labels:
                label = graph.nodes[node].get("label", node)
            else:
                label = node

            _write(this_prefix + str(label))

            # Push children on the stack in reverse order so they are popped in
            # the original order.
            children = [child for child in succ[node] if child not in seen]
            for idx, child in enumerate(children[::-1], start=1):
                islast_next = idx <= 1
                try_frame = (child, next_prefix, islast_next)
                stack.append(try_frame)

    if write is None:
        # Only return a string if the custom write function was not specified
        return "\n".join(printbuf)
3486545035b9c2a8954102bdb92ebe9dd7b1fa24
9,425
from copy import copy


def rotated_shower(shower, alt, az):
    """
    Return a rotated shower object from a shower object and a direction (alt, az)

    Parameters
    ----------
    shower: shower class object
    alt: target altitude angle
    az: target azimuth angle

    Returns
    -------
    copy of the given shower but rotated
    """
    rot_shower = copy(shower)
    # Rotate the particle array toward the requested (alt, az) direction.
    # The original snippet passed shower.alt / shower.az here, which left the
    # alt / az arguments unused; the parameters are used instead.
    rot_shower.particles = shower_array_rot(shower.particles, alt, az)
    return rot_shower
d420c408083a54837c87db405a8d65abfe46a5f8
9,426
import numpy as np


def angle2circle(angles):
    """Convert angles from degrees to radians, doubled; a +7.5 degree offset is
    applied before doubling."""
    return np.deg2rad(2 * (np.array(angles) + 7.5))
4c944725fd44480b5f7261c24608b3e06cec013a
9,427
def _make_source(cls_source: str, cls_name: str, instance_method: str): """Converts a class source to a string including necessary imports. Args: cls_source (str): A string representing the source code of a user-written class. cls_name (str): The name of the class cls_source represents. instance_method (str): The method within the class that should be called from __main__ Returns: A string representing a user-written class that can be written to a file in order to yield an inner script for the ModelBuilder SDK. The only difference between the user-written code and the string returned by this method is that the user has the option to specify a method to call from __main__. """ src = "\n".join(["import torch", "import pandas as pd", cls_source]) src = src + "if __name__ == '__main__':\n" + f"\t{cls_name}().{instance_method}()" return src
105ca5d34c0de2bfc81937aaaf14b4d610eaa35a
9,428
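A quick illustration of _make_source, assuming the function above; the Trainer class source here is a hypothetical two-liner:

cls_source = (
    "class Trainer:\n"
    "    def run(self):\n"
    "        print('training...')\n"
)
src = _make_source(cls_source, "Trainer", "run")
print(src)
# The result prepends the torch/pandas imports and appends:
# if __name__ == '__main__':
#     Trainer().run()      (indented with a literal tab)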
def prepend_pass_statement(line: str) -> str: """Prepend pass at indent level and comment out the line.""" colno = num_indented(line) right_side = line[colno:] indent = " " * colno return indent + "pass # " + right_side
7d7156581167fcd6ec5c4afc482cf8bf3dea11bc
9,429
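prepend_pass_statement depends on a num_indented helper that is not shown. A minimal sketch, with num_indented assumed to count leading spaces:

def num_indented(line: str) -> int:
    # Hypothetical stand-in: number of leading space characters.
    return len(line) - len(line.lstrip(" "))

print(prepend_pass_statement("    return compute(x)"))
# -> "    pass # return compute(x)"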
from datetime import datetime
import time

import numpy as np
import pandas as pd


def download_spot_by_dates(start=datetime(2011, 1, 1)):
    """
    Download daily spot tables and store them as csv files.

    :param start: 2011-01-01 is the earliest available date
    :return: True if any file was downloaded, False otherwise
    """
    file_index = get_download_file_index(SPREAD_DIR, start=start)
    if file_index.empty:
        return False
    for date in file_index:
        date_str = date.strftime('%Y-%m-%d')
        file_path = SPREAD_DIR / '{}.csv'.format(date_str)
        if file_path.exists():
            continue
        table = download_spot_by_date(date_str)
        if len(table) != 0:
            print(date)
            spread_df = pd.DataFrame(table, columns=HEADER)
            spread_df.to_csv(str(file_path), index=False, encoding='gb2312')
        time.sleep(np.random.rand() * 5)
    return True
34574d4cd5d1985850fe681c3e5e4f6a3ebdc1a4
9,430
import numpy as np


def truncate_range(data, percMin=0.25, percMax=99.75, discard_zeros=True):
    """Truncate too low and too high values.

    Parameters
    ----------
    data : np.ndarray
        Image to be truncated.
    percMin : float
        Percentile minimum.
    percMax : float
        Percentile maximum.
    discard_zeros : bool
        Discard voxels with value 0 from truncation.

    Returns
    -------
    data : np.ndarray
    """
    if discard_zeros:
        msk = ~np.isclose(data, 0)
        pMin, pMax = np.nanpercentile(data[msk], [percMin, percMax])
    else:
        pMin, pMax = np.nanpercentile(data, [percMin, percMax])
    temp = data[~np.isnan(data)]
    temp[temp < pMin], temp[temp > pMax] = pMin, pMax  # truncate min and max
    data[~np.isnan(data)] = temp
    if discard_zeros:
        data[~msk] = 0  # put back masked out voxels
    return data
a273db14c8f651dcbdaa39825e1150bd0cdc119b
9,431
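A small demonstration of truncate_range on synthetic data with injected outliers, assuming the function above:

import numpy as np

rng = np.random.default_rng(0)
img = rng.normal(100.0, 10.0, size=(64, 64))
img[0, 0] = 1e6    # extreme high outlier
img[0, 1] = -1e6   # extreme low outlier

out = truncate_range(img.copy(), percMin=0.25, percMax=99.75, discard_zeros=False)
print(img.max(), "->", out.max())  # the outlier is clipped to the 99.75th percentile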
from aiohttp import web


async def payment_list(request):
    """
    ---
    description: Show outgoing payments, regarding {bolt11} or {payment_hash} if set
                 Can only specify one of {bolt11} or {payment_hash}
    tags:
    - payments
    produces:
    - application/json
    parameters:
    - in: body
      name: body
      required: false
      schema:
        type: object
        properties:
          bolt11:
            type: string
          payment_hash:
            type: string
    responses:
        "200":
            description: successful operation.
    """
    data = await request.json()
    bolt11 = data.get('bolt11', None)
    payment_hash = data.get('payment_hash', None)
    return web.json_response(request.app['rpc'].listpayments(bolt11=bolt11, payment_hash=payment_hash))
3a4fe428adb10dd53e9b2564fea59cdc4b7c87ff
9,432
# NOTE: `io` here appears to be a project-local helper module providing
# init_csv / write_list; it is not the stdlib io module.
import io


def write_opened(dir, file_dict, data_dict, verbose=True):
    """Write data to already-opened per-game csv files (dict values are open
    file handles), opening new files as needed."""
    for game_id, vals in data_dict.items():
        f = file_dict.get(game_id)
        if not f:
            fn = dir + str(game_id) + ".csv"
            f = io.init_csv(fn, header=bm.LINE_COLUMNS, close=False)
            file_dict[game_id] = f
        io.write_list(f, vals)
        if verbose:
            print(f"writing {vals} to game [{game_id}]")
    return file_dict
eb3ac9b95b70df31eb1ea24b94b5e416966b7bc5
9,433
def get_accessible_cases(item, user):
    """Return all cases of a cohort that are accessible to the given user."""
    return item.get_accessible_cases_for_user(user)
42d54ebf672ce401ac311f9868f6b19f93418065
9,434
import numba as nb
import numpy as np


def aux_conv5(A, B, n, idx):
    """
    Performs the convolution of A and B where B = A* (enumerate-for-loop)
    :param A: Coefficients matrix 1 (orders, buses)
    :param B: Coefficients matrix 2 (orders, buses)
    :param n: last order of the coefficients in the while loop
    :param idx: bus indices array
    :return: Array with the convolution for the buses given by "idx"
    """
    suma = np.zeros(len(idx), dtype=nb.complex128)

    for m in range(0, n):
        for i, k in enumerate(idx):
            suma[i] += A[m, k] * B[n - 1 - m, k]

    return suma.real
0acaece3da86ac578672b7ab7e0f506117e752d3
9,435
import matplotlib.pyplot as plt
import numpy as np


def plot_phaseogram(phaseogram, phase_bins, time_bins, unit_str='s', ax=None,
                    **plot_kwargs):
    """Plot a phaseogram.

    Parameters
    ----------
    phaseogram : NxM array
        The phaseogram to be plotted
    phase_bins : array of M + 1 elements
        The bins on the x-axis
    time_bins : array of N + 1 elements
        The bins on the y-axis

    Other Parameters
    ----------------
    unit_str : str
        String indicating the time unit (e.g. 's', 'MJD', etc)
    ax : `matplotlib.pyplot.axis` instance
        Axis to plot to. If None, create a new one.
    plot_kwargs : dict
        Additional arguments to be passed to pcolormesh

    Returns
    -------
    ax : `matplotlib.pyplot.axis` instance
        Axis where the phaseogram was plotted.
    """
    if ax is None:
        plt.figure('Phaseogram')
        ax = plt.subplot()
    ax.pcolormesh(phase_bins, time_bins, phaseogram.T, **plot_kwargs)
    ax.set_ylabel('Time ({})'.format(unit_str))
    ax.set_xlabel('Phase')
    ax.set_xlim([0, np.max(phase_bins)])
    ax.set_ylim([np.min(time_bins), np.max(time_bins)])
    return ax
b7a3b8aa0cf6a16e67e3d5059049082b6d308d7e
9,436
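A synthetic usage sketch for plot_phaseogram, assuming the function and its numpy/matplotlib imports above; the shapes follow the convention the pcolormesh call expects (phaseogram of shape (n_phase, n_time)):

import numpy as np
import matplotlib.pyplot as plt

n_phase, n_time = 32, 64
phase_bins = np.linspace(0, 2, n_phase + 1)    # two phase cycles on x
time_bins = np.linspace(0, 1000, n_time + 1)   # seconds on y
rng = np.random.default_rng(1)
phaseogram = rng.poisson(10.0, size=(n_phase, n_time)).astype(float)

ax = plot_phaseogram(phaseogram, phase_bins, time_bins, unit_str='s')
plt.show()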
def load_rapidSTORM_track_header(path): """ Load xml header from a rapidSTORM (track) single-molecule localization file and identify column names. Parameters ---------- path : str, bytes, os.PathLike, file-like File path for a rapidSTORM file to load. Returns ------- list of str A list of valid dataset property keys as derived from the rapidSTORM identifiers. """ # read xml part in header with open_path_or_file_like(path) as file: return _read_rapidSTORM_track_header(file)
584baa4bd0a634608bb2c254314ad80a9c7650de
9,437
def hex_to_byte(hexStr):
    """ Convert a hex string (optionally space-separated) to a string of the
    corresponding byte characters. """
    byte_chars = []  # renamed from `bytes` to avoid shadowing the builtin
    hexStr = ''.join(hexStr.split(" "))
    for i in range(0, len(hexStr), 2):
        byte_chars.append(chr(int(hexStr[i:i + 2], 16)))
    return ''.join(byte_chars)
a424d65b0a02c0d10ee5c7c25409f4a0ce477528
9,438
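Spot checks for hex_to_byte, assuming the function above:

assert hex_to_byte("48 65 6c 6c 6f") == "Hello"
assert hex_to_byte("486921") == "Hi!"   # spaces between byte pairs are optional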
def _vital_config_update(cfg, cfg_in):
    """
    Treat a vital Config object like a python dictionary

    Args:
        cfg (kwiver.vital.config.config.Config): config to update
        cfg_in (dict | kwiver.vital.config.config.Config): new values
    """
    # vital's cfg.merge_config doesn't support dictionary input
    if isinstance(cfg_in, dict):
        for key, value in cfg_in.items():
            if cfg.has_value(key):
                cfg.set_value(key, str(value))
            else:
                raise KeyError('cfg has no key={}'.format(key))
    else:
        cfg.merge_config(cfg_in)
    return cfg
35a0092013229f3b71a1ba06bbb660f861ef391c
9,439
def SubscriberReceivedStartEncKeyVector(builder, numElems): """This method is deprecated. Please switch to Start.""" return StartEncKeyVector(builder, numElems)
7c2875af0ba92e66f747bdeb2754f3123c337372
9,440
import struct def _read_extended_field_value(value, rawdata): """Used to decode large values of option delta and option length from raw binary form.""" if value >= 0 and value < 13: return (value, rawdata) elif value == 13: return (rawdata[0] + 13, rawdata[1:]) elif value == 14: return (struct.unpack('!H', rawdata[:2])[0] + 269, rawdata[2:]) else: raise ValueError("Value out of range.")
12a1f665f133f6ea5ffc817bf69ec0a9e0e07dbc
9,441
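A worked example of the CoAP extended-value rules the function implements (values 0-12 are literal, 13 means one extra byte plus 13, 14 means two extra big-endian bytes plus 269), assuming the function above:

# Literal value: nothing consumed from rawdata.
assert _read_extended_field_value(5, b"\xaa\xbb") == (5, b"\xaa\xbb")

# 13 -> one extended byte: 0x2a (42) + 13 = 55.
assert _read_extended_field_value(13, b"\x2a\xbb") == (55, b"\xbb")

# 14 -> two extended bytes as big-endian u16: 0x0100 (256) + 269 = 525.
assert _read_extended_field_value(14, b"\x01\x00\xcc") == (525, b"\xcc")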
def add_uint(a, b): """Returns the sum of two uint256-ish tuples.""" a = from_uint(a) b = from_uint(b) c = a + b return to_uint(c)
0da42542210e72f30f00b1a41919cdad882963d0
9,442