Dataset columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def n(request) -> int:
    """A test fixture enumerating values for `n`."""
    return request.param
faec9637483670bec5d2bc687f2ee03d8c3839ea
3,100
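A minimal usage sketch for the `n` fixture above (id 3,100): with pytest, such a fixture is usually parametrized so that each test runs once per value. The parameter values [1, 2, 3] and the test below are hypothetical.

import pytest

@pytest.fixture(params=[1, 2, 3])  # hypothetical parameter values
def n(request) -> int:
    """A test fixture enumerating values for `n`."""
    return request.param

def test_n_is_positive(n):  # runs once for each value of n
    assert n > 0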
def decode(ciphered_text):
    """
    Decodes the ciphered text into human-readable text. Returns a string.
    """
    text = ciphered_text.replace(' ', '')  # Remove all spaces
    return ''.join([decode_map[x] if decode_map.get(x) else x for x in text])
717e837a4750d4c281e2ca635e141f40cf1e30ee
3,101
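A usage sketch for decode() above (id 3,101), assuming a module-level decode_map; the mapping shown here is hypothetical.

decode_map = {'1': 'a', '2': 'b'}   # hypothetical cipher table
print(decode(' 1 2 x '))            # -> 'abx'; symbols not in the map pass through unchanged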
from datetime import datetime


def parse(s):
    """
    Date parsing tool. Changing the formats here affects the whole application.
    """
    formats = ['%Y-%m-%dT%H:%M:%S.%fZ', '%d/%m/%Y %H:%M:%S', '%d/%m/%Y%H:%M:%S',
               '%d/%m/%Y', '%H:%M:%S']
    d = None
    for format in formats:
        try:
            d = datetime.strptime(s, format)
            break
        except ValueError:
            pass
    return d
c665dd91a03a6d9876b8c36a46699b813c540cea
3,102
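A usage sketch for parse() above (id 3,102), exercising two of the accepted formats and the None fallback.

print(parse('2021-03-01T12:30:45.123Z'))  # datetime(2021, 3, 1, 12, 30, 45, 123000)
print(parse('01/03/2021 12:30:45'))       # datetime(2021, 3, 1, 12, 30, 45)
print(parse('not a date'))                # None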
def openstack(request): """ Context processor necessary for OpenStack Dashboard functionality. The following variables are added to the request context: ``authorized_tenants`` A list of tenant objects which the current user has access to. ``regions`` A dictionary containing information about region support, the current region, and available regions. """ context = {} # Auth/Keystone context context.setdefault('authorized_tenants', []) current_dash = request.horizon['dashboard'] needs_tenants = getattr(current_dash, 'supports_tenants', False) if request.user.is_authenticated() and needs_tenants: context['authorized_tenants'] = request.user.authorized_tenants # Region context/support available_regions = getattr(settings, 'AVAILABLE_REGIONS', []) regions = {'support': len(available_regions) > 1, 'current': {'endpoint': request.session.get('region_endpoint'), 'name': request.session.get('region_name')}, 'available': [{'endpoint': region[0], 'name':region[1]} for region in available_regions]} context['regions'] = regions context['cluster'] = {'title': "Cluster"} return context
c914beb55a8609f2c363ac5e070f5531d7ce6abc
3,103
def get_align_mismatch_pairs(align, ref_genome_dict=None) -> list: """input a pysam AlignedSegment object Args: align (pysam.AlignedSeqment object): pysam.AlignedSeqment object ref_genome_dict (dict, optional): returned dict from load_reference_fasta_as_dict(). Defaults to None. Returns: list/None: it returns mismatch_pair_list, just like [ref_index, align_index, ref_base, align_base]; and the "ref_index" is the same coordinate with UCSC genome browser; When NM == 0, it returns None. """ # No mismatch try: if align.get_tag("NM") == 0: return None except: return None MD_tag_state = align.has_tag("MD") if MD_tag_state: # parse softclip, insertion and deletion info_index_list = [] accu_index = 0 for cigar_type, cigar_len in align.cigartuples: if cigar_type == 1 or cigar_type == 4: info_index_list.append((accu_index + 1, cigar_len)) elif cigar_type == 2: info_index_list.append((accu_index + 1, -cigar_len)) accu_index += cigar_len # parse MD tag mismatch_pair_list = [] cur_base = "" cur_index = 0 bases = align.get_tag("MD") i = 0 while i < len(bases): base = bases[i] if base.isdigit(): cur_base += base i += 1 else: cur_index += int(cur_base) cur_base = "" if base == "^": i += 1 del_str = "" while (bases[i].isalpha()) and (i < len(bases)): del_str += bases[i] i += 1 cur_index += len(del_str) del_str = "" elif base.isalpha(): cur_index += 1 ref_base = base i += 1 # add into list fix_index = cur_index + back_indel_shift(info_index_list, cur_index) if fix_index < len(align.query_sequence): mismatch_pair_list.append( [ cur_index + align.reference_start, cur_index - 1, ref_base, align.query_sequence[fix_index - 1], ] ) else: return None return mismatch_pair_list else: mismatch_pair_list = [] for align_idx, ref_idx in align.get_aligned_pairs(): if (align_idx is not None) and (ref_idx is not None): align_base = align.query_sequence[align_idx] ref_base = ref_genome_dict[align.reference_name][ref_idx] if align_base != ref_base: mismatch_pair_list.append( [ref_idx + 1, align_idx, ref_base, align_base] ) return mismatch_pair_list
79886dbbdc764e115a72728060faaf155f3fea7a
3,104
def get_int(name, default=None): """ :type name: str :type default: int :rtype: int """ return int(get_parameter(name, default))
4a07f1286e54fd9e55b97868af1aa1bae595b795
3,105
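A usage sketch for get_int() above (id 3,105); get_parameter() is not shown in the sample, so a hypothetical string-valued settings store stands in for it here.

def get_parameter(name, default=None):
    settings = {'retries': '3'}        # hypothetical raw settings store
    return settings.get(name, default)

print(get_int('retries'))      # 3
print(get_int('timeout', 30))  # 30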
def run_test(series: pd.Series, randtest_name, **kwargs) -> TestResult: """Run a statistical test on RNG output Parameters ---------- series : ``Series`` Output of the RNG being tested randtest_name : ``str`` Name of statistical test **kwargs Keyword arguments to pass to statistical test Returns ------- result : ``TestResult`` or ``MultiTestResult`` Data containers of the test's result(s). Raises ------ TestNotFoundError If `randtest_name` does not match any available statistical tests TestError Errors raised when running ``randtest_name`` """ try: func = getattr(_randtests, randtest_name) except AttributeError as e: raise TestNotFoundError() from e with Progress(*columns, console=console, transient=True) as progress: abbrv = f_randtest_abbreviations[randtest_name] task = progress.add_task(abbrv) try: result = func(series, ctx=(progress, task), **kwargs) color = "yellow" if result.failures else "green" print_randtest_name(randtest_name, color) console.print(result) return result except TestError as e: print_randtest_name(randtest_name, "red") print_error(e) raise e
045ebe4756c24672cffdb3c43d6f0158809967d1
3,106
def radians(x): """ Convert degrees to radians """ if isinstance(x, UncertainFunction): mcpts = np.radians(x._mcpts) return UncertainFunction(mcpts) else: return np.radians(x)
49facfcfbeac91e9ac40b91ed8dc43b25ce157a6
3,107
def screen_poisson_objective(pp_image, hp_w,hp_b, data): """Objective function.""" return (stencil_residual(pp_image, hp_w,hp_b, data) ** 2).sum()
43a1ff6594cd493a0122e8c1305b84f25550fb59
3,108
def learn(request, artwork_genre=None): """ Returns an art genre. """ _genre = get_object_or_404(Genre, slug=artwork_genre) return render_to_response('t_learn.html', {'genre': _genre}, context_instance=RequestContext(request))
fe4e4477e7d2764ac41a58967ad1bc5296f10715
3,109
def plot_column(path: str, column: str, outpath: str = ""): """Plot a single column and save to file.""" df = to_df(path) col_df = df.set_index(["name", "datetime"])[column].unstack("name") ax = col_df.plot(grid=True) ax.set_xlabel("Time") ax.set_ylabel(LABEL_MAP[column]) if outpath: ax.get_figure().savefig(outpath, bbox_inches="tight") return ax
58d5033a3bb86986e30582bf3fedf36842aeded9
3,110
def get_loader(): """Returns torch.utils.data.DataLoader for custom Pypipes dataset. """ data_loader = None return data_loader
0e3b0107e355169049dbdfa45cba9abdf479dcbe
3,111
def attention_decoder_cell_fn(decoder_rnn_cell, memories, attention_type, decoder_type, decoder_num_units, decoder_dropout, mode, batch_size, beam_width=1, decoder_initial_state=None, reuse=False): """Create an decoder cell with attention. It takes decoder cell as argument Args: - memories: (encoder_outputs, encoder_state, input_length) tuple - attention_type: "luong", "bahdanau" - mode: "train", "test" """ if mode == "train": beam_width = 1 with tf.variable_scope('attention_decoder_cell', reuse=reuse): attention_mechanisms = [] attention_layers = [] for idx, (encoder_outputs, encoder_state, input_length) in enumerate(memories): # Tile batch for beam search, if beam_width == 1, then nothing happens encoder_outputs, input_length, encoder_state, beam_batch_size = prepare_beam_search_decoder_inputs( beam_width, encoder_outputs, input_length, encoder_state, batch_size) # Temporal attention along time step if attention_type == "luong": attention_mechanism = tf.contrib.seq2seq.LuongAttention( decoder_num_units, memory=encoder_outputs, memory_sequence_length=input_length) elif attention_type == "bahdanau": attention_mechanism = tf.contrib.seq2seq.BahdanauAttention( decoder_num_units, memory=encoder_outputs, memory_sequence_length=input_length) attention_layer = tf.layers.Dense(decoder_num_units, name="{}th_attention".format(idx), use_bias=False, dtype=tf.float32, _reuse=reuse) attention_mechanisms.append(attention_mechanism) attention_layers.append(attention_layer) #decoder_rnn_cell = single_rnn_cell(decoder_type, decoder_num_units, decoder_dropout, mode, reuse=reuse) attention_rnn_cell = tf.contrib.seq2seq.AttentionWrapper( decoder_rnn_cell, attention_mechanisms, attention_layer=attention_layers, initial_cell_state=None, name="AttentionWrapper") # Set decoder initial state initial_state = attention_rnn_cell.zero_state(dtype=tf.float32, batch_size=beam_batch_size) if decoder_initial_state: decoder_initial_state = tf.contrib.seq2seq.tile_batch(decoder_initial_state, multiplier=beam_width) initial_state = initial_state.clone(cell_state=decoder_initial_state) return attention_rnn_cell, initial_state
ffd0199d7c0bf9f9bfb5d4a6e1d7eac4767e84d3
3,112
async def post_user_income(user: str, income: Income):
    """
    This function creates a new income in the DB.
    It checks whether the user exists and returns an error message if no user exists.
    Otherwise, it creates a new document in the DB with the user's new Income.

    user: user's uuid.
    income: Income (check pydantic model) parameters to save in DB.
    """
    user_bool = user_exist(user)
    if user_bool:
        income_created = await create_new_income(income)
        if income_created:
            return {"Message": "successful", "payload": "Income created successfully."}
        else:
            return {"Message": "error", "payload": "There was an error creating Income."}
    else:
        return {"Message": "error", "payload": "User not found."}
3d155d72fc1e00f45ca93b804d25d1051e7c47ab
3,113
from typing import Optional def bind_rng_to_host_device(rng: jnp.ndarray, axis_name: str, bind_to: Optional[str] = None) -> jnp.ndarray: """Binds a rng to the host/device we are on. Must be called from within a pmapped function. Note that when binding to "device", we also bind the rng to hosts, as we fold_in the rng with axis_index which is unique for devices across all hosts. Args: rng: A jax.random.PRNGKey. axis_name: The axis of the devices we are binding rng across. bind_to: Must be one of the 'host' or 'device'. None means no binding. Returns: jax.random.PRNGKey specialized to host/device. """ if bind_to is None: return rng if bind_to == 'host': return jax.random.fold_in(rng, jax.process_index()) elif bind_to == 'device': return jax.random.fold_in(rng, jax.lax.axis_index(axis_name)) else: raise ValueError( "`bind_to` should be one of the `[None, 'host', 'device']`")
a7b50e6be3fd88f6a1341e0e43017baea305c31c
3,114
def get_child_ids(pid, models, myself=True, ids: set = None) -> set:
    """
    Collect the set of child IDs of a models instance.

    :param pid: ID of the models instance
    :param models: the models class (Django model)
    :param myself: whether to include pid itself
    :param ids: set of collected IDs (defaults to None)
    :return: ids (the set of all collected IDs)
    """
    if ids is None:
        ids = set()
    queryset = models.objects.filter(pid=pid)
    for instance in queryset:
        ids.add(instance.id)
        get_child_ids(instance.id, models, myself, ids)
    if myself:
        ids.add(pid)
    return ids
b5d9b10497eada8b3cafc32f4260ace091bbc0bf
3,115
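A hypothetical call pattern for get_child_ids() above (id 3,115), assuming a Django model (here called Menu) with a self-referencing pid field.

# ids = get_child_ids(pid=1, models=Menu)                 # 1 plus all descendant ids
# ids = get_child_ids(pid=1, models=Menu, myself=False)   # descendants only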
def get_tenant_id(khoros_object, community_details=None): """This function retrieves the tenant ID of the environment. .. versionadded:: 2.1.0 :param khoros_object: The core :py:class:`khoros.Khoros` object :type khoros_object: class[khoros.Khoros] :param community_details: Dictionary containing community details (optional) :type community_details: dict, None :returns: The tenant ID in string format :raises: :py:exc:`khoros.errors.exceptions.GETRequestError` """ return get_community_field(khoros_object, 'id', community_details)
7fe29990d7b6b99e4b677edfdb1cd32ca785654a
3,116
import tokenize def obfuscatable_variable(tokens, index): """ Given a list of *tokens* and an *index* (representing the current position), returns the token string if it is a variable name that can be safely obfuscated. Returns '__skipline__' if the rest of the tokens on this line should be skipped. Returns '__skipnext__' if the next token should be skipped. If *ignore_length* is ``True``, even variables that are already a single character will be obfuscated (typically only used with the ``--nonlatin`` option). """ tok = tokens[index] token_type = tok[0] token_string = tok[1] line = tok[4] if token_type != tokenize.NAME: return None # Skip this token if token_string in analyze.storageLocation_scope_words: return None # Skip this token if token_string == "pragma" or token_string == "import": return "__skipline__" if token_string == '_': return None # skipnext = ['(', ')', '{', '}', ';'] # if token_string in skipnext: # return '__skipnext__' if index > 0: prev_tok = tokens[index-1] else: # Pretend it's a newline (for simplicity) prev_tok = (54, '\n', (1, 1), (1, 2), '#\n') prev_tok_type = prev_tok[0] prev_tok_string = prev_tok[1] if index > 1: pre_prev_tok = tokens[index-2] else: # Pretend it's a newline (for simplicity) pre_prev_tok = (54, '\n', (1, 1), (1, 2), '#\n') pre_prev_tok_type = pre_prev_tok[0] pre_prev_tok_string = pre_prev_tok[1] try: next_tok = tokens[index+1] except IndexError: # Pretend it's a newline next_tok = (54, '\n', (1, 1), (1, 2), '#\n') next_tok_string = next_tok[1] # if token_string == "=": # return '__skipline__' if prev_tok_string == '.' and pre_prev_tok_string in ('msg', 'abi', 'block', 'tx'): return None if prev_tok_string == '.' and token_string in ('balance', 'send', 'transfer'): return None #if token_string.startswith('__'): # return None if next_tok_string == ".": if token_string in analyze.global_variable: return None #if prev_tok_string == 'import': # return '__skipline__' # if prev_tok_string == ".": # return '__skipnext__' if prev_tok_string in analyze.type_words: # declare variable return token_string if prev_tok_string in analyze.storageLocation_scope_words and pre_prev_tok_string in analyze.type_words: # declare variable return token_string if token_string[0:5] == 'fixed' or token_string[0:6] =='ufixed': return None if prev_tok_string[0:5] =='fixed' or prev_tok_string[0:6] =='ufixed': # declare variable return token_string if pre_prev_tok_string[0:5] =='fixed' or pre_prev_tok_string[0:6] =='ufixed': if prev_tok_string in analyze.storageLocation_scope_words: # declare variable return token_string # if token_string == ']' and prev_tok_string == '[': # if next_tok_string in analyze.storageLocation_scope_words: # return '__skipnext__' # if prev_tok_string == "for": # if len(token_string) > 2: # return token_string if token_string in analyze.reserved_words: return None # if token_string in keyword_args.keys(): # return None # if prev_tok_type != tokenize.INDENT and next_tok_string != '=': # return '__skipline__' # if not ignore_length: # if len(token_string) < 3: # return None # if token_string in RESERVED_WORDS: # return None return token_string
487bfd926b77260980875496991a7bcc2bc8df3f
3,117
import csv def concat_data(labelsfile, notes_file): """ INPUTS: labelsfile: sorted by hadm id, contains one label per line notes_file: sorted by hadm id, contains one note per line """ with open(labelsfile, 'r') as lf: print("CONCATENATING") with open(notes_file, 'r') as notesfile: outfilename = '%s/notes_labeled.csv' % MIMIC_3_DIR with open(outfilename, 'w') as outfile: w = csv.writer(outfile) w.writerow(['SUBJECT_ID', 'HADM_ID', 'TEXT', 'LABELS']) labels_gen = next_labels(lf) notes_gen = next_notes(notesfile) for i, (subj_id, text, hadm_id) in enumerate(notes_gen): if i % 10000 == 0: print(str(i) + " done") cur_subj, cur_labels, cur_hadm = next(labels_gen) if cur_hadm == hadm_id: w.writerow([subj_id, str(hadm_id), text, ';'.join(cur_labels)]) else: print("couldn't find matching hadm_id. data is probably not sorted correctly") break return outfilename
b6403c4ec7797cd7d08e01e7b9a9365708bdee6f
3,118
def replace_text_comment(comments, new_text): """Replace "# text = " comment (if any) with one using new_text instead.""" new_text = new_text.replace('\n', ' ') # newlines cannot be represented new_text = new_text.strip(' ') new_comments, replaced = [], False for comment in comments: if comment.startswith('# text ='): new_comments.append('# text = {}'.format(new_text)) replaced = True else: new_comments.append(comment) if not replaced: new_comments.append('# text = {}'.format(new_text)) return new_comments
4b1284966eb02ca2a6fd80f8f639adcb4f1fde6c
3,119
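A usage sketch for replace_text_comment() above (id 3,119), showing both the replace path and the append path.

comments = ['# sent_id = 1', '# text = old sentence']
print(replace_text_comment(comments, 'new sentence\n'))
# -> ['# sent_id = 1', '# text = new sentence']
print(replace_text_comment([], 'added'))
# -> ['# text = added']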
def init_show_booking_loader(response, item=None): """ init ShowingBookingLoader with optional ShowingBooking item """ loader = ShowingBookingLoader(response=response) if item: loader.add_value(None, item) return loader
2d9c790e487ab7009c70e83a8ecb5d5e93732ff7
3,120
def get_dpifac(): """get user dpi, source: node_wrangler.py""" prefs = bpy.context.preferences.system return prefs.dpi * prefs.pixel_size / 72
dc598635eb8fdf0b3fe8b6acc3f497a65a18f099
3,121
from typing import List def seq_row( repeats: int = 1, trigger: str = Trigger.IMMEDIATE, position: int = 0, half_duration: int = MIN_PULSE, live: int = 0, dead: int = 0, ) -> List: """Create a 50% duty cycle pulse with phase1 having given live/dead values""" row = [ repeats, trigger, position, # Phase1 half_duration, live, dead, 0, 0, 0, 0, # Phase2 half_duration, 0, 0, 0, 0, 0, 0, ] return row
5d331f1f67f5799165f3966249e199ca43e0ec27
3,122
def call_nelder_mead_method( f, verts, x_tolerance=1e-6, y_tolerance=1e-6, computational_budget=1000, f_difference=10, calls=0, terminate_criterion=terminate_criterion_x, alpha=1, gamma=2, rho=0.5, sigma=0.5, values=[], ): """Return an approximation of a local optimum. Args: f: a real valued n_dimensional function verts: an array with n+1 n-dimensional vectors dim: a integer (equal to n) f_difference: the difference between the last and second last best approximation calls: the number of evaluations of f so far terminate_criterion: the termination criterion we are using (a function that returns a boolean) x_tolerance: A positive real number y_tolerance: A positive real number computational_budget: An integer: the maximum number of funciton evaluations alpha, gamma, rho, sigma: positive real numbers that influence how the algorithms behaves values: previously evaluated function values Returns: out_1: an approximation of a local optimum of the function out_2: number of evaluations of f """ # Pseudo code can be found on: https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method # 0 Order if values == []: values = np.array([f(vert) for vert in verts]) calls = calls + len(verts) indexes = np.argsort(values) x_0 = np.array([0, 0]) for index in indexes[:-1]: x_0 = x_0 + verts[index] x_0 = x_0 / (len(verts) - 1) x_r = x_0 + alpha * (x_0 - verts[indexes[-1]]) x_e = x_0 + gamma * (x_r - x_0) x_c = x_0 + rho * (verts[indexes[-1]] - x_0) # 1 Termination if ( terminate_criterion(verts, f_difference, x_tolerance, y_tolerance) or f_difference < y_tolerance or calls >= computational_budget ): return [np.array(np.round(verts[indexes[0]])), calls] # 3 Reflection f_x_r = f(x_r) calls += 1 if values[indexes[0]] <= f_x_r: if f_x_r < values[indexes[-2]]: f_difference = abs(f_x_r - values[indexes[0]]) values[indexes[-1]] = f_x_r return call_nelder_mead_method( f, nm_replace_final(verts, indexes, x_r), x_tolerance, y_tolerance, computational_budget, f_difference, calls, terminate_criterion, alpha, gamma, rho, sigma, values, ) # 4 Expansion if f_x_r < values[indexes[0]]: # x_e = x_0 + gamma * (x_r - x_0) f_x_e = f(x_e) calls += 1 if f_x_e < f_x_r: f_difference = abs(f_x_e - values[indexes[0]]) values[indexes[-1]] = f_x_e return call_nelder_mead_method( f, nm_replace_final(verts, indexes, x_e), x_tolerance, y_tolerance, computational_budget, f_difference, calls, terminate_criterion, alpha, gamma, rho, sigma, values, ) else: f_difference = abs(f_x_r - values[indexes[0]]) values[indexes[-1]] = f_x_r return call_nelder_mead_method( f, nm_replace_final(verts, indexes, x_r), x_tolerance, y_tolerance, computational_budget, f_difference, calls, terminate_criterion, alpha, gamma, rho, sigma, values, ) # 5 Contraction # x_c = x_0 + rho * (verts[indexes[-1]] - x_0) f_x_c = f(x_c) if f_x_c < f(verts[indexes[-1]]): calls += 1 f_difference = abs(f_x_c - values[indexes[0]]) values[indexes[-1]] = f_x_c return call_nelder_mead_method( f, nm_replace_final(verts, indexes, x_c), x_tolerance, y_tolerance, computational_budget, f_difference, calls, terminate_criterion, alpha, gamma, rho, sigma, values, ) # 6 Shrink return call_nelder_mead_method( f, nm_shrink(verts, indexes, sigma), x_tolerance, y_tolerance, computational_budget, f_difference, calls, terminate_criterion, alpha, gamma, rho, sigma, values, )
02e16b477a977239a8dc256150cdbc983235f81c
3,123
def get_auth_claims_from_request(request): """Authenticates the request and returns claims about its authorizer. Oppia specifically expects the request to have a Subject Identifier for the user (Claim Name: 'sub'), and an optional custom claim for super-admin users (Claim Name: 'role'). Args: request: webapp2.Request. The HTTP request to authenticate. Returns: AuthClaims|None. Claims about the currently signed in user. If no user is signed in, then returns None. """ claims = _verify_id_token(request.headers.get('Authorization', '')) auth_id = claims.get('sub', None) email = claims.get('email', None) role_is_super_admin = ( claims.get('role', None) == feconf.FIREBASE_ROLE_SUPER_ADMIN) if auth_id: return auth_domain.AuthClaims(auth_id, email, role_is_super_admin) return None
1e2aaf4f26b11defea65796331f160fd22267cf2
3,124
import io def decrypt(**kwargs): """ Returns a CryptoResult containing decrypted bytes. This function requires that 'data' is in the format generated by the encrypt functionality in this SDK as well as other OCI SDKs that support client side encryption. Note this function cannot decrypt data encrypted by the KMS 'encrypt' APIs. :param oci.encryption.MasterKeyProvider master_key_provider: (required) A MasterKeyProvider to use for decrypting the data. :param bytes data: (required) The data to be decrypted. If a string is passed, it will be converted to bytes using UTF-8 encoding. Note that this conversion will require creating a copy of the data which may be undesirable for large payloads. :rtype: oci.encryption.CryptoResult """ _ensure_required_kwargs_present(required_kwargs=['master_key_provider', 'data'], provided_kwargs=kwargs) # leaves input alone if it is alread bytes, otherwise converts to bytes using default encoding # this is for convenience of the caller, but will create a copy of the data if it is not already a # bytes-like object data = convert_to_bytes(kwargs.get('data')) # as long as we only read from the stream, BytesIO does not create a copy of the data so this doesn't # add memory overhead with io.BytesIO(data) as stream_to_decrypt: decryptor = StreamDecryptor( stream_to_decrypt=stream_to_decrypt, master_key_provider=kwargs.get('master_key_provider') ) return CryptoResult(data=decryptor.read(), encryption_context=decryptor.get_encryption_context())
ba27e39abd0c72db5acd2530b7a4128b3f073dc6
3,125
from typing import Union from typing import Tuple from typing import Dict from typing import Any import io def format_result(result: Union[Pose, PackedPose]) -> Tuple[str, Dict[Any, Any]]: """ :param: result: Pose or PackedPose object. :return: tuple of (pdb_string, metadata) Given a `Pose` or `PackedPose` object, return a tuple containing the pdb string and a scores dictionary. """ _pdbstring = io.to_pdbstring(result) _scores_dict = io.to_dict(result) _scores_dict.pop("pickled_pose", None) return (_pdbstring, _scores_dict)
15b3c6ce32a3a5ab860d045ba8679e0299f122f6
3,126
def str_array(listString):
    """
    Because the way Python prints an array differs from CPLEX, this function
    produces the proper CPLEX representation of the array.

    :param listString: A list of values
    :type listString: List[]
    :returns: The string representation of the array, in CPLEX format
    :rtype: String
    """
    ret = "{"
    for i in range(0, len(listString) - 1):
        ret = ret + "\"" + listString[i] + "\","
    # Close with the last element (the original repeated the second-to-last one here)
    ret = ret + "\"" + listString[-1] + "\"}"
    return ret
46737bb05f310387d69be6516ebd90afd3d91b08
3,127
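A usage sketch for str_array() above (id 3,127), showing the CPLEX-style output of the corrected function.

print(str_array(["a", "b", "c"]))   # {"a","b","c"}
print(str_array(["x"]))             # {"x"}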
def read_gene_annos(phenoFile): """Read in gene-based metadata from an HDF5 file Args: phenoFile (str): filename for the relevant HDF5 file Returns: dictionary with feature annotations """ fpheno = h5py.File(phenoFile,'r') # Feature annotations: geneAnn = {} for key in fpheno['gene_info'].keys(): geneAnn[key] = fpheno['gene_info'][key][:] fpheno.close() return geneAnn
86e1b26a5600e1d52a3beb127ef8e7c3ac41721a
3,128
from typing import Optional import binascii def hex_xformat_decode(s: str) -> Optional[bytes]: """ Reverse :func:`hex_xformat_encode`. The parameter is a hex-encoded BLOB like .. code-block:: none "X'CDE7A24B1A9DBA3148BCB7A0B9DA5BB6A424486C'" Original purpose and notes: - SPECIAL HANDLING for BLOBs: a string like ``X'01FF'`` means a hex-encoded BLOB. Titanium is rubbish at BLOBs, so we encode them as special string literals. - SQLite uses this notation: https://sqlite.org/lang_expr.html - Strip off the start and end and convert it to a byte array: http://stackoverflow.com/questions/5649407 """ if len(s) < 3 or not s.startswith("X'") or not s.endswith("'"): return None return binascii.unhexlify(s[2:-1])
8f868d4bbd5b6843632f9d3420fe239f688ffe15
3,129
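A usage sketch for hex_xformat_decode() above (id 3,129).

print(hex_xformat_decode("X'CDE7A2'"))   # b'\xcd\xe7\xa2'
print(hex_xformat_decode("plain text"))  # None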
def threshold(data, direction): """ Find a suitable threshold value which maximizes explained variance of the data projected onto direction. NOTE: the chosen hyperplane would be described mathematically as $ x \dot direction = threshold $. """ projected_data = np.inner(data, direction) sorted_x = np.sort(projected_data) best_sep_index = explained_variance_list(sorted_x).argmax() return (sorted_x[best_sep_index] + sorted_x[best_sep_index + 1]) / 2
7fdab7f87c3c2e6d937da146ce5a27074ea92f52
3,130
def StrType_any(*x): """ Ignores all parameters to return a StrType """ return StrType()
d1faac14a91cd6149811a553113b25f34d5d4a54
3,131
import requests import os def _download(url, dest, timeout=30): """Simple HTTP/HTTPS downloader.""" # Optional import: requests is not needed for local big data setup. dest = os.path.abspath(dest) with requests.get(url, stream=True, timeout=timeout) as r: with open(dest, 'w+b') as data: for chunk in r.iter_content(chunk_size=0x4000): data.write(chunk) return dest
7fe29752866707e3bbcb4f5e5b8a97507a7b71f8
3,132
def height(tree): """Return the height of tree.""" if tree.is_empty(): return 0 else: return 1+ max(height(tree.left_child()),\ height(tree.right_child()))
a469216fc13ed99acfb1bab8db7e031acc759f90
3,133
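A usage sketch for height() above (id 3,133); the Tree class here is an assumed minimal implementation of the interface the function relies on (is_empty, left_child, right_child).

class Tree:
    def __init__(self, value=None, left=None, right=None):
        self.value, self.left, self.right = value, left, right
    def is_empty(self):
        return self.value is None and self.left is None and self.right is None
    def left_child(self):
        return self.left if self.left is not None else Tree()
    def right_child(self):
        return self.right if self.right is not None else Tree()

print(height(Tree()))                     # 0
print(height(Tree(2, Tree(1), Tree(3))))  # 2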
def applyTelluric(model, tell_alpha=1.0, airmass=1.5, pwv=0.5): """ Apply the telluric model on the science model. Parameters ---------- model : model object BT Settl model alpha : float telluric scaling factor (the power on the flux) Returns ------- model : model object BT Settl model times the corresponding model """ # read in a telluric model wavelow = model.wave[0] - 10 wavehigh = model.wave[-1] + 10 #telluric_model = smart.getTelluric(wavelow=wavelow, wavehigh=wavehigh, alpha=alpha, airmass=airmass) telluric_model = smart.Model() telluric_model.wave, telluric_model.flux = smart.InterpTelluricModel(wavelow=wavelow, wavehigh=wavehigh, airmass=airmass, pwv=pwv) # apply the telluric alpha parameter telluric_model.flux = telluric_model.flux**(tell_alpha) #if len(model.wave) > len(telluric_model.wave): # print("The model has a higher resolution ({}) than the telluric model ({})."\ # .format(len(model.wave),len(telluric_model.wave))) # model.flux = np.array(smart.integralResample(xh=model.wave, # yh=model.flux, xl=telluric_model.wave)) # model.wave = telluric_model.wave # model.flux *= telluric_model.flux #elif len(model.wave) < len(telluric_model.wave): ## This should be always true telluric_model.flux = np.array(smart.integralResample(xh=telluric_model.wave, yh=telluric_model.flux, xl=model.wave)) telluric_model.wave = model.wave model.flux *= telluric_model.flux #elif len(model.wave) == len(telluric_model.wave): # model.flux *= telluric_model.flux return model
7eaf7cafe1f8b5f4c273858f289d2c1c3865680b
3,134
def max_power_rule(mod, g, tmp): """ **Constraint Name**: DAC_Max_Power_Constraint **Enforced Over**: DAC_OPR_TMPS Power consumption cannot exceed capacity. """ return ( mod.DAC_Consume_Power_MW[g, tmp] <= mod.Capacity_MW[g, mod.period[tmp]] * mod.Availability_Derate[g, tmp] )
2c1845253524a8383f2256a7d67a8231c2a69485
3,135
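A sketch of how such a Pyomo rule is typically attached to a model (the set and variable names are taken from the docstring and assumed to exist on mod).

# from pyomo.environ import Constraint
# mod.DAC_Max_Power_Constraint = Constraint(mod.DAC_OPR_TMPS, rule=max_power_rule)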
import os async def list_subs(request: Request): """ List all subscription objects """ # Check for master token if not request.headers.get("Master") == os.environ.get("MASTER_TOKEN"): raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid authentication token", ) subs = await engine.find(Subscription) return subs
226a7de95b6557d6ff006401190816ad2a5d0222
3,136
def check_archs( copied_libs, # type: Mapping[Text, Mapping[Text, Text]] require_archs=(), # type: Union[Text, Iterable[Text]] stop_fast=False, # type: bool ): # type: (...) -> Set[Union[Tuple[Text, FrozenSet[Text]], Tuple[Text, Text, FrozenSet[Text]]]] # noqa: E501 """Check compatibility of archs in `copied_libs` dict Parameters ---------- copied_libs : dict dict containing the (key, value) pairs of (``copied_lib_path``, ``dependings_dict``), where ``copied_lib_path`` is a library real path that has been copied during delocation, and ``dependings_dict`` is a dictionary with key, value pairs where the key is a path in the target being delocated (a wheel or path) depending on ``copied_lib_path``, and the value is the ``install_name`` of ``copied_lib_path`` in the depending library. require_archs : str or sequence, optional Architectures we require to be present in all library files in wheel. If an empty sequence, just check that depended libraries do have the architectures of the depending libraries, with no constraints on what these architectures are. If a sequence, then a set of required architectures e.g. ``['i386', 'x86_64']`` to specify dual Intel architectures. If a string, then a standard architecture name as returned by ``lipo -info``, or the string "intel", corresponding to the sequence ``['i386', 'x86_64']``, or the string "universal2", corresponding to ``['x86_64', 'arm64']``. stop_fast : bool, optional Whether to give up collecting errors after the first Returns ------- bads : set set of length 2 or 3 tuples. A length 2 tuple is of form ``(depending_lib, missing_archs)`` meaning that an arch in `require_archs` was missing from ``depending_lib``. A length 3 tuple is of form ``(depended_lib, depending_lib, missing_archs)`` where ``depended_lib`` is the filename of the library depended on, ``depending_lib`` is the library depending on ``depending_lib`` and ``missing_archs`` is a set of missing architecture strings giving architectures present in ``depending_lib`` and missing in ``depended_lib``. An empty set means all architectures were present as required. """ if isinstance(require_archs, str): require_archs = _ARCH_LOOKUP.get(require_archs, [require_archs]) require_archs_set = frozenset(require_archs) bads = ( [] ) # type: List[Union[Tuple[Text, FrozenSet[Text]], Tuple[Text, Text, FrozenSet[Text]]]] # noqa: E501 for depended_lib, dep_dict in copied_libs.items(): depended_archs = get_archs(depended_lib) for depending_lib, install_name in dep_dict.items(): depending_archs = get_archs(depending_lib) all_required = depending_archs | require_archs_set all_missing = all_required.difference(depended_archs) if len(all_missing) == 0: continue required_missing = require_archs_set.difference(depended_archs) if len(required_missing): bads.append((depending_lib, required_missing)) else: bads.append((depended_lib, depending_lib, all_missing)) if stop_fast: return set(bads) return set(bads)
d500e0b89ca3edd4e76630a628d9e4d970fadbf1
3,137
def create_data_table(headers, columns, match_tol=20) -> pd.DataFrame: """Based on headers and column data, create the data table.""" # Store the bottom y values of all of the row headers header_tops = np.array([h.top for h in headers]) # Set up the grid: nrows by ncols nrows = len(headers) ncols = len(columns) + 1 # Initialize the grid grid = np.empty((nrows, ncols), dtype=object) grid[:, :] = "" # Default value # Add in the headers grid[:, 0] = [h.text for h in headers] # Loop over each column for col_num, xval in enumerate(columns): col = columns[xval] word_tops = np.array([w.top for w in col]) # Find closest row header for row_num, h in enumerate(headers): # Find closest word ot this row heasder word_diff = np.abs(word_tops - h.top) word_diff[word_diff > match_tol] = np.nan # Make sure the row header is vertically close enough if np.isnan(word_diff).sum() < len(word_diff): # Get the matching word for this row header notnull = ~np.isnan(word_diff) order = np.argsort(word_diff[notnull]) for word_index in np.where(notnull)[0][order]: word = col[word_index] # IMPORTANT: make sure this is the closest row header # Sometimes words will match to more than one header header_diff = np.abs(header_tops - word.top) header_index = np.argmin(header_diff) closest_header = headers[header_index] if closest_header == h: grid[row_num, col_num + 1] = col[word_index].text break return pd.DataFrame(grid)
56b1cb21afa7813138d03b56849b594e18664348
3,138
def interp2d_vis(model, model_lsts, model_freqs, data_lsts, data_freqs, flags=None, kind='cubic', flag_extrapolate=True, medfilt_flagged=True, medfilt_window=(3, 7), fill_value=None): """ Interpolate complex visibility model onto the time & frequency basis of a data visibility. See below for notes on flag propagation if flags is provided. Parameters: ----------- model : type=DataContainer, holds complex visibility for model keys are antenna-pair + pol tuples, values are 2d complex visibility with shape (Ntimes, Nfreqs). model_lsts : 1D array of the model time axis, dtype=float, shape=(Ntimes,) model_freqs : 1D array of the model freq axis, dtype=float, shape=(Nfreqs,) data_lsts : 1D array of the data time axis, dtype=float, shape=(Ntimes,) data_freqs : 1D array of the data freq axis, dtype=float, shape=(Nfreqs,) flags : type=DataContainer, dictionary containing model flags. Can also contain model wgts as floats and will convert to booleans appropriately. kind : type=str, kind of interpolation, options=['linear', 'cubic', 'quintic'] medfilt_flagged : type=bool, if True, before interpolation, replace flagged pixels with output from a median filter centered on each flagged pixel. medfilt_window : type=tuple, extent of window for median filter across the (time, freq) axes. Even numbers are rounded down to odd number. flag_extrapolate : type=bool, flag extrapolated data_lsts if True. fill_value : type=float, if fill_value is None, extrapolated points are extrapolated else they are filled with fill_value. Output: (new_model, new_flags) ------- new_model : interpolated model, type=DataContainer new_flags : flags associated with interpolated model, type=DataContainer Notes: ------ If the data has flagged pixels, it is recommended to turn medfilt_flagged to True. This runs a median filter on the flagged pixels and replaces their values with the results, but they remain flagged. This happens *before* interpolation. This means that interpolation near flagged pixels aren't significantly biased by their presence. In general, if flags are fed, flags are propagated if a flagged pixel is a nearest neighbor of an interpolated pixel. """ # make flags new_model = odict() new_flags = odict() # get nearest neighbor points freq_nn = np.array(list(map(lambda x: np.argmin(np.abs(model_freqs - x)), data_freqs))) time_nn = np.array(list(map(lambda x: np.argmin(np.abs(model_lsts - x)), data_lsts))) freq_nn, time_nn = np.meshgrid(freq_nn, time_nn) # get model indices meshgrid mod_F, mod_L = np.meshgrid(np.arange(len(model_freqs)), np.arange(len(model_lsts))) # raise warning on flags if flags is not None and medfilt_flagged is False: print("Warning: flags are fed, but medfilt_flagged=False. \n" "This may cause weird behavior of interpolated points near flagged data.") # ensure flags are booleans if flags is not None: if np.issubdtype(flags[list(flags.keys())[0]].dtype, np.floating): flags = DataContainer(odict(list(map(lambda k: (k, ~flags[k].astype(np.bool)), flags.keys())))) # loop over keys for i, k in enumerate(list(model.keys())): # get model array m = model[k] # get real and imag separately real = np.real(m) imag = np.imag(m) # median filter flagged data if desired if medfilt_flagged and flags is not None: # get extent of window along freq and time f_ext = int((medfilt_window[1] - 1) / 2.) t_ext = int((medfilt_window[0] - 1) / 2.) 
# set flagged data to nan real[flags[k]] *= np.nan imag[flags[k]] *= np.nan # get flagged indices f_indices = mod_F[flags[k]] l_indices = mod_L[flags[k]] # construct fill arrays real_fill = np.empty(len(f_indices), np.float) imag_fill = np.empty(len(f_indices), np.float) # iterate over flagged data and replace w/ medfilt for j, (find, tind) in enumerate(zip(f_indices, l_indices)): tlow, thi = tind - t_ext, tind + t_ext + 1 flow, fhi = find - f_ext, find + f_ext + 1 ll = 0 while True: # iterate until window has non-flagged data in it # with a max of 10 iterations if tlow < 0: tlow = 0 if flow < 0: flow = 0 r_med = np.nanmedian(real[tlow:thi, flow:fhi]) i_med = np.nanmedian(imag[tlow:thi, flow:fhi]) tlow -= 2 thi += 2 flow -= 2 fhi += 2 ll += 1 if not (np.isnan(r_med) or np.isnan(i_med)): break if ll > 10: break real_fill[j] = r_med imag_fill[j] = i_med # fill real and imag real[l_indices, f_indices] = real_fill imag[l_indices, f_indices] = imag_fill # flag residual nans resid_nans = np.isnan(real) + np.isnan(imag) flags[k] += resid_nans # replace residual nans real[resid_nans] = 0.0 imag[resid_nans] = 0.0 # propagate flags to nearest neighbor if flags is not None: f = flags[k][time_nn, freq_nn] # check f is boolean type if np.issubdtype(f.dtype, np.floating): f = ~(f.astype(np.bool)) else: f = np.zeros_like(real, bool) # interpolate interp_real = interpolate.interp2d(model_freqs, model_lsts, real, kind=kind, copy=False, bounds_error=False, fill_value=fill_value)(data_freqs, data_lsts) interp_imag = interpolate.interp2d(model_freqs, model_lsts, imag, kind=kind, copy=False, bounds_error=False, fill_value=fill_value)(data_freqs, data_lsts) # flag extrapolation if desired if flag_extrapolate: time_extrap = np.where((data_lsts > model_lsts.max() + 1e-6) | (data_lsts < model_lsts.min() - 1e-6)) freq_extrap = np.where((data_freqs > model_freqs.max() + 1e-6) | (data_freqs < model_freqs.min() - 1e-6)) f[time_extrap, :] = True f[:, freq_extrap] = True # rejoin new_model[k] = interp_real + 1j * interp_imag new_flags[k] = f return DataContainer(new_model), DataContainer(new_flags)
6ac4fad738691f470e36252fc7544e857c8fdca0
3,139
def eps_divide(n, d, eps=K.epsilon()): """ perform division using eps """ return (n + eps) / (d + eps)
2457e5fc4458521b4098cfb144b7ff07e163ba9c
3,140
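A usage sketch for eps_divide() above (id 3,140), assuming the surrounding module imports the Keras backend, e.g. from tensorflow.keras import backend as K.

print(eps_divide(1.0, 2.0))  # ~0.5
print(eps_divide(0.0, 0.0))  # ~1.0 instead of a NaN / ZeroDivisionError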
import requests def get_mc_uuid(username): """Gets the Minecraft UUID for a username""" url = f"https://api.mojang.com/users/profiles/minecraft/{username}" res = requests.get(url) if res.status_code == 204: raise ValueError("Users must have a valid MC username") else: return res.json().get("id")
fceeb1d9eb096cd3e29f74d389c7c851422ec022
3,141
import os import netrc import trace def _resolve_credentials(fqdn, login): """Look up special forms of credential references.""" result = login if "$" in result: result = os.path.expandvars(result) if result.startswith("netrc:"): result = result.split(':', 1)[1] if result: result = os.path.abspath(os.path.expanduser(result)) accounts = netrc.netrc(result or None) account = accounts.authenticators(fqdn) if not account or not(account[0] or account[1]): raise dputhelper.DputUploadFatalException("Cannot find account for host %s in %s netrc file" % ( fqdn, result or "default")) # account is (login, account, password) user, pwd = account[0] or account[1], account[2] or "" result = "%s:%s" % (user, pwd) else: if result.startswith("file:"): result = os.path.abspath(os.path.expanduser(result.split(':', 1)[1])) with closing(open(result, "r")) as handle: result = handle.read().strip() try: user, pwd = result.split(':', 1) except ValueError: user, pwd = result, "" trace("Resolved login credentials to %(user)s:%(pwd)s", user=user, pwd='*' * len(pwd)) return result
0b2a31d23f2937d41a8c81e02b7a64cf013e5580
3,142
def api(default=None, api=None, **kwargs): """Returns the api instance in which this API function is being ran""" return api or default
3d636408914e2888f4dc512aff3f729512849ddf
3,143
def parse_json(data):
    """Parses the PurpleAir JSON file, returning a Sensors protobuf."""
    channel_a = []
    channel_b = {}
    for result in data["results"]:
        if "ParentID" in result:
            channel_b[result["ParentID"]] = result
        else:
            channel_a.append(result)
    sensors = list(_parse_results(channel_a, channel_b))
    return model_pb2.Sensors(sensors=sensors)
11ded094b71d6557cc7c1c7ed489cdcbfe881e0b
3,144
import logging import traceback def asynchronize_tornado_handler(handler_class): """ A helper function to turn a blocking handler into an async call :param handler_class: a tornado RequestHandler which should be made asynchronus :return: a class which does the same work on a threadpool """ class AsyncTornadoHelper(handler_class): """ A hollow wrapper class which runs requests asynchronously on a threadpool """ def _do_work_and_report_error(self, work): try: # call the "real" method from the handler_class work() except HTTPError as ex: # request handler threw uncaught error logging.error(traceback.format_exc()) # send http errors to client self.write(str(ex)) self.set_status(ex.status_code) except Exception: # request handler threw uncaught error logging.error(traceback.format_exc()) # send 500 error to client. Do not pass on error message self.write("500 Internal Server Error \n") self.set_status(500) finally: # finished needs to be reported from main tornado thread tornado.ioloop.IOLoop.instance().add_callback( # report finished to main tornado thread: lambda: self.finish() ) @asynchronous def get(self, path=None): # bind the "real" method from the handler_class to run in another thread blocking_method = lambda: self._do_work_and_report_error( lambda: handler_class.get(self, path)) # launch in another thread REQUEST_HANDLER_THREAD_POOL.run_as_asynch(blocking_method) @asynchronous def put(self, path=None): # bind the "real" method from the handler_class to run in another thread blocking_method = lambda: self._do_work_and_report_error( lambda: handler_class.put(self, path)) # launch in another thread REQUEST_HANDLER_THREAD_POOL.run_as_asynch(blocking_method) @asynchronous def post(self, path=None): # bind the "real" method from the handler_class to run in another thread blocking_method = lambda: self._do_work_and_report_error( lambda: handler_class.post(self, path)) # launch in another thread REQUEST_HANDLER_THREAD_POOL.run_as_asynch(blocking_method) # return the wrapped class instead of the original for Tornado to run asynchronously return AsyncTornadoHelper
0e7d3b46b199cdf1aa1a31a19ed3d54f0abbce16
3,145
def convert_single_example(ex_index, example: InputExample, tokenizer, label_map, dict_builder=None): """Converts a single `InputExample` into a single `InputFeatures`.""" # label_map = {"B": 0, "M": 1, "E": 2, "S": 3} # tokens_raw = tokenizer.tokenize(example.text) tokens_raw = list(example.text) labels_raw = example.labels # Account for [CLS] and [SEP] with "- 2" # The convention in BERT is: # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = [] label_ids = [] for token, label in zip(tokens_raw, labels_raw): tokens.append(token) label_ids.append(label_map[label]) input_ids = tokenizer.convert_tokens_to_ids(tokens) if dict_builder is None: input_dicts = np.zeros_like(tokens_raw, dtype=np.int64) else: input_dicts = dict_builder.extract(tokens) seq_length = len(tokens) assert seq_length == len(input_ids) assert seq_length == len(input_dicts) assert seq_length == len(label_ids) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. if ex_index < 1: tf.logging.info("*** Example ***") tf.logging.info("guid: %s" % example.guid) tf.logging.info("tokens: %s" % " ".join( [utils.printable_text(x) for x in tokens])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_dicts])) tf.logging.info("labels: %s" % " ".join([str(x) for x in example.labels])) tf.logging.info("labels_ids: %s" % " ".join([str(x) for x in label_ids])) feature = InputFeatures( input_ids=input_ids, input_dicts=input_dicts, label_ids=label_ids, seq_length=seq_length) return feature
3dca77aa191f821c9785c8431c4637e47582a588
3,146
def create_scenariolog(sdata, path, recording, logfilepath): """ シナリオのプレイデータを記録したXMLファイルを作成する。 """ element = cw.data.make_element("ScenarioLog") # Property e_prop = cw.data.make_element("Property") element.append(e_prop) e = cw.data.make_element("Name", sdata.name) e_prop.append(e) e = cw.data.make_element("WsnPath", sdata.fpath) e_prop.append(e) e = cw.data.make_element("RoundAutoStart", str(sdata.autostart_round)) e_prop.append(e) e = cw.data.make_element("NoticeInfoView", str(sdata.notice_infoview)) e_prop.append(e) e = cw.data.make_element("PartyEnvironment") if not sdata.party_environment_backpack: e.append(cw.data.make_element("Backpack", "Disable")) if len(e): e_prop.append(e) if cw.cwpy.setting.write_playlog: e = cw.data.make_element("LogFile", logfilepath) e_prop.append(e) if cw.cwpy.areaid >= 0: areaid = cw.cwpy.areaid elif cw.cwpy.pre_areaids: areaid = cw.cwpy.pre_areaids[0][0] else: areaid = 0 if not recording: e = cw.data.make_element("Debug", str(cw.cwpy.debug)) e_prop.append(e) e = cw.data.make_element("AreaId", str(areaid)) e_prop.append(e) e_music = cw.data.make_element("MusicPaths") for i, music in enumerate(cw.cwpy.music): if music.path.startswith(cw.cwpy.skindir): fpath = music.path.replace(cw.cwpy.skindir + "/", "", 1) else: fpath = music.path.replace(sdata.scedir + "/", "", 1) e = cw.data.make_element("MusicPath", fpath, attrs={"channel": str(music.channel), "volume": str(music.subvolume), "loopcount": str(music.loopcount), "inusecard": str(music.inusecard)}) e_music.append(e) e_prop.append(e_music) e = cw.data.make_element("Yado", cw.cwpy.ydata.name) e_prop.append(e) e = cw.data.make_element("Party", cw.cwpy.ydata.party.name) e_prop.append(e) # bgimages e_bgimgs = cw.data.make_element("BgImages") element.append(e_bgimgs) def make_colorelement(name, color): e = cw.data.make_element(name, attrs={"r": str(color[0]), "g": str(color[1]), "b": str(color[2])}) if 4 <= len(color): e.set("a", str(color[3])) else: e.set("a", "255") return e for bgtype, d in cw.cwpy.background.bgs: if bgtype == cw.sprite.background.BG_IMAGE: fpath, inusecard, scaledimage, mask, smoothing, size, pos, flag, visible, layer, cellname = d attrs = {"mask": str(mask), "visible": str(visible)} if cellname: attrs["cellname"] = cellname if smoothing <> "Default": attrs["smoothing"] = smoothing e_bgimg = cw.data.make_element("BgImage", attrs=attrs) if inusecard: e = cw.data.make_element("ImagePath", fpath, attrs={"inusecard":str(inusecard), "scaledimage": str(scaledimage)}) else: e = cw.data.make_element("ImagePath", fpath) e_bgimg.append(e) elif bgtype == cw.sprite.background.BG_TEXT: text, namelist, face, tsize, color, bold, italic, underline, strike, vertical, antialias,\ btype, bcolor, bwidth, loaded, updatetype, size, pos, flag, visible, layer, cellname = d attrs = {"visible": str(visible), "loaded": str(loaded)} if cellname: attrs["cellname"] = cellname e_bgimg = cw.data.make_element("TextCell", attrs=attrs) e = cw.data.make_element("Text", text) e_bgimg.append(e) e = cw.data.make_element("Font", face, attrs={"size": str(tsize), "bold": str(bold), "italic": str(italic), "underline": str(underline), "strike": str(strike)}) e_bgimg.append(e) e = cw.data.make_element("Vertical", str(vertical)) e_bgimg.append(e) e = cw.data.make_element("Antialias", str(antialias)) e_bgimg.append(e) e = make_colorelement("Color", color) e_bgimg.append(e) e = cw.data.make_element("UpdateType", updatetype) e_bgimg.append(e) if btype <> "None": e = cw.data.make_element("Bordering", attrs={"type": btype, "width": 
str(bwidth)}) e.append(make_colorelement("Color", bcolor)) e_bgimg.append(e) if namelist: e = cw.data.make_element("Names") for item in namelist: e_name = cw.data.make_element("Name", unicode(item.name))#PyLite:コモンのnumberがバグるためUnicodeに変換 if isinstance(item.data, cw.data.YadoData): e_name.set("type", "Yado") elif isinstance(item.data, cw.data.Party): e_name.set("type", "Party") elif isinstance(item.data, cw.character.Player) and item.data in cw.cwpy.get_pcards(): e_name.set("type", "Player") e_name.set("number", str(cw.cwpy.get_pcards().index(item.data)+1)) elif isinstance(item.data, cw.data.Flag): e_name.set("type", "Flag") e_name.set("flag", item.data.name) elif isinstance(item.data, cw.data.Step): e_name.set("type", "Step") e_name.set("step", item.data.name) elif isinstance(item.data, cw.data.Variant): e_name.set("type", "Variant") e_name.set("variant", item.data.name) e_name.set("valuetype", item.data.type) elif item.data == "Number": e_name.set("type", "Number") e.append(e_name) e_bgimg.append(e) elif bgtype == cw.sprite.background.BG_COLOR: blend, color1, gradient, color2, size, pos, flag, visible, layer, cellname = d attrs = {"visible": str(visible)} if cellname: attrs["cellname"] = cellname e_bgimg = cw.data.make_element("ColorCell", attrs=attrs) e = cw.data.make_element("BlendMode", blend) e_bgimg.append(e) e = make_colorelement("Color", color1) e_bgimg.append(e) if gradient <> "None": e = cw.data.make_element("Gradient", attrs={"direction": gradient}) e.append(make_colorelement("EndColor", color2)) e_bgimg.append(e) elif bgtype == cw.sprite.background.BG_PC: pcnumber, expand, smoothing, size, pos, flag, visible, layer, cellname = d attrs = {"visible": str(visible), "expand": str(expand)} if cellname: attrs["cellname"] = cellname if smoothing <> "Default": attrs["smoothing"] = smoothing e_bgimg = cw.data.make_element("PCCell", attrs=attrs) e = cw.data.make_element("PCNumber", str(pcnumber)) e_bgimg.append(e) else: assert bgtype == cw.sprite.background.BG_SEPARATOR e_bgimg = cw.data.make_element("Redisplay") e_bgimgs.append(e_bgimg) continue e = cw.data.make_element("Flag", flag) e_bgimg.append(e) e = cw.data.make_element("Location", attrs={"left": str(pos[0]), "top": str(pos[1])}) e_bgimg.append(e) e = cw.data.make_element("Size", attrs={"width": str(size[0]), "height": str(size[1])}) e_bgimg.append(e) if layer <> cw.LAYER_BACKGROUND: e = cw.data.make_element("Layer", str(layer)) e_bgimg.append(e) e_bgimgs.append(e_bgimg) # カード再配置情報 if cw.cwpy.sdata.moved_mcards: e_movedmcards = cw.data.make_element("MovedCards") for (cardgroup, index), (x, y, scale, layer) in cw.cwpy.sdata.moved_mcards.iteritems(): e_movedmcard = cw.data.make_element("MovedCard", attrs={"cardgroup":cardgroup, "index":str(index)}) e_movedmcard.append(cw.data.make_element("Location", attrs={"left":str(x), "top":str(y)})) if scale <> -1: e_movedmcard.append(cw.data.make_element("Size", attrs={"scale":str(scale)})) if layer <> -1: e_movedmcard.append(cw.data.make_element("Layer", str(layer))) if len(e_movedmcard): e_movedmcards.append(e_movedmcard) element.append(e_movedmcards) # flag e_flag = cw.data.make_element("Flags") element.append(e_flag) if cw.cwpy.setting.enable_equalbug: for name, flag in sdata.flags.iteritems(): if name.find(u"=") == -1: # BUG:PyLite :「=」を含む変数はセーブされない(1.50変数バグ) e = cw.data.make_element("Flag", name, {"value": str(flag.value)}) e_flag.append(e) else: for name, flag in sdata.flags.iteritems(): e = cw.data.make_element("Flag", name, {"value": str(flag.value)}) e_flag.append(e) # step e_step = 
cw.data.make_element("Steps") element.append(e_step) if cw.cwpy.setting.enable_equalbug: for name, step in sdata.steps.iteritems(): if name.find(u"=") == -1: # BUG:PyLite :「=」を含む変数はセーブされない(1.50変数バグ) e = cw.data.make_element("Step", name, {"value": str(step.value)}) e_step.append(e) else: for name, step in sdata.steps.iteritems(): e = cw.data.make_element("Step", name, {"value": str(step.value)}) e_step.append(e) # variant if sdata.variants: e_variant = cw.data.make_element("Variants") element.append(e_variant) for name, variant in sdata.variants.iteritems(): #PyLite:UnicodeErrorを回避 e = cw.data.make_element("Variant", name, {"type": variant.type, "value": unicode(variant.value)}) e_variant.append(e) if not recording: # gossip e_gossip = cw.data.make_element("Gossips") element.append(e_gossip) for key, value in sdata.gossips.iteritems(): e = cw.data.make_element("Gossip", key, {"value": str(value)}) e_gossip.append(e) # completestamps e_compstamp = cw.data.make_element("CompleteStamps") element.append(e_compstamp) for key, value in sdata.compstamps.iteritems(): e = cw.data.make_element("CompleteStamp", key, {"value": str(value)}) e_compstamp.append(e) # InfoCard e_info = cw.data.make_element("InfoCards") element.append(e_info) for resid in sdata.get_infocards(order=True): e = cw.data.make_element("InfoCard", str(resid)) e_info.append(e) # FriendCard e_cast = cw.data.make_element("CastCards") element.append(e_cast) for fcard in sdata.friendcards: e_cast.append(fcard.data.getroot()) if not recording: # DeletedFile e_del = cw.data.make_element("DeletedFiles") element.append(e_del) for fpath in sdata.deletedpaths: e = cw.data.make_element("DeletedFile", fpath) e_del.append(e) # LostAdventurer e_lost = cw.data.make_element("LostAdventurers") element.append(e_lost) for fpath in sdata.lostadventurers: e = cw.data.make_element("LostAdventurer", fpath) e_lost.append(e) # ファイル書き込み etree = cw.data.xml2etree(element=element) etree.write(path) return path
d55bee540361496d5a9d456be7a99d88b2f0dcf6
3,147
async def converter_self_interaction_target(client, interaction_event): """ Internal converter for returning the received interaction event's target. Applicable for context application commands. This function is a coroutine. Parameters ---------- client : ``Client`` The client who received the respective ``InteractionEvent``. interaction_event : ``InteractionEvent`` The received application command interaction. Returns ------- target : `None` or ``DiscordEntity`` The resolved entity if any. """ if interaction_event.type is not INTERACTION_TYPE_APPLICATION_COMMAND: return None return interaction_event.interaction.target
66975897b9f8a7d0b224f80d1827af3ea07eb51d
3,148
from typing import List def publication_pages(publication_index_page) -> List[PublicationPage]: """Fixture providing 10 PublicationPage objects attached to publication_index_page""" rv = [] for _ in range(0, 10): p = _create_publication_page( f"Test Publication Page {_}", publication_index_page ) rv.append(p) return rv
5d9b75bcbdc5c9485cc83ddc2befeb946f447227
3,149
from typing import List

def rp_completion(
    rp2_metnet,
    sink,
    rp2paths_compounds,
    rp2paths_pathways,
    cache: rrCache = None,
    upper_flux_bound: float = default_upper_flux_bound,
    lower_flux_bound: float = default_lower_flux_bound,
    max_subpaths_filter: int = default_max_subpaths_filter,
    logger: Logger = getLogger(__name__)
) -> List[rpPathway]:
    """Complete the metabolic pathways generated by RetroPath2.0 and rp2paths.

    (1) rp2paths generates a set of master pathways, each of which is a set of
        chemical transformations.
    (2) Each chemical transformation refers to one or multiple reaction rules.
    (3) Each reaction rule comes from one or multiple template (original)
        chemical reactions.

    The completion consists in:
        1. exploring all possible metabolic pathways through steps (2) and (3),
        2. putting back chemical species removed during the reaction rule
           building process.

    The completion is done for all master pathways of step (1).

    Parameters
    ----------
    rp2_metnet: str
        Path to the file containing the metabolic network
    sink: str
        Path to the file containing the list of species in the sink
    rp2paths_compounds: str
        Path to the file containing the chemical species involved in master metabolic pathways
    rp2paths_pathways: str
        Path to the file containing the master metabolic pathways
    cache: rrCache, optional
        Cache that contains reaction rules data
    upper_flux_bound: float, optional
        Upper flux bound for all new reactions created (default: default_upper_flux_bound from Args file)
    lower_flux_bound: float, optional
        Lower flux bound for all new reactions created (default: default_lower_flux_bound from Args file)
    max_subpaths_filter: int, optional
        Number of (best) pathways kept per master pathway (default: 10)
    logger: Logger, optional

    Returns
    -------
    List of rpPathway objects
    """
    if cache is None:
        cache = rrCache(
            attrs=[
                'rr_reactions',
                'template_reactions',
                'cid_strc',
                'deprecatedCompID_compid',
            ]
        )

    ## READ
    __rp2paths_compounds_in_cache(
        infile=rp2paths_compounds,
        cache=cache,
        logger=logger
    )
    pathways, transfos = __read_pathways(
        infile=rp2paths_pathways,
        logger=logger
    )
    ec_numbers = __read_rp2_metnet(
        infile=rp2_metnet,
        logger=logger
    )
    sink_molecules = __read_sink(
        infile=sink,
        logger=logger
    )

    # COMPLETE TRANSFORMATIONS
    full_transfos = __complete_transformations(
        transfos=transfos,
        ec_numbers=ec_numbers,
        cache=cache,
        logger=logger
    )

    # GENERATE THE COMBINATORY OF SUB-PATHWAYS
    # Build pathways over:
    #   - multiple reaction rules per transformation (TRS) and
    #   - multiple template reactions per reaction rule
    pathway_combinatorics = __build_pathway_combinatorics(
        full_transfos,
        pathways,
        cache=cache,
        logger=logger
    )

    # BUILD + RANK SUB-PATHWAYS
    all_pathways = __build_all_pathways(
        pathways=pathway_combinatorics,
        transfos=full_transfos,
        sink_molecules=sink_molecules,
        rr_reactions=cache.get('rr_reactions'),
        compounds_cache=cache.get('cid_strc'),
        max_subpaths_filter=max_subpaths_filter,
        # compartment_id=compartment_id,
        lower_flux_bound=lower_flux_bound,
        upper_flux_bound=upper_flux_bound,
        logger=logger
    )

    # # Return flat list of overall topX pathways
    # return sum(
    #     [
    #         pathways
    #         for pathways in all_pathways.values()
    #     ], [])[:max_subpaths_filter]

    return all_pathways

    # for sub_pathways in all_pathways.values():
    #     for sub_pathway in sub_pathways:
    #         print(sub_pathway)
    # from chemlite import Pathway
    # print(all_pathways)
    # for sub_pathways in all_pathways.values():
    #     for i in range(len(sub_pathways)):
    #         for j in range(i+1, len(sub_pathways)):
    #             if sub_pathways[i] == sub_pathways[j]:
    #                 print(f'Equality between {sub_pathways[i].get_id()} and {sub_pathways[j].get_id()}')
    # print()
    # print(Pathway._to_dict(all_pathways[1][0]))
ac7539d1d8f0f9388c9d6bef570362d62ec90414
3,150
import torch


def wrap(func, *args, unsqueeze=False):
    """
    Wrap a torch function so it can be called with NumPy arrays.
    Input and return types are seamlessly converted.
    """
    # Convert input types where applicable
    args = list(args)
    for i, arg in enumerate(args):
        if type(arg) == np.ndarray:
            args[i] = torch.from_numpy(arg)
            if unsqueeze:
                args[i] = args[i].unsqueeze(0)

    result = func(*args)

    # Convert output types where applicable
    if isinstance(result, tuple):
        result = list(result)
        for i, res in enumerate(result):
            if type(res) == torch.Tensor:
                if unsqueeze:
                    res = res.squeeze(0)
                result[i] = res.numpy()
        return tuple(result)
    elif type(result) == torch.Tensor:
        if unsqueeze:
            result = result.squeeze(0)
        result = result.numpy()
        return result
    else:
        return result
5a5491e2b911235d7bf858b19d0d32a9e8da20e6
3,151
def STOCHF(data, fastk_period=5, fastd_period=3, fastd_ma_type=0):
    """
    Stochastic %F

    :param pd.DataFrame data: pandas DataFrame with open, high, low, close data
    :param int fastk_period: period used for K fast indicator calculation
    :param int fastd_period: period used for D fast indicator calculation
    :param int fastd_ma_type: fast D moving average type (0 simple, 1 exponential)
    :return pd.Series: with indicator data calculation results
    """
    fn = Function('STOCHF')
    return fn(data, fastk_period=fastk_period, fastd_period=fastd_period,
              fastd_matype=fastd_ma_type)
3412a6832f54b2dfbaff7eb25de0f6644d914934
3,152
def playerid_reverse_lookup(player_ids, key_type=None):
    """Retrieve a table of player information given a list of player ids

    :param player_ids: list of player ids
    :type player_ids: list
    :param key_type: name of the key type being looked up (one of "mlbam", "retro", "bbref", or "fangraphs")
    :type key_type: str

    :rtype: :class:`pandas.core.frame.DataFrame`
    """
    key_types = ('mlbam', 'retro', 'bbref', 'fangraphs', )

    if not key_type:
        key_type = key_types[0]  # default is "mlbam" if key_type not provided
    elif key_type not in key_types:
        raise ValueError(
            '[Key Type: {}] Invalid; Key Type must be one of "{}"'.format(key_type, '", "'.join(key_types))
        )

    table = get_lookup_table()
    key = 'key_{}'.format(key_type)

    results = table[table[key].isin(player_ids)]
    results = results.reset_index().drop('index', 1)
    return results
e5bbe46567d1c8e517020d9cb9f551249ea8f515
3,153
def get_delta_z(z, rest_ghz, ghz=None):
    """
    Take a measured GHz value, and calculate the rest-frame GHz value based on the given z of the matched galaxy

    :param z:
    :param ghz:
    :return:
    """
    # First step is to convert to nm from rest frame GHz
    set_zs = []
    for key, values in transitions.items():
        if values[0] - 0.3 <= z <= values[1] + 0.3:
            sghz = values[2] * u.GHz  # Gets the GHz of the CO line
            rest_ghz /= (z + 1)
            set_z = np.round((sghz - rest_ghz) / rest_ghz, 3)  # (Freq_emitted - Freq_obs) / Freq_obs = z
            set_z = z - set_z
            rest_ghz *= (z + 1)
            print("Z: {} Set Z: {}".format(z, set_z))
            set_zs.append((key, set_z))
    set_z = np.min([np.abs(i[1]) for i in set_zs])
    print(set_zs)
    print(set_z)
    for element in set_zs:
        if np.isclose(np.abs(element[1]), set_z):
            return element[1], element[0]
acb0069c56fb34aeaba302368131400f3c35d643
3,154
def hist_orientation(qval, dt):
    """
    provided with quats, and time spent in the direction defined by quat,
    produces quaternions grouped by ra, dec and roll and the corresponding
    time spent in those quats

    params: qval - a set of quats stored in the scipy.spatial.transform.Rotation class
    params: dt - set of time interval durations corresponding to the set of quaternions
            (the time the sc spent in the direction defined by each quaternion)

    return: exptime, qval - histogramed set of quaternions with corresponding times
    """
    oruniq, uidx, invidx = hist_quat(qval)
    exptime = np.zeros(uidx.size, np.double)
    np.add.at(exptime, invidx, dt)
    return exptime, qval[uidx]
bbdecc58a9a3dc248d68b73018cd5f1d803ddbfd
3,155
def tcpip(port=5555, debug=False):
    """
    Switch adb to TCP/IP (network) mode.

    :param port: port number (default 5555)
    :param debug: debug switch (default off)
    :return: n/a
    """
    return adb_core.execute(f'tcpip {port}', debug=debug)
3794ceeff32c20a8f4a525b0083866a781973ec8
3,156
def proclamadelcaucacom_story(soup):
    """
    Function to pull the information we want from Proclamadelcauca.com stories

    :param soup: BeautifulSoup object, ready to parse
    """
    hold_dict = {}
    # text
    try:
        article_body = soup.find('div', attrs={"class": "single-entradaContent"})
        maintext = [para.text.strip() for para in article_body.find_all('p')]
        hold_dict['maintext'] = '\n '.join(maintext).strip()
    except:
        article_body = None
    return hold_dict
e8bcf0faaa7731b71e7b9db33e454b422b3285bc
3,157
def asarray_fft(x, inverse):
    """Recursive implementation of the 1D Cooley-Tukey FFT using np.asarray
    to prevent copying.

    Parameters:
        x (array): the discrete amplitudes to transform.
        inverse (bool): perform the inverse fft if true.
    Returns:
        x (array): the amplitudes of the original signal.
        OR
        X (complex number array): the phase and amplitude of the transformation.
    """
    coef = 1 if inverse else -1
    N = x.shape[0]

    # validating input array
    if np.log2(N) % 1 > 0:
        raise ValueError('array size must be a power of 2')
    # 32 was arbitrarily chosen as "good enough"
    elif N <= 32:
        return dft(x, inverse)  # perform DFT on all N <= 32 sub-arrays
    else:
        even_terms = asarray_fft(x[::2], inverse)
        odd_terms = asarray_fft(x[1::2], inverse)
        exp_array = np.exp(coef * 2j * np.pi * np.arange(N) / N)
        return np.concatenate([even_terms + exp_array[:(N >> 1)] * odd_terms,
                               even_terms + exp_array[(N >> 1):] * odd_terms])
87f86f8529f5c54535d9a188c454f762f96a7a58
3,158
import os


def get_long_description():
    """
    Returns the long description of Wapyce.

    :return: The long description of Wapyce.
    :rtype: str
    """
    with open(
        os.path.join(BASE_DIRECTORY, 'README.md'),
        'r',
        encoding='utf-8'
    ) as readme_file:
        return readme_file.read()
f9dd5ba3cc94907b8b38a3bfacfd6c8e551b3c98
3,159
import jinja2


def wrap_locale_context(func):
    """Wraps the func with the current locale."""
    @jinja2.contextfilter
    def _locale_filter(ctx, value, *args, **kwargs):
        doc = ctx['doc']
        if not kwargs.get('locale', None):
            kwargs['locale'] = str(doc.locale)
        return func(value, *args, **kwargs)
    return _locale_filter
a2f720e9ed38eb2bf0f546ab392f295d969f7ab7
3,160
from numpy import loadtxt
from scipy.interpolate import UnivariateSpline


def mu_Xe(keV=12):
    """Returns inverse 1/e penetration depth [mm-1 atm-1] of Xe given the
    x-ray energy in keV. The transmission through a 3-mm thick slab of Xe
    at 6.17 atm (76 psi) was calculated every 100 eV over an energy range
    spanning 5-17 keV using:

    http://henke.lbl.gov/optical_constants/filter2.html

    This result was then converted to mu and saved as a tab-delimited text
    file. The returned result is calculated using a univariate spline and
    should be valid over the range 5-17 keV."""
    E_mu = loadtxt('mu_Xe.txt', dtype=float, delimiter='\t')
    us_mu = UnivariateSpline(E_mu[:, 0], E_mu[:, 1], s=0)
    return us_mu(1000 * keV)
5f106871e7517ef910739b74a13d2139e03ed480
3,161
import hashlib


def file_md5(input_file):
    """
    :param input_file: Path to input file.
    :type input_file: str
    :return: Returns the encoded data in the inputted file in hexadecimal format.
    """
    with open(input_file, 'rb') as f:
        data = f.read()
    return hashlib.md5(data).hexdigest()
4a7ea12e3b5e0429787eb65e651852e49b40ecf7
3,162
def signup(request):
    """Register a new user.

    This view has multiple behaviours based on the request composition.
    When some user is already signed in, the response asks the user to first
    sign out. When the request has no data about the new user, the response
    carries the registration form. When the request has valid data about the
    new user, the response informs the user of the registration success.

    Args:
        request (HttpRequest): the user request.

    Returns:
        HttpResponse: The response accordingly to the request composition.
    """
    keys = request.POST.keys()
    if request.user.is_authenticated():
        return render(request, 'account/signup_already_signedin.html', None)
    elif len(keys) == 0:
        return render(request, 'account/signup.html', None)
    else:
        username = request.POST.get('username')
        first_name = request.POST.get('first_name')
        last_name = request.POST.get('last_name')
        email = request.POST.get('email')
        password = request.POST.get('password')
        # confirm_password = request.POST.get('confirm_password')
        # TODO: Check password and confirmation.

        ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)
        ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, settings.LDAP_CACERTFILE)
        l = ldap.initialize(secret.LDAP_URI)
        l.protocol_version = ldap.VERSION3
        l.start_tls_s()
        l.simple_bind_s(secret.LDAP_BIND_DN, secret.LDAP_BIND_PASSWORD)

        dn = settings.LDAP_USER_DN_TEMPLATE % str(username)
        user = {
            'cn': str(first_name),
            'sn': str(last_name),
            'mail': str(email),
            'userPassword': str(utils.hashPassword(password)),
            'objectClass': ['person', 'organizationalPerson', 'inetOrgPerson']
        }
        ldif = modlist.addModlist(user)
        l.add_s(dn, ldif)
        l.unbind_s()

        context = {'username': username}
        return render(request, 'account/signup_successful.html', context)
f1534df25459396485ed83d96fd8e488d39b0925
3,163
from typing import Dict


def message_args() -> Dict[str, str]:
    """A formatted message."""
    return {"subject": "Test message", "message": "This is a test message"}
4d25d5c9f54aa0997f2e619f90eb6632717cf0d3
3,164
def to_drive_type(value):
    """Convert value to DriveType enum."""
    if isinstance(value, DriveType):
        return value.value

    sanitized = str(value).upper().strip().replace(" ", "_")
    try:
        return DriveType[sanitized].value
    except KeyError as err:
        raise ValueError(f"Unknown drive type: {value}") from err
10183ac3ad15c2e01d9abf262f097dc6b366e7ab
3,165
def upload_authorized_key(host, port, filepath):
    """UPLOAD (key) upload_authorized_key"""
    params = {'method': 'upload_authorized_key'}
    files = [('key', filepath, file_get_contents(filepath))]
    return _check(https.client.https_post(host, port, '/', params, files=files))
68ada5d834ff77c4ee1b09815a6b094c30a42c1b
3,166
def thermalize_cutoff(localEnergies, smoothing_window, tol):
    """Return position where system is thermalized according to some
    tolerance tol, based on the derivative of the smoothed local energies
    """
    mean = np.mean(localEnergies)
    smoothLocalEnergies = smoothing(localEnergies, smoothing_window)
    check_slope = derivative(smoothLocalEnergies) < tol
    cutoff = np.where(check_slope)[0][0]
    return cutoff
d72904596ab88298232e9c2ed0fac151e3e66a71
3,167
def annualize_metric(metric: float, holding_periods: int = 1) -> float:
    """
    Annualize metric of arbitrary periodicity

    :param metric: Metric to analyze
    :param holding_periods:
    :return: Annualized metric
    """
    days_per_year = 365
    trans_ratio = days_per_year / holding_periods
    return (1 + metric) ** trans_ratio - 1
0c84816f29255d49e0f2420b17abba66e2387c99
3,168
import argparse
import sys


def parse_args():
    """Command line arguments parser."""
    app = argparse.ArgumentParser()
    app.add_argument("in_chain", help="Input chain file or stdin")
    app.add_argument("reference_2bit", help="Reference 2bit file")
    app.add_argument("query_2bit", help="Query 2bit file")
    app.add_argument("output", help="Output chain or stdout")
    app.add_argument("-linearGap", choices=['loose', 'medium', 'filename'],
                     help="loose|medium|filename")
    app.add_argument("-scoreScheme",
                     help="Read the scoring matrix from a blastz-format file")
    if len(sys.argv) < 5:
        app.print_help()
        sys.exit(0)
    args = app.parse_args()
    return args
a0ef04f4769e247dea8816b70807f10c7efd5571
3,169
def get_latest_tag():
    """
    Find the value of the latest tag for the Adafruit CircuitPython library
    bundle.

    :return: The most recent tag value for the project.
    """
    global LATEST_BUNDLE_VERSION  # pylint: disable=global-statement
    if LATEST_BUNDLE_VERSION == "":
        LATEST_BUNDLE_VERSION = get_latest_release_from_url(
            "https://github.com/adafruit/Adafruit_CircuitPython_Bundle/releases/latest"
        )
    return LATEST_BUNDLE_VERSION
2195d2cde7e2ff67a110b1a1af2aa8cebad52294
3,170
def detail(request, name):
    """
    List all details about a single service.
    """
    service = CRITsService.objects(name=name, status__ne="unavailable").first()
    if not service:
        error = 'Service "%s" is unavailable. Please review error logs.' % name
        return render_to_response('error.html', {'error': error},
                                  RequestContext(request))

    # TODO: fix code so we don't have to do this
    service = service.to_dict()

    service_class = crits.service_env.manager.get_service_class(name)

    if user_is_admin(request.user):
        clean = False
        # Only show errors if the user is an admin.
        error = _get_config_error(service)
    else:
        # Clean all non-public values for a non-admin
        clean = True
        error = None

    # Replace the dictionary with a list (to preserve the order the options
    # were defined in the Service class), and remove data from any which
    # are not editable
    service['config_list'] = service_class.format_config(service['config'],
                                                         clean=clean)
    del service['config']
    return render_to_response('services_detail.html',
                              {'service': service, 'config_error': error},
                              RequestContext(request))
bca02ed15926222899db625ba908075ca5c9a13c
3,171
def read_gold_conll2003(gold_file):
    """
    Reads in the gold annotation from a file in CoNLL 2003 format.

    Returns:
        - gold: a String list containing one sequence tag per token.
                E.g. [B-Kochschritt, L-Kochschritt, U-Zutat, O]
        - lines: a list list containing the original line split at "\t"
    """
    gold = []
    lines = []
    with open(gold_file, encoding="utf-8") as f:
        for line in f:
            if line == "\n":
                continue
            line = line.strip().split("\t")
            gold.append(line[3])
            lines.append(line)
    return gold, lines
1e11513c85428d20e83d54cc2fa2d42ddd903341
3,172
import functools


def translate_nova_exception(method):
    """Transforms a cinder exception but keeps its traceback intact."""
    @functools.wraps(method)
    def wrapper(self, ctx, *args, **kwargs):
        try:
            res = method(self, ctx, *args, **kwargs)
        except (nova_exceptions.ConnectionRefused,
                keystone_exceptions.ConnectionError) as exc:
            err_msg = encodeutils.exception_to_unicode(exc)
            _reraise(exception.NovaConnectionFailed(reason=err_msg))
        except (keystone_exceptions.BadRequest,
                nova_exceptions.BadRequest) as exc:
            err_msg = encodeutils.exception_to_unicode(exc)
            _reraise(exception.BadRequest(reason=err_msg))
        except (keystone_exceptions.Forbidden,
                nova_exceptions.Forbidden):
            _reraise(exception.NotAuthorized())
        except (keystone_exceptions.NotFound,
                nova_exceptions.NotFound) as exc:
            err_msg = encodeutils.exception_to_unicode(exc)
            _reraise(exception.InstanceNotFound(reason=err_msg))
        return res
    return wrapper
186b7f944b03073c758b4af5f4ccfcaa80e8f5e8
3,173
def _update_form(form):
    """ """
    if not form.text():
        return form.setStyleSheet(error_css)
    return form.setStyleSheet(success_css)
94e241a98aa6c8305965d4149f4d60e28843aea7
3,174
import torch
from torch_adapter import TorchAdapter
from openvino.inference_engine import IEPlugin
from openvino_adapter import OpenvinoAdapter
import importlib


def create_adapter(openvino, cpu_only, force_torch, use_myriad):
    """Create the best adapter based on constraints passed as CLI arguments."""

    if use_myriad:
        openvino = True
        if cpu_only:
            raise Exception("Cannot run with both cpu-only and Myriad options")

    if force_torch and openvino:
        raise Exception("Cannot run with both Torch and OpenVINO")

    if not openvino:
        if importlib.util.find_spec("torch") is None:
            logger.info("Could not find Torch")
            openvino = True
        elif not cpu_only:
            if torch.cuda.is_available():
                logger.info("Detected GPU / CUDA support")
                return TorchAdapter(False, DEFAULT_STYLE)
            else:
                logger.info("Failed to detect GPU / CUDA support")

    if not force_torch:
        if importlib.util.find_spec("openvino") is None:
            logger.info("Could not find Openvino")
            if openvino:
                raise Exception("No suitable engine")
        else:
            if not cpu_only and not use_myriad:
                try:
                    IEPlugin("GPU")
                    logger.info("Detected iGPU / clDNN support")
                except RuntimeError:
                    logger.info("Failed to detect iGPU / clDNN support")
                    cpu_only = True

            logger.info("Using OpenVINO")
            logger.info("CPU Only: %s", cpu_only)
            logger.info("Use Myriad: %s", use_myriad)
            adapter = OpenvinoAdapter(cpu_only, DEFAULT_STYLE,
                                      use_myriad=use_myriad)
            return adapter

    logger.info("Using Torch with CPU")
    return TorchAdapter(True, DEFAULT_STYLE)
4fd0fc51c7758d32a1eac4d86d1b5dc6b90d20b7
3,175
def _make_ecg(inst, start, stop, reject_by_annotation=False, verbose=None):
    """Create ECG signal from cross channel average."""
    if not any(c in inst for c in ['mag', 'grad']):
        raise ValueError('Unable to generate artificial ECG channel')
    for ch in ['mag', 'grad']:
        if ch in inst:
            break
    logger.info('Reconstructing ECG signal from {}'
                .format({'mag': 'Magnetometers',
                         'grad': 'Gradiometers'}[ch]))
    picks = pick_types(inst.info, meg=ch, eeg=False, ref_meg=False)
    if isinstance(inst, BaseRaw):
        reject_by_annotation = 'omit' if reject_by_annotation else None
        ecg, times = inst.get_data(picks, start, stop, reject_by_annotation,
                                   True)
    elif isinstance(inst, BaseEpochs):
        ecg = np.hstack(inst.copy().crop(start, stop).get_data())
        times = inst.times
    elif isinstance(inst, Evoked):
        ecg = inst.data
        times = inst.times
    return ecg.mean(0, keepdims=True), times
27d1ef6da9c9d491de4b9806c85528f1226b2c3d
3,176
def lorentzian(freq, freq0, area, hwhm, phase, offset, drift):
    """
    Lorentzian line-shape function

    Parameters
    ----------
    freq : float or float array
        The frequencies for which the function is evaluated
    freq0 : float
        The center frequency of the function
    area : float
    hwhm : float
        Half-width at half-max
    """
    oo2pi = 1 / (2 * np.pi)
    df = freq - freq0
    absorptive = oo2pi * area * np.ones(freq.shape[0]) * (hwhm / (df ** 2 + hwhm ** 2))
    dispersive = oo2pi * area * df / (df ** 2 + hwhm ** 2)
    return (absorptive * np.cos(phase) + dispersive * np.sin(phase) +
            offset + drift * freq)
2f9b2ede75773c2100941e16fd14210b1a85a453
3,177
def findTilt(root):
    """
    :type root: TreeNode
    :rtype: int
    """
    if root is None:
        return 0
    return findTilt_helper(root)[1]
1338a704f754678f88dedaf5a968aa3bfe4ff17f
3,178
def generate_report():
    """
    Get pylint analysis report and write it to file
    """
    files = get_files_to_check()
    dir_path = create_report_dir()
    file_path = create_report_file(dir_path)
    config_opts = get_config_opts()
    pylint_opts = '--load-plugins pylint_flask' + config_opts
    pylint_stdout, pylint_stderr = epylint.py_run(files + ' ' + pylint_opts,
                                                  return_std=True)
    with open(file_path, 'w+') as report:
        report.write(pylint_stdout.getvalue())
        report.write(pylint_stderr.getvalue())
    return True
655345a128847285712f683274637201ee264010
3,179
import os
import pickle


def read_files(year, month, day, station):
    """
    """
    doy, cdoy, cyyyy, cyy = ymd2doy(year, month, day)
    # i have a function for this ....
    rinexfile = station + cdoy + '0.' + cyy + 'o'
    navfilename = 'auto' + cdoy + '0.' + cyy + 'n'
    if os.path.isfile(rinexfile):
        print('rinexfile exists')
    else:
        print(rinexfile)
        print('get the rinex file')
        rinex_unavco(station, year, month, day)
    # organize the file names
    print('get the sp3 and clock file names')
    sp3file, cname = igsname(year, month, day)

    # define some names of files
    if os.path.isfile(navfilename):
        print('nav exists')
    else:
        print('get nav')
        navname, navdir, foundit = getnavfile(year, month, day)
    print('read in the broadcast ephemeris')
    ephemdata = myreadnav(navfilename)
    if os.path.isfile(cname):
        print('file exists')
    else:
        print('get the CODE clock file')
        codclock(year, month, day)
    pname = cname[0:9] + 'pckl'
    print('pickle', pname)
    # if file exists already
    if os.path.isfile(pname):
        print('read existing pickle file')
        f = open(pname, 'rb')
        [prns, ts, clks] = pickle.load(f)
        f.close()
    else:
        print('read and save as pickle')
        prns, ts, clks = readPreciseClock(cname)
        # and then save them
        f = open(pname, 'wb')
        pickle.dump([prns, ts, clks], f)
        f.close()
    if os.path.isfile(sp3file):
        print('sp3 exists')
    else:
        print('get sp3')
        getsp3file(year, month, day)
    print('read in the sp3 file', sp3file)

    sweek, ssec, sprn, sx, sy, sz, sclock = read_sp3(sp3file)  # 20
    print('len returned data', len(ephemdata), navfilename)
    rinexpickle = rinexfile[0:11] + 'pclk'
    if os.path.isfile(rinexpickle):
        print('rinex pickle exists')
        f = open(rinexpickle, 'rb')
        [obs, x, y, z] = pickle.load(f)
        f.close()
    else:
        print('read the RINEX file ', rinexfile)
        obs, x, y, z = myscan(rinexfile)
        print('save as pickle file')
        f = open(rinexpickle, 'wb')
        pickle.dump([obs, x, y, z], f)
        f.close()
    return ephemdata, prns, ts, clks, sweek, ssec, sprn, sx, sy, sz, sclock, obs, x, y, z
4a3bb0344607fce34037cd4a50ac273fde166027
3,180
def make_trampoline(func_name):
    """ Create a main function that calls another function """
    mod = ir.Module('main')
    main = ir.Procedure('main')
    mod.add_function(main)
    entry = ir.Block('entry')
    main.add_block(entry)
    main.entry = entry
    entry.add_instruction(ir.ProcedureCall(func_name, []))
    entry.add_instruction(ir.Exit())
    return mod
1dcaf61cbadde4fdd8e94958658ce8b1b69612f1
3,181
import os
import sys


def main(global_config, **settings):
    """Return a Pyramid WSGI application."""
    if not settings.get('sqlalchemy.url'):
        try:
            settings['sqlalchemy.url'] = os.environ['BLOG2017_DB']
        except KeyError:
            print('Required BLOG2017_DB not set in global os environ.')
            sys.exit()
    authentication_policy = AuthTktAuthenticationPolicy(os.environ.get('AUTH_STRING'))
    authorization_policy = ACLAuthorizationPolicy()
    config = Configurator(settings=settings,
                          authentication_policy=authentication_policy,
                          authorization_policy=authorization_policy)
    config.include('pyramid_jinja2')
    config.include('.models')
    config.include('.routes')
    config.scan()
    return config.make_wsgi_app()
19e2bd9e097fc812c980626183a4ea98e88697d0
3,182
def model_fields(dbo, baseuri=None):
    """Extract known fields from a BQ object, while removing any known
    from C{excluded_fields}

    @rtype: dict
    @return: fields to be rendered in XML
    """
    attrs = {}
    try:
        dbo_fields = dbo.xmlfields
    except AttributeError:
        # This occurs when the object is a fake DB object
        # The dictionary is sufficient
        dbo_fields = dbo.__dict__
    for fn in dbo_fields:
        fn = mapping_fields.get(fn, fn)
        # Skip when map is None
        if fn is None:
            continue
        # Map is callable, then call
        if callable(fn):
            fn, attr_val = fn(dbo, fn, baseuri)
        else:
            attr_val = getattr(dbo, fn, None)
        # Put value in attribute dictionary
        if attr_val is not None and attr_val != '':
            if isinstance(attr_val, basestring):
                attrs[fn] = attr_val
            else:
                attrs[fn] = str(attr_val)  # unicode(attr_val, 'utf-8')
    return attrs
59a07057dccb116cc4753a4973a3128ccc79c558
3,183
def get_bsj(seq, bsj):
    """Return transformed sequence of given BSJ"""
    return seq[bsj:] + seq[:bsj]
d1320e5e3257ae22ca982ae4dcafbd4c6def9777
3,184
from typing import Dict
import warnings
import math


def sample(problem: Dict, N: int, calc_second_order: bool = True,
           skip_values: int = 0):
    """Generates model inputs using Saltelli's extension of the Sobol' sequence.

    Returns a NumPy matrix containing the model inputs using Saltelli's
    sampling scheme. Saltelli's scheme extends the Sobol' sequence in a way to
    reduce the error rates in the resulting sensitivity index calculations. If
    `calc_second_order` is False, the resulting matrix has ``N * (D + 2)``
    rows, where ``D`` is the number of parameters. If `calc_second_order` is
    True, the resulting matrix has ``N * (2D + 2)`` rows. These model inputs
    are intended to be used with :func:`SALib.analyze.sobol.analyze`.

    If `skip_values` is > 0, raises a UserWarning in cases where sample sizes
    may be sub-optimal. The convergence properties of the Sobol' sequence
    requires ``N < skip_values`` and that both `N` and `skip_values` are
    base 2 (e.g., ``N = 2^n``). See discussion in [4] for context and
    information.

    If skipping values, one recommendation is that the largest possible `n`
    such that ``(2^n)-1 <= N`` is skipped (see [5]).

    Parameters
    ----------
    problem : dict
        The problem definition
    N : int
        The number of samples to generate.
        Must be an exponent of 2 and < `skip_values`.
    calc_second_order : bool
        Calculate second-order sensitivities (default True)
    skip_values : int
        Number of points in Sobol' sequence to skip, ideally a value of base 2
        (default 0, see Owen [3] and Discussion [4])

    References
    ----------
    .. [1] Saltelli, A., 2002.
           Making best use of model evaluations to compute sensitivity indices.
           Computer Physics Communications 145, 280–297.
           https://doi.org/10.1016/S0010-4655(02)00280-1

    .. [2] Sobol', I.M., 2001.
           Global sensitivity indices for nonlinear mathematical models and
           their Monte Carlo estimates.
           Mathematics and Computers in Simulation,
           The Second IMACS Seminar on Monte Carlo Methods 55, 271–280.
           https://doi.org/10.1016/S0378-4754(00)00270-6

    .. [3] Owen, A. B., 2020.
           On dropping the first Sobol' point.
           arXiv:2008.08051 [cs, math, stat].
           Available at: http://arxiv.org/abs/2008.08051
           (Accessed: 20 April 2021).

    .. [4] Discussion: https://github.com/scipy/scipy/pull/10844
           https://github.com/scipy/scipy/pull/10844#issuecomment-673029539

    .. [5] Johnson, S. G.
           Sobol.jl: The Sobol module for Julia
           https://github.com/stevengj/Sobol.jl
    """
    # bit-shift test to check if `N` == 2**n
    if not ((N & (N-1) == 0) and (N != 0 and N-1 != 0)):
        msg = f"""
        Convergence properties of the Sobol' sequence is only valid if
        `N` ({N}) is equal to `2^n`.
        """
        warnings.warn(msg)

    if skip_values > 0:
        M = skip_values
        if not ((M & (M-1) == 0) and (M != 0 and M-1 != 0)):
            msg = f"""
            Convergence properties of the Sobol' sequence is only valid if
            `skip_values` ({M}) is equal to `2^m`.
            """
            warnings.warn(msg)

        n_exp = int(math.log(N, 2))
        m_exp = int(math.log(M, 2))
        if n_exp >= m_exp:
            msg = f"Convergence may not be valid as 2^{n_exp} ({N}) is >= 2^{m_exp} ({M})."
            warnings.warn(msg)

    D = problem['num_vars']
    groups = _check_groups(problem)

    if not groups:
        Dg = problem['num_vars']
    else:
        G, group_names = compute_groups_matrix(groups)
        Dg = len(set(group_names))

    # Create base sequence - could be any type of sampling
    base_sequence = sobol_sequence.sample(N + skip_values, 2 * D)

    if calc_second_order:
        saltelli_sequence = np.zeros([(2 * Dg + 2) * N, D])
    else:
        saltelli_sequence = np.zeros([(Dg + 2) * N, D])

    index = 0
    for i in range(skip_values, N + skip_values):

        # Copy matrix "A"
        for j in range(D):
            saltelli_sequence[index, j] = base_sequence[i, j]
        index += 1

        # Cross-sample elements of "B" into "A"
        for k in range(Dg):
            for j in range(D):
                if (not groups and j == k) or (groups and group_names[k] == groups[j]):
                    saltelli_sequence[index, j] = base_sequence[i, j + D]
                else:
                    saltelli_sequence[index, j] = base_sequence[i, j]
            index += 1

        # Cross-sample elements of "A" into "B"
        # Only needed if you're doing second-order indices (true by default)
        if calc_second_order:
            for k in range(Dg):
                for j in range(D):
                    if (not groups and j == k) or (groups and group_names[k] == groups[j]):
                        saltelli_sequence[index, j] = base_sequence[i, j]
                    else:
                        saltelli_sequence[index, j] = base_sequence[i, j + D]
                index += 1

        # Copy matrix "B"
        for j in range(D):
            saltelli_sequence[index, j] = base_sequence[i, j + D]
        index += 1

    saltelli_sequence = scale_samples(saltelli_sequence, problem)
    return saltelli_sequence
a3a356fd037b879c71cb6dc2e4751350857302e8
3,185
def standardize(table, option):
    """
    standardize
    Z = (X - mean) / (standard deviation)
    """
    if option == 'table':
        mean = np.mean(table)
        std = np.std(table)
    t = []
    for row in table:
        t_row = []
        if option != 'table':
            mean = np.mean(row)
            std = np.std(row)
        for i in row:
            if std == 0:
                t_row.append(0)
            else:
                t_row.append((i - mean) / std)
        t.append(t_row)
    return t
337ec0d22340ca74e54236e1cb39829eab8ad89b
3,186
def raw_input_nonblock():
    """
    return result of raw_input if has keyboard input, otherwise return None
    """
    if _IS_OS_WIN32:
        return _raw_input_nonblock_win32()
    else:
        raise NotImplementedError('Unsupported os.')
90cc9febcaa4866334b69b19809565795a07de49
3,187
def get_batch_hard(draw_batch_size, hard_batchs_size, semihard_batchs_size,
                   easy_batchs_size, norm_batchs_size, network, dataset,
                   nb_classes, margin):
    """
    Create batch of APN "hard" triplets

    Arguments:
    draw_batch_size -- integer : number of initial randomly taken samples
    hard_batchs_size -- integer : select the number of hardest samples to keep
    norm_batchs_size -- integer : number of random samples to add

    Returns:
    triplets -- list containing 3 tensors A, P, N of shape
                (hard_batchs_size + norm_batchs_size, w, h, c)
    """
    X = dataset
    m, w, h = X[0].shape  # c removed

    # Step 1: pick a random batch to study
    studybatch = get_batch_random(draw_batch_size, dataset, nb_classes)

    # Step 2: compute the loss with the current network: d(A,P)-d(A,N).
    # The alpha parameter is omitted here since we only want to order the samples.
    studybatchloss = np.zeros((draw_batch_size))

    # Compute embeddings for anchors, positives and negatives
    # print('*', studybatch[0][:, :, :].shape)
    A = network.predict(studybatch[0][:, :, :])
    P = network.predict(studybatch[1][:, :, :])
    N = network.predict(studybatch[2][:, :, :])

    # Compute d(A,P)-d(A,N)  # HARD
    studybatchloss = np.sqrt(np.sum(np.square(A - P), axis=1)) - np.sqrt(np.sum(np.square(A - N), axis=1))
    # Sort by distance (high distance first) and take the hardest
    selection = np.argsort(studybatchloss)[::-1][:hard_batchs_size]

    # Compute d(A,N)-d(A,P)  # EASY
    studybatchloss = -np.sqrt(np.sum(np.square(A - P), axis=1)) + np.sqrt(np.sum(np.square(A - N), axis=1))
    # Sort by distance (high distance first) and take the EASIEST
    selection1 = np.argsort(studybatchloss)[::-1][:easy_batchs_size]

    # Compute d(A,N)-d(A,P)  # SEMI-HARD
    semihard_index1 = np.squeeze(np.where(np.sqrt(np.sum(np.square(A - P), axis=1)) + margin > np.sqrt(np.sum(np.square(A - N), axis=1))))
    semihard_index2 = np.squeeze(np.where(np.sqrt(np.sum(np.square(A - P), axis=1)) < np.sqrt(np.sum(np.square(A - N), axis=1))))
    semihard_index = np.intersect1d(semihard_index1, semihard_index2)
    selection2 = semihard_index[:semihard_batchs_size]

    selection = np.append(selection, selection1)  # Hard & Easy
    selection = np.append(selection, selection2)  # Hard & Easy & SemiHard

    # Draw other random samples from the batch
    selection2 = np.random.choice(np.delete(np.arange(draw_batch_size), selection),
                                  norm_batchs_size, replace=False)
    selection = np.append(selection, selection2)  # Hard & Easy & SemiHard & Random

    triplets = [studybatch[0][selection, :, :],
                studybatch[1][selection, :, :],
                studybatch[2][selection, :, :]]

    return triplets
da6dc7f69354b0b74b59717140c6c46826925050
3,188
import math


def sine(
    start, end, freq, amp: Numeric = 1, n_periods: Numeric = 1
) -> TimeSerie:
    """
    Generate a sine TimeSerie.
    """
    index = pd.date_range(start=start, end=end, freq=freq)
    return TimeSerie(
        index=index,
        y_values=np.sin(
            np.linspace(0, 2 * math.pi * n_periods, num=len(index))
        ) * amp,
    )
df4254f9fafcb61f0bcf492edf1847d89f4debb0
3,189
def get_from_address(sending_profile, template_from_address):
    """Get campaign from address."""
    # Get template display name
    if "<" in template_from_address:
        template_display = template_from_address.split("<")[0].strip()
    else:
        template_display = None

    # Get template sender
    template_sender = template_from_address.split("@")[0].split("<")[-1]

    # Get sending profile domain
    if type(sending_profile) is dict:
        sp_from = sending_profile["from_address"]
    else:
        sp_from = sending_profile.from_address
    sp_domain = sp_from.split("<")[-1].split("@")[1].replace(">", "")

    # Generate from address
    if template_display:
        from_address = f"{template_display} <{template_sender}@{sp_domain}>"
    else:
        from_address = f"{template_sender}@{sp_domain}"

    return from_address
8617d2b793b76456cb7d1a17168f27fd1d548e6d
3,190
import ctypes


def is_dwm_compositing_enabled():
    """Is Desktop Window Manager compositing (Aero) enabled."""
    enabled = ctypes.c_bool()
    try:
        DwmIsCompositionEnabled = ctypes.windll.dwmapi.DwmIsCompositionEnabled
    except (AttributeError, WindowsError):
        # dwmapi or DwmIsCompositionEnabled is not present
        return False
    rval = DwmIsCompositionEnabled(ctypes.byref(enabled))
    return rval == 0 and enabled.value
9b31b3ef62d626008d2b6c6ef59446be79da89f6
3,191
def fgsm(x, y_true, y_hat, epsilon=0.075):
    """Calculates the fast gradient sign method adversarial attack

    Following the FGSM algorithm, determines the gradient of the cost function
    wrt the input, then perturbs all the input in the direction that will cause
    the greatest error, with small magnitude.
    """
    loss = tf.nn.softmax_cross_entropy_with_logits(
        labels=y_true, logits=y_hat)
    grad, = tf.gradients(loss, x)
    scaled_grad = epsilon * tf.sign(grad)
    return tf.stop_gradient(x + scaled_grad)
a71d2042ea1f5efa0a3f6409836da52bf323aa5c
3,192
def tour_delete(request, id):
    """
    delete tour depending on id
    """
    success_message, error_message = None, None
    form = TourForm()
    tour = get_object_or_404(Tour, id=id)
    tours = Tour.objects.all()
    if request.method == "POST":
        tour.delete()
        success_message = "deleted tour"
    else:
        error_message = "to delete tour"

    context = {
        'form': form,
        'tours': tours,
        'success_message': success_message,
        'error_message': error_message,
        'user_info': Employee.objects.get(employee_id=request.user.username),
        'cart': Cart.objects.filter(created_by__employee_id=request.user.username).count,
    }
    return render(request, 'employee/tour_add.html', context)
c42e355734444d858555ad627f202f73161cbedf
3,193
import random


def d3():
    """Simulate the roll of a 3 sided die"""
    return random.randint(1, 3)
c2eac44bb36b7e35c66894bce3467f568a735ca1
3,194
import os


def split_file_name(file_name):
    """
    splits the name from the file name.

    :param file_name:
    :return:
    """
    return os.path.splitext(file_name)[0]
f13609671ca6d9c562cef7b371147bd89d8815c6
3,195
import subprocess


def run_as_root(command, printable=True, silent_start=False):
    """
    General purpose wrapper for running a subprocess as root user
    """
    sudo_command = "sudo {}".format(command)
    return run_command(sudo_command,
                       error_msg="",
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE,
                       cwd=None,
                       printable=printable,
                       silent_start=silent_start)
b5abedfe6ffe4d6e5182f1cd69b8132b29755d97
3,196
def _search_qr(model, identifier, session):
    """Search the database using a Query/Retrieve *Identifier* query.

    Parameters
    ----------
    model : pydicom.uid.UID
        Either *Patient Root Query Retrieve Information Model* or *Study Root
        Query Retrieve Information Model* for C-FIND, C-GET or C-MOVE.
    identifier : pydicom.dataset.Dataset
        The request's *Identifier* dataset.
    session : sqlalchemy.orm.session.Session
        The session we are using to query the database.

    Returns
    -------
    list of db.Instance
        The Instances that match the query.
    """
    # Will raise InvalidIdentifier if check failed
    _check_identifier(identifier, model)

    if model in _PATIENT_ROOT:
        attr = _PATIENT_ROOT[model]
    else:
        attr = _STUDY_ROOT[model]

    # Hierarchical search method: C.4.1.3.1.1
    query = None
    for level, keywords in attr.items():
        # Keywords at current level that are in the identifier
        keywords = [kw for kw in keywords if kw in identifier]
        # Create query dataset for only the current level and run it
        ds = Dataset()
        [setattr(ds, kw, getattr(identifier, kw)) for kw in keywords]
        query = build_query(ds, session, query)

        if level == identifier.QueryRetrieveLevel:
            break

    return query.all()
29fe8831b1e44a381202b48212ff7c40c4c8d7fd
3,197
import json


def retrieve_zoom_metadata(
    stage=None, zoom_api=None, file_key=None, log=None, **attributes
):
    """General function to retrieve metadata from various Zoom endpoints."""
    if "id" in attributes:
        api_response = zoom_api(id=attributes["id"])
    elif "meeting_id" in attributes:
        api_response = zoom_api(meeting_id=attributes["meeting_id"])
    log.debug(
        stage,
        reason="Received Zoom",
        response=api_response,
        response_content=api_response.content,
    )

    api_content = json.loads(api_response.content)
    if not api_response.ok:
        reason = api_content["message"] if "message" in api_content else "unknown"
        log.error(stage, reason=reason, response=api_response.content)
        raise RuntimeError(f"Retrieve Zoom meeting details failed: {reason}")

    if file_key:
        s3_object = s3.Object(RECORDINGS_BUCKET, file_key)
        response = s3_object.put(
            Body=json.dumps(api_content), ContentType="application/json"
        )
        log.debug(stage, reason="Put meeting details", response=response)

    log.info(stage, reason="Meeting details", details=api_content)
    return api_content
68c92e00693cf1deb4153bd71cd15046642a7c7d
3,198
def find_max_value(binary_tree):
    """This function takes a binary tree and returns the largest value of all
    the nodes in that tree, using breadth first traversal while keeping track
    of the largest value seen so far (O(N) time, O(N) space for the queue).
    """
    root_node = [binary_tree.root]
    output = []
    largest = [float('-inf')]  # mutable holder so the nested function can update it

    # helper function
    def is_null(current_value):
        """this is a helper function to check if a node reference is None,
        which means we have gone off the bottom depth of the tree; returns a
        boolean"""
        return current_value is None

    def _walk(input_list):
        """This is the recursive function in our breadth first traversal which
        implements a queue without the queue class; it visits every node of the
        current level, records the largest value seen so far, then recurses on
        the next level. The base case is when all values of the next level are
        None, which means we have gone off the bottom depth of the tree."""
        counter = 0
        new_nodes = []
        while counter < len(input_list):
            node = input_list[counter]
            if node:
                if node.value > largest[0]:
                    largest[0] = node.value
                output.append(node)
                new_nodes.append(node.left)
                new_nodes.append(node.right)
            counter += 1
        if not all(is_null(n) for n in new_nodes):
            _walk(new_nodes)

    _walk(root_node)
    return largest[0]
a797ea1598195cfcfe1abf00d73562c59617ad9b
3,199