content: string (length 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
def definstance(name, ty, expr):
    """
    Arguments:
    - `name`: a string
    - `ty`: a type of the form ClassName(t1,...,tn)
    """
    root, _ = root_app(root_clause(ty))
    if root.info.is_class:
        class_name = root.name
        c = defexpr(name, expr, type=ty, unfold=[class_name])
        conf.current_ctxt().class_instances[name] = c.type
        conf.current_ctxt().hyps[name] = c.type
        return c
    else:
        raise Exception("Error in definition of {0!s}:"
                        "expected {1!s} to be a class name"
                        .format(name, root))
1b692a9ac49bc6a68568ee232e6e516f83b64adf
20,309
from typing import List


def read_plaintext_inputs(path: str) -> List[str]:
    """Read input texts from a plain text file where each line corresponds to one input"""
    with open(path, 'r', encoding='utf8') as fh:
        inputs = fh.read().splitlines()
    print(f"Done loading {len(inputs)} inputs from file '{path}'")
    return inputs
27b00f4dfcdf4d76e04f08b6e74c062f2f7374d0
20,311
def extract_tool_and_dsname_from_name(row):
    """
    Split the 'name' field of a row such as "Basecall (MB1.6K)" into the tool name
    ("Basecall") and the dataset tag ("MB1.6K"), derive the read count encoded in the
    tag (e.g. 1.6 for MB1.6K, i.e. 1600 reads), and parse the resource-usage fields.

    :param row: a row with 'name', 'duration', 'realtime', '%cpu', 'peak_rss',
        'peak_vmem', 'rchar' and 'wchar' fields
    :return: toolname, dsname, reads, duration, realtime, cpu, peak_rss, peak_vmem, rchar, wchar
    """
    try:
        toolname, dsname = row['name'].strip().split(' ')
        dsname = dsname[1:-1]
    except:  # No tag to process
        toolname = row['name'].strip()
        dsname = 'None'

    if not dsname.startswith('MB'):  # not tagged with MB
        dsname = 'None'
        reads = 0
    else:
        reads = float(dsname[2:-1])

    duration = parse_string_time_to_seconds(row['duration'])
    realtime = parse_string_time_to_seconds(row['realtime'])
    cpu = float(row['%cpu'][:-1])
    peak_rss = parse_mem_str_to_gbsize(row['peak_rss'])
    peak_vmem = parse_mem_str_to_gbsize(row['peak_vmem'])
    rchar = parse_mem_str_to_gbsize(row['rchar'])
    wchar = parse_mem_str_to_gbsize(row['wchar'])

    return toolname, dsname, reads, duration, realtime, cpu, peak_rss, peak_vmem, rchar, wchar
f02b106da47544c522499af1ee8670870749fb20
20,312
def pyramid_pooling(inputs, layout='cna', filters=None, kernel_size=1, pool_op='mean', pyramid=(0, 1, 2, 3, 6), flatten=False, name='psp', **kwargs): """ Pyramid Pooling module. """ shape = inputs.get_shape().as_list() data_format = kwargs.get('data_format', 'channels_last') static_shape = np.array(shape[1: -1] if data_format == 'channels_last' else shape[2:]) dynamic_shape = tf.shape(inputs)[1: -1] if data_format == 'channels_last' else tf.shape(inputs)[2:] axis = -1 if data_format == 'channels_last' else 1 num_channels = shape[axis] if filters is None: filters = num_channels // len(pyramid) with tf.variable_scope(name): layers = [] for level in pyramid: if level == 0: x = inputs else: # Pooling if None not in static_shape: x = _static_pyramid_pooling(inputs, static_shape, level, pool_op, name='pool-%d' % level) upsample_shape = static_shape else: x = _dynamic_pyramid_pooling(inputs, level, pool_op, num_channels, data_format) upsample_shape = dynamic_shape # Conv block to set number of feature maps x = ConvBlock(layout=layout, filters=filters, kernel_size=kernel_size, name='conv-%d' % level, **kwargs)(x) # Output either vector with fixed size or tensor with fixed spatial dimensions if flatten: x = tf.reshape(x, shape=(-1, level*level*filters), name='reshape-%d' % level) concat_axis = -1 else: x = Upsample(layout='b', shape=upsample_shape, name='upsample-%d' % level, **kwargs)(x) concat_axis = axis layers.append(x) x = tf.concat(layers, axis=concat_axis, name='concat') return x
1484a686791cd017a53d182994e19b333ffc00b3
20,313
def df(r, gamma):
    """ divergence-free function """
    eta = soft_threshold(r, gamma)
    return eta - np.mean(eta != 0) * r
bf4d0a5d8bcbb5fa80b66d1dd555f15a44117319
20,314
def clip_3d_liang_barsky(zmin, zmax, p0, p1): """Clips the three-dimensional line segment in the canonial view volume by the algorithm of Liang and Barsky. Adapted from James D. Foley, ed., __Computer Graphics: Principles and Practice__ (Reading, Mass. [u.a.]: Addison-Wesley, 1998), 274 as well as http://www.eecs.berkeley.edu/Pubs/TechRpts/1992/CSD-92-688.pdf. Parameters ---------- zmin, zmax : float p0, p1 : array (size 3) of float the endpoints to be clipped (in-place operation) Returns ------- is_visible : bool """ x0, y0, z0 = p0 x1, y1, z1 = p1 # test for a trivial reject if (x0 > z0 and x1 > z1) or (y0 > z0 and y1 > z1) or \ (x0 < -z0 and x1 < -z1) or (y0 < -z0 and y1 < -z1) or \ (z0 < zmin) and (z1 < zmin) or (z0 > zmax and z1 > zmax): return False tmin_tmax = np.array((0.0, 1.0)) dx = x1 - x0 dz = z1 - z0 if clip_t(-dx - dz, x0 + z0, tmin_tmax): # right side if clip_t(dx - dz, -x0 + z0, tmin_tmax): # left side # if we got this far, part of the line is in -z <= x <= z dy = y1 - y0 if clip_t(dy - dz, -y0 + z0, tmin_tmax): # bottom if clip_t(-dy - dz, y0 + z0, tmin_tmax): # top # line is in -z <= x <= z, -z <= y <= z if clip_t(-dz, z0 - zmin, tmin_tmax): # front if clip_t(dz, zmax - z0, tmin_tmax): # back # part of the line is visible in -z <= x <= z, # -z <= y <= z, -1 <= z <= zmin tmin, tmax = tmin_tmax if tmax < 1: p1[0] = x0 + tmax * dx p1[1] = y0 + tmax * dy p1[2] = z0 + tmax * dz if tmin > 0: p0[0] += tmin * dx p0[1] += tmin * dy p0[2] += tmin * dz return True return False
2ffcb60a4b2b13344f2255f5d5d1816199c666a4
20,315
def RunLatencyTest(sending_vm, receiving_vm, use_internal_ip=True): """Run the psping latency test. Uses a TCP request-response time to measure latency. Args: sending_vm: the vm to send the tcp request. receiving_vm: the vm acting as the server. use_internal_ip: whether or not to use the private IP or the public IP. Returns: list of samples representing latency between the two VMs. """ server_ip = (receiving_vm.internal_ip if use_internal_ip else receiving_vm.ip_address) client_command = ( 'cd {psping_exec_dir}; ' 'sleep 2;' # sleep to make sure the server starts first. '.\\psping.exe /accepteula -l {packet_size} -i 0 -q ' '-n {rr_count} -h {bucket_count} {ip}:{port}' ' > {out_file}').format( psping_exec_dir=sending_vm.temp_dir, packet_size=FLAGS.psping_packet_size, rr_count=FLAGS.psping_rr_count, bucket_count=FLAGS.psping_bucket_count, ip=server_ip, port=TEST_PORT, out_file=PSPING_OUTPUT_FILE) # PSPing does not have a configurable timeout. To get around this, start the # server as a background job, then kill it after 10 seconds server_command = ( '{psping_exec_dir}\\psping.exe /accepteula -s 0.0.0.0:{port};').format( psping_exec_dir=sending_vm.temp_dir, port=TEST_PORT) process_args = [(_RunPsping, (receiving_vm, server_command), {}), (_RunPsping, (sending_vm, client_command), {})] background_tasks.RunParallelProcesses(process_args, 200, 1) cat_command = 'cd {psping_exec_dir}; cat {out_file}'.format( psping_exec_dir=sending_vm.temp_dir, out_file=PSPING_OUTPUT_FILE) output, _ = sending_vm.RemoteCommand(cat_command) return ParsePspingResults(output, sending_vm, receiving_vm, use_internal_ip)
5f235f0adefe988be564f8e6dce7edfd4f292be4
20,316
def get_descriptor_list(stackdriver):
    """Return a list of all the stackdriver custom metric descriptors."""
    type_map = stackdriver.descriptor_manager.fetch_all_custom_descriptors(
        stackdriver.project)
    descriptor_list = type_map.values()
    descriptor_list.sort(compare_descriptor_types)
    return descriptor_list
9b4b16f3d3b0330a786db0310f889f8ac132cb32
20,317
def dankerize(string: str, upper_case_ratio=0.2) -> str:
    """
    Transform a string to lower case, and randomly set some characters to
    upper case and return the result.

    string: the string to dankerize
    upper_case_ratio: the upper_case/letter ratio
    """
    ret = ""
    for i in range(len(string)):
        if uniform(0, 1.0) <= upper_case_ratio:
            ret += string[i].upper()
        else:
            ret += string[i].lower()
    return ret
55f186104166b0804cadae2df5fa19deaf36473b
20,318
def distance_constraints_too_complex(wordConstraints):
    """
    Decide if the constraints on the distances between pairs
    of search terms are too complex, i. e. if there is no single word
    that all pairs include. If the constraints are too complex
    and the "distance requirements are strict" flag is set,
    the query will find some invalid results, so further (slow)
    post-filtering is needed.
    """
    if wordConstraints is None or len(wordConstraints) <= 0:
        return False
    commonTerms = None
    for wordPair in wordConstraints:
        if commonTerms is None:
            commonTerms = set(wordPair)
        else:
            commonTerms &= set(wordPair)
        if len(commonTerms) <= 0:
            return True
    return False
43429fd64dbf5fa118e2cbf1e381686e1a8518c9
20,319
def greedy_search(model, decoding_function, initial_ids, initial_memories, int_dtype, float_dtype, max_prediction_length, batch_size, eos_id, do_sample, time_major): """ Greedily decodes the target sequence conditioned on the output of the encoder and the current output prefix. """ # Declare time-dimension time_dim = int(not time_major) # i.e. 0 if time_major, 1 if batch_major # Define the 'body for the tf.while_loop() call def _decoding_step(current_time_step, all_finished, next_ids, decoded_ids, decoded_score, memories): """ Defines a single step of greedy decoding. """ # Propagate through decoder step_logits, memories = decoding_function(next_ids, current_time_step, memories) step_logits = model.sampling_utils.adjust_logits(step_logits) # Calculate log probabilities for token prediction at current time-step step_scores = tf.nn.log_softmax(step_logits) # Determine next token to be generated, next_ids has shape [batch_size] if do_sample: next_ids = tf.squeeze(tf.multinomial(step_scores, num_samples=1, output_dtype=int_dtype), axis=1) else: # Greedy decoding next_ids = tf.argmax(step_scores, -1, output_type=int_dtype) # Collect scores associated with the selected tokens score_coordinates = tf.stack([tf.range(batch_size, dtype=int_dtype), next_ids], axis=1) decoded_score += tf.gather_nd(step_scores, score_coordinates) # Concatenate newly decoded token ID with the previously decoded ones decoded_ids = tf.concat([decoded_ids, tf.expand_dims(next_ids, 1)], 1) # Extend next_id's dimensions to be compatible with input dimensionality for the subsequent step next_ids = tf.expand_dims(next_ids, time_dim) # Check if generation has concluded with <EOS> # all_finished |= tf.equal(tf.squeeze(next_ids, axis=time_dim), eos_id) all_finished |= tf.equal(tf.reduce_prod(decoded_ids - eos_id, axis=time_dim), eos_id) return current_time_step + 1, all_finished, next_ids, decoded_ids, decoded_score, memories # Define the termination condition for the tf.while_loop() call def _continue_decoding(_current_time_step, _all_finished, *_): """ Returns 'False' if all of the sequences in the generated sequence batch exceeded the maximum specified length or terminated with <EOS>, upon which the while loop is exited. """ continuation_check = \ tf.logical_and(tf.less(_current_time_step, max_prediction_length), tf.logical_not(tf.reduce_all(_all_finished))) return continuation_check # Initialize decoding-relevant variables and containers current_time_step = tf.constant(1) all_finished = tf.fill([batch_size], False) # None of the sequences is marked as finished next_ids = initial_ids decoded_ids = tf.zeros([batch_size, 0], dtype=int_dtype) # Sequence buffer is empty decoded_score = tf.zeros([batch_size], dtype=float_dtype) memories = initial_memories # Execute the auto-regressive decoding step via while loop _, _, _, decoded_ids, log_scores, memories = \ tf.while_loop(cond=_continue_decoding, body=_decoding_step, loop_vars=[current_time_step, all_finished, next_ids, decoded_ids, decoded_score, memories], shape_invariants=[tf.TensorShape([]), tf.TensorShape([None]), tf.TensorShape([None, None]), tf.TensorShape([None, None]), tf.TensorShape([None]), get_memory_invariants(memories)], parallel_iterations=10, swap_memory=False, back_prop=False) # Should return logits also, for training return decoded_ids, log_scores
3324a45ce13181ea55c8588e497864526272475d
20,320
import time def database_mostcited(response: Response, request: Request=Query(None, title=opasConfig.TITLE_REQUEST, description=opasConfig.DESCRIPTION_REQUEST), morethan: int=Query(15, title=opasConfig.TITLE_CITED_MORETHAN, description=opasConfig.DESCRIPTION_CITED_MORETHAN), period: str=Query('5', title="Period (5, 10, 20, or all)", description=opasConfig.DESCRIPTION_MOST_CITED_PERIOD), pubperiod: int=Query(None, title=opasConfig.TITLE_PUBLICATION_PERIOD, description=opasConfig.DESCRIPTION_PUBLICATION_PERIOD), author: str=Query(None, title=opasConfig.TITLE_AUTHOR, description=opasConfig.DESCRIPTION_AUTHOR), title: str=Query(None, title=opasConfig.TITLE_TITLE, description=opasConfig.DESCRIPTION_TITLE), sourcename: str=Query(None, title=opasConfig.TITLE_SOURCENAME, description=opasConfig.DESCRIPTION_SOURCENAME), sourcecode: str=Query(None, title=opasConfig.TITLE_SOURCECODE, description=opasConfig.DESCRIPTION_SOURCECODE), sourcetype: str=Query(None, title=opasConfig.TITLE_SOURCETYPE, description=opasConfig.DESCRIPTION_PARAM_SOURCETYPE), abstract:bool=Query(False, title="Return an abstract with each match", description="True to return an abstract"), stat:bool=Query(False, title="Return minimal information", description="True to return minimal information for statistical tables"), limit: int=Query(10, title=opasConfig.TITLE_LIMIT, description=opasConfig.DESCRIPTION_LIMIT), offset: int=Query(0, title=opasConfig.TITLE_OFFSET, description=opasConfig.DESCRIPTION_OFFSET) ): """ ## Function <b>Return a list of documents for a SourceCode source (and optional year specified in query params).</b> If you don't request abstracts returned, document permissions will not be checked or returned. This is intended to speed up retrieval, especially for returning large numbers of articles (e.g., for downloads.) Note: The GVPi implementation does not appear to support the limit and offset parameter ## Return Type models.DocumentList ## Status This endpoint is working. ## Sample Call /v1/Database/MostCited/ ## Notes ## Potential Errors """ time.sleep(.25) ocd, session_info = opasAPISupportLib.get_session_info(request, response) # session_id = session_info.session_id #print ("in most cited") # return documentList ret_val, ret_status = opasAPISupportLib.database_get_most_cited( period=period, more_than=morethan, publication_period=pubperiod, author=author, title=title, source_name=sourcename, source_code=sourcecode, source_type=sourcetype, # see VALS_SOURCE_TYPE (norm_val applied in opasCenralDBLib) abstract_requested=abstract, req_url=request.url, limit=limit, offset=offset, session_info=session_info ) if isinstance(ret_val, models.ErrorReturn): raise HTTPException( status_code=ret_val.httpcode, detail = ret_val.error + " - " + ret_val.error_description ) else: status_message = opasCentralDBLib.API_STATUS_SUCCESS status_code = 200 # Don't record in final build - (ok for now during testing) ocd, session_info = opasAPISupportLib.get_session_info(request, response) ocd.record_session_endpoint(api_endpoint_id=opasCentralDBLib.API_DATABASE_MOSTCITED, session_info=session_info, params=request.url._url, return_status_code = status_code, status_message=status_message ) #print ("out mostcited") return ret_val
b18d3a3674cb5a52b7d3ea05db985774d7d25a4c
20,321
def format_event_leef(event):
    """Format an event as QRadar / LEEF"""
    syslog_header = f'<13>1 {event["actionTime"]} {hostname}'
    leef_header = f'LEEF:2.0|TrinityCyber|PTI|1|{event.pop("id")}|xa6|'
    fields = dict()
    fields["devTime"] = event.pop("actionTime")
    fields["devTimeFormat"] = "yyyy-MM-dd'T'HH:mm:ss.SSSXXX"  # (e.g. 2022-04-25T00:01:19.109+00:00)

    # LEEF-standard fields
    if "source" in event:
        fields["src"] = event.pop("source")
    if "destination" in event:
        fields["dst"] = event.pop("destination")
    if "sourcePort" in event:
        fields["srcPort"] = event.pop("sourcePort")
    if "destinationPort" in event:
        fields["dstPort"] = event.pop("destinationPort")
    if "transportProtocol" in event:
        fields["proto"] = event.pop("transportProtocol")

    # Formula-related metadata
    formula_metadata = event.pop("formula")
    fields["tcFormulaId"] = formula_metadata["formulaId"]
    fields["tcFormulaTitle"] = formula_metadata["title"]
    for key, value in formula_metadata["tags"].items():
        key = "tcFormula" + key.title().replace(" ", "")
        fields[key] = value

    # Application / protocol related data
    for app_fields in event.pop("applicationData"):
        for key, value in app_fields.items():
            if value is None:
                continue
            if isinstance(value, str):
                # Escape delimiter
                value = value.replace("\xa6", "\\\xa6")
            fields[key] = value

    # Add any extra information from the query
    fields.update(event)

    fields_formatted = "\xa6".join([f"{key}={value}" for key, value in fields.items()])
    return f"{syslog_header} {leef_header}{fields_formatted}"
ca463c9e86d6b7880e992aa11cd4b6ae7592dab4
20,322
def _file_path(ctx, val):
    """Return the path of the given file object.

    Args:
      ctx: The context.
      val: The file object.
    """
    return val.path
7c930f2511a0950e29ffc327e85cf9b2b3077c02
20,323
def Mapping_Third(Writelines, ThirdClassDict):
    """
    :param Writelines: the methods of the APK that are about to be written out
    :param ThirdClassDict: the dictionary of third-party classes for each APK
    :return: UpDateWriteLines
    """
    UpDateWriteLines = []
    for l in Writelines:
        if l.strip() in list(ThirdClassDict.keys()):
            UpDateWriteLines.extend(ThirdClassDict[l.strip()])
        else:
            UpDateWriteLines.extend([l])
    return UpDateWriteLines
eb94db36d06104007cbbacf8884cf6d45fee46b5
20,324
def rotate_points_around_origin(points, origin, angle):
    """
    Rotate a 2D array of points counterclockwise by a given angle around a given origin.

    The angle should be given in degrees.
    """
    angle = angle * np.pi / 180
    ox, oy = origin.tolist()
    new_points = np.copy(points)
    new_points[:, 0] = ox + np.cos(angle) * (
        points[:, 0] - ox) - np.sin(angle) * (points[:, 1] - oy)
    new_points[:, 1] = oy + np.sin(angle) * (
        points[:, 0] - ox) + np.cos(angle) * (points[:, 1] - oy)
    return new_points
0e21c2a9d6c870202935f8dbd9e725d9586670c3
20,325
def get_names_to_aliases(inp) -> dict:
    """
    Returns pair,
    - out[0] = dictionary of names to sets of aliases
    - out[1] = errors from calling names_to_links, i.e., from file reading

    @param inp: string vault directory, or a names_to_links dictionary;
        if a string is given, the get_names_to_links method is used
    """
    if type(inp) is str:
        inp = get_names_to_links(inp)
    # now inp must be a names_to_links pair
    out = dict()
    for filename, dict_links_to_aliases in inp[0].items():
        for link_filename, set_of_aliases in dict_links_to_aliases.items():
            try:
                out[link_filename].update(set_of_aliases)
            except KeyError:
                out[link_filename] = set(set_of_aliases)
    return [out, inp[1]]
9648099dc8422abceb5c095191e90f3dad14c4fb
20,326
def length(vec):
    """
    Length of a given vector. If vec is a scalar, its length is 1.

    Parameters
    ----------
    vec: scalar or arr
        Input vector

    Returns
    -------
    length: int
        Length of vec. If vec is a scalar, its length is 1.
    """
    if np.ndim(vec) == 0:
        length = 1
    else:
        length = len(vec)
    return length
d8baea0b5f5e0bdc30b9e5a5d76b06cd876c87ba
20,327
def build_eval_infeeds(params): """Create the TPU infeed ops.""" eval_size = get_eval_size(params) num_eval_steps = eval_size // params.eval_batch_size dev_assign = params.device_assignment host_to_tpus = {} for replica_id in range(params.num_replicas): host_device = dev_assign.host_device(replica=replica_id, logical_core=0) tpu_ordinal = dev_assign.tpu_ordinal(replica=replica_id, logical_core=0) if host_device not in host_to_tpus: host_to_tpus[host_device] = [tpu_ordinal] else: assert tpu_ordinal not in host_to_tpus[host_device] host_to_tpus[host_device].append(tpu_ordinal) infeed_ops = [] infeed_graphs = [] num_inputs = len(host_to_tpus) for i, (host, tpus) in enumerate(host_to_tpus.items()): infeed_graph = tf.Graph() infeed_graphs.append(infeed_graph) with infeed_graph.as_default(): def enqueue_fn(host_device=host, input_index=i, device_ordinals=tpus): """Docs.""" worker_infeed_ops = [] with tf.device(host_device): dataset = build_eval_dataset( params, batch_size=params.eval_batch_size // num_inputs, num_workers=num_inputs, worker_index=input_index) inputs = tf.data.make_one_shot_iterator(dataset).get_next() if params.use_xla_sharding and params.num_cores_per_replica > 1: inputs, partition_dims = pad_inputs_for_xla_sharding(params, inputs) num_splits = len(device_ordinals) if len(device_ordinals) > 1: inputs = [tf.split(v, num_splits, 0) for v in inputs] else: inputs = [[v] for v in inputs] q = tpu_feed._PartitionedInfeedQueue( number_of_tuple_elements=len(inputs), host_id=int(host_device.split('/task:')[-1].split('/')[0]), input_partition_dims=partition_dims, device_assignment=dev_assign) inputs = [[v[i] for v in inputs] for i in range(num_splits)] worker_infeed_ops.extend(q.generate_enqueue_ops(inputs)) else: num_splits = len(device_ordinals) if len(device_ordinals) > 1: inputs = [tf.split(v, num_splits, 0) for v in inputs] else: inputs = [[v] for v in inputs] input_shapes = [v[0].shape for v in inputs] for j, device_ordinal in enumerate(device_ordinals): worker_infeed_ops.append(tf.raw_ops.InfeedEnqueueTuple( inputs=[v[j] for v in inputs], shapes=input_shapes, device_ordinal=device_ordinal)) return worker_infeed_ops def _body(i): with tf.control_dependencies(enqueue_fn()): return i+1 infeed_op = tf.while_loop( lambda step: tf.less(step, tf.cast(num_eval_steps, step.dtype)), _body, [0], parallel_iterations=1, name='eval_infeed').op infeed_ops.append(infeed_op) return infeed_ops, infeed_graphs, eval_size
e62586dd8fe6358eaed9a0a2eaf43fd607c0323b
20,328
import json


def get_featured_parks(request):
    """ Returns recommended parks as JSON """
    featured_parks = Park.objects.filter(featured=True).prefetch_related('images')
    response = {
        'featured_parks': [{'id': n.pk, 'name': n.name, 'image': n.thumbnail}
                           for n in featured_parks]
    }
    return HttpResponse(json.dumps(response), mimetype='application/json')
a3b45fa5b467434bf375a46f420538e5d5d78688
20,329
from typing import Sequence
from typing import Any


def make_data_output(structures: Sequence[Artefact[bytes]]) -> Artefact[list[Any]]:
    """Take xyz structure from xtb and parse them to a list of dicts."""

    def to_dict(xyz: bytes) -> dict[str, Any]:
        as_str = xyz.decode().strip()
        energy = float(as_str.splitlines()[1].split()[1])
        return {"structure": as_str, "energy": energy}

    def sort_by_energy(*elements: dict[str, Any]) -> list[dict[str, Any]]:
        out = [el for el in elements]
        out = sorted(out, key=lambda x: x["energy"])  # type:ignore
        return out

    out = []
    for s in structures:
        out += [f.morph(to_dict, s, out=Encoding.json)]  # elements to dicts

    return f.reduce(sort_by_energy, *out)
ca9e7fd187ff98a9c9c5146e12f9bc8b0e1c0466
20,331
def total_angular_momentum(particles):
    """
    Returns the total angular momentum of the particles set.

    >>> from amuse.datamodel import Particles
    >>> particles = Particles(2)
    >>> particles.x = [-1.0, 1.0] | units.m
    >>> particles.y = [0.0, 0.0] | units.m
    >>> particles.z = [0.0, 0.0] | units.m
    >>> particles.vx = [0.0, 0.0] | units.ms
    >>> particles.vy = [-1.0, 1.0] | units.ms
    >>> particles.vz = [0.0, 0.0] | units.ms
    >>> particles.mass = [1.0, .5] | units.kg
    >>> particles.total_angular_momentum()
    quantity<[0.0, 0.0, 1.5] m**2 * kg * s**-1>
    """
    # equivalent to:
    # lx = (m * (y * vz - z * vy)).sum()
    # ly = (m * (z * vx - x * vz)).sum()
    # lz = (m * (x * vy - y * vx)).sum()
    return (particles.mass.reshape((-1, 1))
            * particles.position.cross(particles.velocity)).sum(axis=0)
8eca23b7b1a8fc8a7722543f9193f0e4a3397f24
20,332
def svn_repos_get_logs3(*args):
    """
    svn_repos_get_logs3(svn_repos_t repos, apr_array_header_t paths,
        svn_revnum_t start, svn_revnum_t end, int limit,
        svn_boolean_t discover_changed_paths, svn_boolean_t strict_node_history,
        svn_repos_authz_func_t authz_read_func,
        svn_log_message_receiver_t receiver, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_get_logs3(*args)
1dab074e8112e2be0d51709d5d3d93bfc11c8c7d
20,333
def insertTimerOnOutput(signal, type):
    """
    Plug the sout signal of the returned entity, instead of `signal`, to an
    input signal in order to enable the timer.
    - param signal: an output signal.
    - return: a Timer entity.
    """
    Timer = getTimerType(type)
    timer = Timer("timer_of_" + signal.name)
    plug(signal, timer.sin)
    return timer
c8914227e112916ac67faf532babb0119abc502f
20,334
from typing import List


def anomaly_metrics(contended_task_id: TaskId, contending_task_ids: List[TaskId]):
    """Helper method to create metric based on anomaly. uuid is used if provided."""
    metrics = []
    for task_id in contending_task_ids:
        uuid = _create_uuid_from_tasks_ids(contending_task_ids + [contended_task_id])
        metrics.append(Metric(
            name='anomaly',
            value=1,
            labels=dict(
                contended_task_id=contended_task_id,
                contending_task_id=task_id,
                resource=ContendedResource.MEMORY_BW,
                uuid=uuid,
                type='contention'
            ),
            type=MetricType.COUNTER
        ))
    return metrics
df67e16af8b0c018da347c47a430fbe137a8c353
20,335
import logging
import json


def alarm():
    """."""
    if request.method == 'POST':
        response = {'message': 'POST Accepted'}
        logging.info('alarm POSTED!')
        data = request.data
        logging.info(data)
        string = json.dumps(data)
        producer.send('SIP-alarms', string.encode())
        return response
    return ""
a4444c4bc3f761cfdeb98485c745b77e8227817e
20,336
def train_net(solver_prototxt, roidb, output_dir, pretrained_model=None,
              detection_pretrained_model=None, max_iters=40000):
    """Train a TD-CNN network."""
    roidb = filter_roidb(roidb)
    sw = SolverWrapper(solver_prototxt, roidb, output_dir,
                       pretrained_model=pretrained_model,
                       detection_pretrained_model=detection_pretrained_model)

    print 'Solving...'
    model_paths = sw.train_model(max_iters)
    print 'done solving'
    return model_paths
af85a78e4a477ab55e949fa0d6f6d44400d1f62f
20,337
def get_druminst_order(x):
    """helper function to determine order of drum instruments

    relies on standard sequence defined in settings
    """
    y = shared.get_inst_name(x + shared.octave_length + shared.note2drums)
    return shared.standard_printseq.index(y)
8cad6cd10487d51b6edd74444115d63b7e599641
20,338
def find_nearest_idx(array, value):
    """
    Find the index of the item in an array nearest to a given value.

    :param np.ndarray array: Array of values in which to look
    :param float value: Value for which the index of the closest value in `array` is desired.
    :rtype: int
    :return: The index of the item in `array` nearest to `value`
    """
    return (np.abs(array - value)).argmin()
3eb48426bf01c625419bbf87893b7e877fd0538d
20,339
def hard_tanh(x):
    """Hard tanh function

    Arguments:
        x: Input value

    hard_tanh(x) = {-1,      for x < -2,
                    tanh(x), for -2 < x < 2,
                    1,       for x > 2}

    returns value according to hard tanh function
    """
    return tf.maximum(
        tf.cast(-1, tf.float32),
        tf.minimum(tf.cast(1, tf.float32),
                   tf.cast(keras.backend.tanh(x) * 1.05, tf.float32))
    )
3c93f09aaeb57ee9bf4d3ccdb2ebe790333f8f67
20,340
async def get_publications(publication_id: str, embedded: bool = False):
    """
    Given a Publication ID, get the Publication record from metadata store.
    """
    publication = await get_publication(publication_id, embedded)
    return publication
8c4c81776abc1d8268192c23b1dee6c8c10bcfb0
20,341
def RandomNormal(inp):
    """ Random normally distributed weight initialization. """
    return np.random.randn(inp)
d6922748ece8ec880eec6ba1b701424cd6fdd149
20,342
def get_transceiver_description(sfp_type, if_alias):
    """
    :param sfp_type: SFP type of transceiver
    :param if_alias: Port alias name
    :return: Transceiver description
    """
    return "{} for {}".format(sfp_type, if_alias)
ccb29d937495e37bc41e6f2cf35747d2acfe0d47
20,343
def find_contiguous_set(target_sum: int, values: list[int]) -> list[int]:
    """Returns set of at least 2 contiguous values that add to target sum."""
    i = 0
    set_ = []
    sum_ = 0
    while sum_ <= target_sum:
        sum_ += values[i]
        set_.append(values[i])
        if sum_ == target_sum and len(set_) >= 2:
            return set_
        i += 1
    return []
64b6c1f99946856a33a79fed3d43395a5a9c1000
20,344
def nesoni_report_to_JSON(reportified): """ Convert a nesoni nway.any file that has been reportified to JSON See: tables.rst for info on what is stored in RethinkDB :param reportified: the reportified nway.any file (been through nway_reportify()). This is essentially a list of tuples :returns: a list of JSON """ stats = {} parsed_list = [] for position in reportified: for elem in position: skip = False ref_id, pos, strain, old, ftype, new, evidence, cons, uncalled = elem ref_id = '.'.join(ref_id.split('.')[:-1]) # Initialise the stats... if strain not in stats: stats[strain] = 0 if new == old: # Have no change #dat = ["conserved"]+[None]*9 skip = True elif new == 'N': # Have an uncalled base #dat = ["uncalled"]+[None]*9 skip = True # Check for mixtures... elif ftype == "substitution" and new.find('-') != -1: # Deletion hidden in substitution ftype = 'deletion' dat = extract_consequences(cons, ftype) stats[strain] = stats[strain]+1 elif ftype == "substitution" and len(new) > 1: # Insertion hidden in substitution ftype = 'insertion' dat = extract_consequences(cons, ftype) stats[strain] = stats[strain]+1 elif ftype == "deletion" and new.find('-') == -1 and len(new) == 1: # Substitution hidden in deletions ftype = 'substitution' dat = extract_consequences(cons, ftype) stats[strain] = stats[strain]+1 elif ftype == "deletion" and new.find('-') == -1 and len(new) > 1: # Insertion hidden in deletions ftype = 'insertion' dat = extract_consequences(cons, ftype) stats[strain] = stats[strain]+1 elif ftype == "insertion" and new.find('-') != -1: # Deletion hidden in insertions ftype = 'deletion' dat = extract_consequences(cons, ftype) stats[strain] = stats[strain]+1 elif ftype == "insertion" and new.find('-') == -1 and len(new) == 1: # Substitution hidden in insertions ftype = 'substitution' dat = extract_consequences(cons, ftype) stats[strain] = stats[strain]+1 # We have the same change state across all strains else: dat = extract_consequences(cons, ftype) stats[strain] = stats[strain]+1 obs_count = parsers.parse_evidence(evidence) # Some simple tests the_classes = ['insertion', 'deletion', 'substitution'] if not skip: assert dat[0] in the_classes json = {"id": strain+'_'+ref_id+'_'+str(pos), "StrainID": strain, "Position": pos, "LocusTag": dat[2], "Class": dat[0], "SubClass": dat[1], "RefBase": old, "ChangeBase": new, "CDSBaseNum": dat[3], "CDSAANum": dat[4], "CDSRegion": dat[5], "RefAA": dat[6], "ChangeAA": dat[7], "Product": dat[8], "CorrelatedChange": dat[9], "Evidence": obs_count, "UncalledBlock": uncalled } parsed_list.append(json) return parsed_list, stats
b78d9e9c124104a4e4c634e8fc2926804a06629d
20,345
def DeferredLightInfoEnd(builder):
    """This method is deprecated. Please switch to End."""
    return End(builder)
f2bad6ffea3170c53a13206ab43d8b7193ccb89d
20,346
def move_character(character: dict, direction_index=None, available_directions=None) -> tuple:
    """
    Change character's coordinates.

    :param character: a dictionary
    :param direction_index: a non-negative integer, optional
    :param available_directions: a list of strings, optional
    :precondition: character keys must contain "X-coordinate" and "Y-coordinate"
    :precondition: character values must be integers
    :precondition: direction_index must be a non-negative integer validated by validate_option function or None
    :precondition: available_directions items must each be either "north", "south", "east" or "west", or None
    :postcondition: updates character X or Y coordinate based on direction choice if available_directions is not None
    :postcondition: otherwise makes character X or Y coordinate equal to the previous coordinates
    :return: new character's coordinates as a tuple

    >>> protagonist = {"Y-coordinate": 1, "X-coordinate": 1, "Previous coordinates": (0, 1)}
    >>> move_character(protagonist, 0, ["south", "west"])
    (2, 1)
    >>> protagonist = {"Y-coordinate": 1, "X-coordinate": 1, "Previous coordinates": (0, 1)}
    >>> move_character(protagonist)
    (0, 1)
    """
    directions_dictionary = {"north": -1, "south": 1, "west": -1, "east": 1}
    if available_directions is not None:
        direction = available_directions[direction_index]
        character["Previous coordinates"] = character["Y-coordinate"], character["X-coordinate"]
        if direction in "north south":
            character["Y-coordinate"] += directions_dictionary[direction]
        else:
            character["X-coordinate"] += directions_dictionary[direction]
    else:
        character["Y-coordinate"] = character["Previous coordinates"][0]
        character["X-coordinate"] = character["Previous coordinates"][1]
    return character["Y-coordinate"], character["X-coordinate"]
cc5cc3115437d0dc4e9b7ba7845565ee8147be30
20,348
def expandednodeid_to_str(exnode):
    """SOPC_ExpandedNodeId or SOPC_ExpandedNodeId* to its str representation in the OPC-UA XML syntax."""
    a = ''
    if exnode.ServerIndex:
        a += 'srv={};'.format(exnode.ServerIndex)
    nsu = string_to_str(ffi.addressof(exnode.NamespaceUri))
    if nsu:
        a += 'nsu={};'.format(nsu)
    b = ffi.string(libsub.SOPC_NodeId_ToCString(ffi.addressof(exnode.NodeId))).decode()
    return a + b
062012a11128a42bbaeb8d7ff316a2a29a31928b
20,349
def scrap_insta_description(inst) -> str:
    """
    Scrape the description from the Instagram account HTML.
    """
    description = inst.body.div.section.main.div.header.section.find_all(
        'div')[4].span.get_text()
    return description
898fa0d1cb44606374b131b8b471178a22ab74ed
20,350
import time


def merge_by_sim(track_sim_list, track_data_dic, track_list, reid_th):
    """
    Merge by sim.
    Ref: https://stackoverflow.com/questions/30089675/clustering-cosine-similarity-matrix
    """
    print('start clustering')
    merge_start_time = time.time()
    cost_matrix = get_cost_matrix(track_sim_list, track_data_dic, track_list)
    cluster_labels = AgglomerativeClustering(n_clusters=None,
                                             distance_threshold=reid_th,
                                             affinity='precomputed',
                                             linkage='average').fit_predict(cost_matrix)
    labels = get_match(cluster_labels)
    # print(merged_index_list)
    print('we have %d global tracks after merge, time for merge %.4f s'
          % (len(labels), time.time() - merge_start_time))

    # get real data
    valid_global_list = []
    valid_count = 0
    for person_track_list in labels:
        temp = []
        for index in person_track_list:
            record_name = track_list[index]
            temp.append(record_name)
        if len(temp) > 1:
            cameras = set([t[0] for t in temp])
            if len(cameras) > 1:
                valid_count += 1
                valid_global_list.append(temp)
                # clustered_list.append(temp)
    print(f'after merge, %d valid global ids are created: {valid_global_list}' % valid_count)
    return valid_global_list
68478cb1367dcd2ef8a2550c54ea57e265bbbdf5
20,351
def cycle(*args, **kargs): """ Returns the next cycle of the given list Everytime ``cycle`` is called, the value returned will be the next item in the list passed to it. This list is reset on every request, but can also be reset by calling ``reset_cycle()``. You may specify the list as either arguments, or as a single list argument. This can be used to alternate classes for table rows:: # In Myghty... % for item in items: <tr class="<% cycle("even", "odd") %>"> ... use item ... </tr> % #endfor You can use named cycles to prevent clashes in nested loops. You'll have to reset the inner cycle, manually:: % for item in items: <tr class="<% cycle("even", "odd", name="row_class") %> <td> % for value in item.values: <span style="color:'<% cycle("red", "green", "blue", name="colors") %>'"> item </span> % #endfor <% reset_cycle("colors") %> </td> </tr> % #endfor """ if len(args) > 1: items = args else: items = args[0] name = kargs.get('name', 'default') cycles = request_config().environ.setdefault('railshelpers.cycles', {}) cycle = cycles.setdefault(name, iterdict(items)) if cycles[name].get('items') != items: cycle = cycles[name] = iterdict(items) return cycle['iter'].next()
6a1606d5fc65eb690c4ccbad3c662dc671219502
20,353
def calc_rmsd(struct1, struct2):
    """
    Basic rmsd calculator for molecules and molecular clusters.
    """
    geo1 = struct1.get_geo_array()
    ele1 = struct1.elements
    geo2 = struct2.get_geo_array()
    ele2 = struct2.elements

    dist = cdist(geo1, geo2)
    idx1, idx2 = linear_sum_assignment(dist)

    geo1 = geo1[idx1]
    geo2 = geo2[idx2]

    rmsd = np.mean(np.linalg.norm(geo1 - geo2, axis=-1))
    return rmsd
cb368f6e4edaf223194f1e965a0f926f33e72330
20,354
def validate_flat_dimension(d):
    """Return True if a 'key:value' dimension is valid."""
    key, _, val = d.partition(':')
    return validate_dimension_value(val) and validate_dimension_key(key)
ee663bbdc62dab3d247c09ee5950dc63dafcad15
20,355
def __get_from_imports(import_tuples):
    """ Returns import names and fromlist

    import_tuples are specified as (name, fromlist, ispackage)
    """
    from_imports = [(tup[0], tup[1]) for tup in import_tuples
                    if tup[1] is not None and len(tup[1]) > 0]
    return from_imports
28df8225ad9440386342c38657944cfe7ac3d3ca
20,357
def change_dt_utc_to_local(dt):
    """ change UTC date time to local time zone Europe/Paris """
    return convert_utctime_to_timezone(dt, '%Y%m%dT%H%M%SZ', 'Europe/Paris', '%Y%m%dT%H%M%S')
09b52fd15a4fd9512e05f0a2801927b7f8be385f
20,358
import logging import warnings def sarimax_ACO_PDQ_search(endo_var, exog_var_matrix, PDQS, searchSpace, options_ACO, low_memory=False, verbose=False): """ Searchs SARIMAX PDQ parameters. endo_var: is the principal variable. exog_var_matrix: is the matrix of exogenous variables. PDQS: list of pdqs parameters. EG: [1, 1, 1, 24]. searchSpace: is the space of search for the particles. E.G.: p = d = q = range(0, 2) searchSpace = [p, d, q] pso_particles: is the number of particles. pso_interations: is the number of interations. options_ACO: parametrization for ACO algorithm. E.G.: {'antNumber':2, 'antTours':1, 'alpha':2, 'beta':2, 'rho':0.5, 'Q':2} """ def SARIMAX_AICc(X, *args): endo = args[0][0] exog = args[0][1] param_seasonal = args[0][2] param = X[0:3] if param_seasonal[-1] < 0: param_seasonal[-1] = 1 mod = SARIMAX(endo, exog=exog, order=param, seasonal_order=param_seasonal, enforce_stationarity=False, enforce_invertibility=False) aicc = np.inf try: results = mod.fit(disp=False, low_memory=low_memory) aicc = results.aicc except: pass return aicc antNumber = options_ACO['antNumber'] antTours = options_ACO['antTours'] alpha = options_ACO['alpha'] beta = options_ACO['beta'] rho = options_ACO['rho'] Q = options_ACO['Q'] if verbose: logging.info("Original search Space: {0}".format(searchSpace)) warnings.filterwarnings("ignore") # specify to ignore warning messages ACOsearch = ACO(alpha, beta, rho, Q) best_result, _ = ACOsearch.optimize(antNumber, antTours, dimentionsRanges=searchSpace, function=SARIMAX_AICc, functionArgs=[endo_var, exog_var_matrix, PDQS], verbose=verbose) logging.info("BEST result: {0}.".format(best_result)) param = best_result param_seasonal = PDQS mod = SARIMAX(endo_var, exog=exog_var_matrix, order=param, seasonal_order=param_seasonal, enforce_stationarity=False, enforce_invertibility=False) results = mod.fit(disp=False) return results.aicc, best_result
acae883a3aaeb501646f121753924fb321f471e5
20,359
def get_index(x, value, closest=True):
    """Get the index of an array that corresponds to a given value.

    If closest is true, get the index of the value closest to the value entered.
    """
    if closest:
        index = np.abs(np.array(x) - value).argsort()[0]
    else:
        index = list(x).index(value)
    return index
19dc68407d576492f25235fc1efcc79895d8cb3f
20,361
def process_spawn(window, args):
    """
    Spawns a child process with its stdin/out/err wired to a PTY in `window`.

    `args` should be a list where the first item is the executable and the
    remaining will be passed to it as command line arguments.

    Returns a process object.
    """
    return (yield Trap.PROCESS_SPAWN, window, args)
979599c767f3e391541e8fd02a168cd29659bcea
20,362
import json import re def insertTaskParams(taskParams, verbose=False, properErrorCode=False, parent_tid=None): """Insert task parameters args: taskParams: a dictionary of task parameters verbose: True to see verbose messages properErrorCode: True to get a detailed error code parent_tid: ID of the parent task returns: status code 0: communication succeeded to the panda server 255: communication failure tuple of return code, message from the server, and taskID if successful, or error message if failed 0: request is processed 1: duplication in DEFT 2: duplication in JEDI 3: accepted for incremental execution 4: server error """ # serialize taskParamsStr = json.dumps(taskParams) # instantiate curl curl = _Curl() curl.sslCert = _x509() curl.sslKey = _x509() curl.verbose = verbose # execute url = baseURLSSL + '/insertTaskParams' data = {'taskParams':taskParamsStr, 'properErrorCode':properErrorCode} if parent_tid: data['parent_tid'] = parent_tid status,output = curl.post(url,data) try: loaded_output = list(pickle_loads(output)) # extract taskID try: m = re.search('jediTaskID=(\d+)', loaded_output[-1]) taskID = int(m.group(1)) except Exception: taskID = None loaded_output.append(taskID) return status, loaded_output except Exception as e: errStr = dump_log("insertTaskParams", e, output) return EC_Failed, output+'\n'+errStr
baa9f0c783361cec17cec9f6259e4f856aa0121d
20,363
def read_domains(file_name):
    """
    Read the domain storage file, collect the domains to probe, and extract the
    registered (main) domains.
    Note: domains that do not conform to the naming rules are discarded.
    """
    domains = []
    main_domains = []
    no_fetch_extract = tldextract.TLDExtract(suffix_list_urls=None)
    file_path = './unverified_domain_data/'
    with open(file_path + file_name, 'r') as fp:
        for d in fp.readlines():
            domain_tld = no_fetch_extract(d.strip())
            # extract the top-level-domain and registered-domain parts
            tld, reg_domain = domain_tld.suffix, domain_tld.domain
            if tld and reg_domain:
                main_domains.append(reg_domain + '.' + tld)
                domains.append(d.strip())
            else:
                # message: "domain %s does not conform to the rules; it will not be probed"
                logger.logger.warning('域名%s不符合规范,不进行探测' % d.strip())
    return domains, main_domains
856a7e7d93023bf59981d51a6210b988236a14a9
20,365
def determine_last_contact_incomplete(end_time_skyfield, events, times, antenna):
    """
    Returns the last contact, completed, and the number of events in the incomplete sequence.

    :param end_time_skyfield: skyfield time
    :param events: array of int
    :param times: array of skyfield times
    :param antenna: Antenna object
    :return: Contact object, int
    """
    incomplete_event_sequence_end = list()

    # let the last contact end at the end of the analysis period
    incomplete_event_sequence_end.append(end_time_skyfield)

    # if the last event is a culmination, add it to the end
    if events[-1] == int(SkyfieldEventTypes.Culminate):
        incomplete_event_sequence_end.append(times[-1])

    # if the last event in the list is a rise, add it to the end
    if events[-1 - len(incomplete_event_sequence_end)] == int(SkyfieldEventTypes.Rise):
        incomplete_event_sequence_end.append(times[-1 - len(incomplete_event_sequence_end)])

    # reverse the list for chronologically correct order
    incomplete_event_sequence_end.reverse()

    incomplete_contact_end = Contact(antenna)
    for time in incomplete_event_sequence_end:
        incomplete_contact_end.add_relative_position_by_skyfield_time(time)

    return incomplete_contact_end, len(incomplete_event_sequence_end)
8a234bed084fa829c7649de99b933a59c89cba5b
20,366
def lam_est(data, J, B, Q, L = 3, paras = [3, 20], n_trees = 200, include_reward = 0, fixed_state_comp = None, method = "QRF"): """ construct the pointwise cov lam (for both test stat and c.v.), by combine the two parts (estimated and observed) Returns ------- lam: (Q-1)-len list of four lam matrices (n * T-q * B) """ dx, da = data[0][0].shape[1], data[0][1].shape[1] if fixed_state_comp is not None: dx += 1 # generate uv rseed(0); npseed(0) if include_reward: uv = [randn(B, dx + 1), randn(B, dx + da)] else: uv = [randn(B, dx), randn(B, dx + da)] # estimate characteristic values (cross-fitting): phi_R, psi_R, phi_I, # psi_I estimated = cond_char_vaule_est(data = data, uv = uv, paras = paras, n_trees = n_trees, L = L, J = J, include_reward = include_reward, fixed_state_comp = fixed_state_comp, method = method) # ,obs_ys if paras == "CV_once": CV_paras = estimated return CV_paras else: estimated_cond_char = estimated # cos and sin in batch. (n*T*dx) * (dx* B) = n * T * B: # c_X,s_X,c_XA,s_XA observed_cond_char = obs_char(data = data, uv = uv, include_reward = include_reward, fixed_state_comp = fixed_state_comp) # combine the above two parts to get cond. corr. estimation. lam = lam_formula(estimated_cond_char, observed_cond_char, J, Q) return lam
3a1637f2e522e414e7ddc5c470310c1f2c460ce0
20,367
def initialize_all(y0, t0, t1, n):
    """ An initialization routine for the different ODE solving
    methods in the lab. This initializes Y, T, and h. """
    if isinstance(y0, np.ndarray):
        # one row per time step, one column per component of y0
        Y = np.empty((n, y0.size)).squeeze()
    else:
        Y = np.empty(n)
    # print y0
    # print Y
    Y[0] = y0
    T = np.linspace(t0, t1, n)
    h = float(t1 - t0) / (n - 1)
    return Y, T, h
552a92dd50aca926b1cb6c9e6aaafd1c1401b5c3
20,369
import random


def _sequence_event(values, length, verb):
    """Returns sequence (finite product) event.

    Args:
      values: List of values to sample from.
      length: Length of the sequence to generate.
      verb: Verb in infinitive form.

    Returns:
      Instance of `probability.FiniteProductEvent`, together with a text
      description.
    """
    del verb  # unused
    samples = [random.choice(values) for _ in range(length)]
    events = [probability.DiscreteEvent([sample]) for sample in samples]
    event = probability.FiniteProductEvent(events)
    sequence = ''.join(str(sample) for sample in samples)
    event_description = 'sequence {sequence}'.format(sequence=sequence)
    return event, event_description
1addd21c6c39451ac29f9bcf7551f070884e9328
20,370
def ndarrayToQImage(img):
    """ convert numpy array image to QImage """
    if img.dtype != 'uint8':
        raise ValueError('Only support 8U data')
    if img.ndim == 3:
        # 3-channel colour image
        t = QtGui.QImage.Format_RGB888
    elif img.ndim == 2:
        # single-channel grayscale image
        t = QtGui.QImage.Format_Grayscale8
    else:
        raise ValueError('Only support 1 and 3 channel image')
    qimage = QtGui.QImage(img.data, img.shape[1], img.shape[0], img.strides[0], t)
    return qimage
a08207266b03adff2dd66421572a0905f9525844
20,371
from datetime import datetime import re import base64 import uuid def object_hook(dct, compile_re=False, ensure_tzinfo=True, encoding=None): """ Object hook used by hoplite_loads. This object hook can encode the dictionary in the right text format. For example, json.loads by default will decode '{'hey':'hey'}' into {u'hey':u'hey'} rather than {'hey':'hey'}. If encoding is set to utf-8, this object_hook can make '{'hey':'hey'}' decode to {'hey':'hey'} This object hook also decodes extended json types such as objectId and datetime objects. Datetime objects also have the option to be decoded with or without timezone information. :param dct: Dictionary this object hook is to operate on. :param ensure_tzinfo: Boolean deciding if timezone info should be added to decoded datetime objects :param encoding: choice of text decoding(unicode/utf-8, perhaps others) :return: """ if encoding: # Converts all keys and unicode values in the top layer of the current # dictionary to the desired encoding type. new_dct = {} for key, value in dct.iteritems(): if isinstance(key, unicode): key = key.encode(encoding) if isinstance(value, unicode): value = value.encode(encoding) new_dct[key] = value dct = new_dct if "$oid" in dct: return ObjectId(str(dct["$oid"])) if "$ref" in dct: return DBRef(dct["$ref"], dct["$id"], dct.get("$db", None)) if "$date" in dct: secs = float(dct["$date"]) / 1000.0 if ensure_tzinfo: return EPOCH_AWARE + datetime.timedelta(seconds=secs) else: # Avoid adding time zone info by default, unlike # bson.json_util.loads. If the developer really wants this, they # will have to specify it. return EPOCH_NAIVE + datetime.timedelta(seconds=secs) if "$regex" in dct: flags = 0 # PyMongo always adds $options but some other tools may not. for opt in dct.get("$options", ""): flags |= _RE_OPT_TABLE.get(opt, 0) if compile_re: return re.compile(dct["$regex"], flags) else: return Regex(dct["$regex"], flags) if "$minKey" in dct: return MinKey() if "$maxKey" in dct: return MaxKey() if "$binary" in dct: if isinstance(dct["$type"], int): dct["$type"] = "%02x" % dct["$type"] subtype = int(dct["$type"], 16) if subtype >= 0xffffff80: # Handle mongoexport values subtype = int(dct["$type"][6:], 16) return Binary(base64.b64decode(dct["$binary"].encode()), subtype) if "$code" in dct: return Code(dct["$code"], dct.get("$scope")) if "$uuid" in dct: return uuid.UUID(dct["$uuid"]) if "$undefined" in dct: return None if "$numberLong" in dct: return Int64(dct["$numberLong"]) if "$timestamp" in dct: tsp = dct["$timestamp"] return Timestamp(tsp["t"], tsp["i"]) return dct
bfe93f67813bda8e77e93a7ee33b6dc3bcbfe16a
20,372
import requests
import pickle
import bs4 as bs  # required for bs.BeautifulSoup below


def scrape_sp500_tickers():
    """Scrape the wikipedia page for the latest list of sp500 companies

    Returns:
        [pickle]: [list of sp500 companies]
    """
    # set get file to look at wikipedia's list of sp500 companies
    resp = requests.get('http://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
    soup = bs.BeautifulSoup(resp.text, 'lxml')
    table = soup.find('table', {'class': 'wikitable sortable'})
    tickers = []

    # cycle through wiki table to get all of the stock tickers
    for row in table.findAll('tr')[1:]:
        ticker = row.findAll('td')[0].text
        tickers.append(ticker)

    # save to pickle to speed up process
    with open("sp500tickers.pickle", "wb") as f:
        pickle.dump(tickers, f)

    print('Scraping Complete')
    return tickers
0c318e84f488c93254256394b1fe5d57d58c81a5
20,374
def get_top_k_recs(user_reps, item_reps, k):
    """
    For each user compute the `k` topmost-relevant items

    Args:
        user_reps (dict): representations for all `m` unique users
        item_reps (:obj:`np.array`): (n, d) `d` latent features for all `n` items
        k (int): no. of most relevant items

    Returns:
        item_recs ([[int]]): list of personalized recommendations for each user
            as lists of item IDs
    """
    n_user = len(user_reps)
    item_recs = []

    for u in range(n_user):
        user_embed = user_reps[u]['embed']
        user_item_scores = np.dot(item_reps, user_embed)
        item_recs.append(list(np.argsort(user_item_scores)[::-1][:k]))

    return item_recs
e209b8794fe3d8f8002dbbe48d858b335171c2f5
20,375
def some_function(t):
    """Another silly function."""
    return t + " python"
2bd8adc315e97409758f13b0f777ccd17eb4b820
20,377
def add_context_for_join_form(context, request):
    """
    Helper function used by view functions below
    """
    # If the client has already joined a market
    if 'trader_id' in request.session:

        # If trader is in database
        if Trader.objects.filter(id=request.session['trader_id']).exists():
            trader = Trader.objects.get(id=request.session['trader_id'])

            # If trader has been removed from market
            if trader.removed_from_market:
                request.session['removed_from_market'] = True

        # If trader has been deleted from database
        else:
            request.session['removed_from_market'] = True

        # We add this market to the context to notify the client
        market = get_object_or_404(
            Market, market_id=request.session['market_id'])
        context['market'] = market

    return context
aad1b592c6d28d9a69ec97a8be9d46f942cb3d7b
20,379
def create_list(inner_type_info: CLTypeInfo) -> CLTypeInfoForList:
    """Returns CL type information for a list.

    :param CLTypeInfo inner_type_info: Type information pertaining to each element within list.
    """
    return CLTypeInfoForList(
        typeof=CLType.LIST,
        inner_type_info=inner_type_info
    )
e74731984cc83172c60a79e1b1efe05d90a32342
20,380
def get_baseline(baseline_filename, plugin_filenames=None):
    """
    :type baseline_filename: string
    :param baseline_filename: name of the baseline file

    :type plugin_filenames: tuple
    :param plugin_filenames: list of plugins to import

    :raises: IOError
    :raises: ValueError
    """
    if not baseline_filename:
        return

    raise_exception_if_baseline_file_is_unstaged(baseline_filename)

    return SecretsCollection.load_baseline_from_string(
        _get_baseline_string_from_file(
            baseline_filename,
        ),
        plugin_filenames=plugin_filenames,
    )
f4318ee676e0c670f152feef0884a220eb1a38ac
20,381
def del_ind_purged(*args):
    """
    del_ind_purged(ea)
    """
    return _ida_nalt.del_ind_purged(*args)
80133af5acff0a9c284ec7894abd10bafa2671a1
20,382
import random
import hmac


def hash_password(password, salthex=None, reps=1000):
    """Compute secure (hash, salthex, reps) triplet for password.

    The password string is required. The returned salthex and reps
    must be saved and reused to hash any comparison password in
    order for it to match the returned hash.

    The salthex string will be chosen randomly if not provided, and
    if provided must be an even-length string of hexadecimal
    digits, recommended length 16 or greater. E.g.
    salt="([0-9a-z][0-9a-z])*"

    The reps integer must be 1 or greater and should be a
    relatively large number (default 1000) to slow down brute-force
    attacks."""
    if not salthex:
        salthex = ''.join(["%02x" % random.randint(0, 0xFF)
                           for d in range(0, 8)])
    salt = []
    for p in range(0, len(salthex), 2):
        salt.append(int(salthex[p:p + 2], 16))
    salt = bytes(salt)
    if reps < 1:
        reps = 1
    msg = password.encode()
    for r in range(0, reps):
        msg = hmac.HMAC(salt, msg, digestmod='MD5').hexdigest().encode()
    return (msg.decode(), salthex, reps)
cac468818560ed52b415157dde71d5416c34478c
20,383
from typing import Dict


def create_scheduled_job_yaml_spec(
    descriptor_contents: Dict, executor_config: ExecutorConfig, job_id: str, event: BenchmarkEvent
) -> str:
    """
    Creates the YAML spec corresponding to a descriptor passed as parameter

    :param event: event that triggered this execution
    :param descriptor_contents: dict containing the parsed descriptor
    :param executor_config: configuration for the transpiler
    :param job_id: str
    :return: yaml string for the given descriptor
    """
    descriptor = BenchmarkDescriptor.from_dict(descriptor_contents, executor_config.descriptor_config)

    bai_k8s_builder = create_scheduled_benchmark_bai_k8s_builder(
        descriptor, executor_config.bai_config, job_id, event=event
    )

    return bai_k8s_builder.dump_yaml_string()
48e19b6637eafee72b0b3b04a1e31c8e6c163971
20,384
def ArgMin(iterable, key=None, default=None, retvalue=False): """ iterable >> ArgMin(key=None, default=None, retvalue=True) Return index of first minimum element (and minimum) in input (transformed or extracted by key function). >>> [1, 2, 0, 2] >> ArgMin() 2 >>> ['12', '1', '123'] >> ArgMin(key=len, retvalue=True) (1, '1') >>> ['12', '1', '123'] >> ArgMin(key=len) 1 >>> [] >> ArgMin(default=0) 0 >>> [] >> ArgMin(default=(None, 0), retvalue=True) (None, 0) >>> data = [(3, 10), (2, 20), (1, 30)] >>> data >> ArgMin(key=0) 2 >>> data >> ArgMin(1) 0 :param iterable iterable: Iterable over numbers :param int|tuple|function|None key: Key function to extract or transform elements. None = identity function. :param object default: Value returned if iterable is empty. :param bool retvalue: If True the index and the value of the minimum element is returned. :return: index of smallest element according to key function and the smallest element itself if retvalue==True. :rtype: object | tuple """ try: f = colfunc(key) i, v = min(enumerate(iterable), key=lambda i_e1: f(i_e1[1])) return (i, v) if retvalue else i except Exception: return default
9c2515c3a37ab82e2b5df6b5d8dcf5ded6ad15ad
20,386
import scipy


def localize_peaks_monopolar_triangulation(traces, local_peak, contact_locations, neighbours_mask,
                                           nbefore, nafter, max_distance_um):
    """
    This method is from Julien Boussard,
    see spikeinterface.toolkit.postprocessing.unit_localization
    """
    peak_locations = np.zeros(local_peak.size, dtype=dtype_localize_by_method['monopolar_triangulation'])

    for i, peak in enumerate(local_peak):
        chan_mask = neighbours_mask[peak['channel_ind'], :]
        chan_inds, = np.nonzero(chan_mask)

        local_contact_locations = contact_locations[chan_inds, :]

        # wf is (nsample, nchan) - channels restricted to the neighbours only
        wf = traces[peak['sample_ind'] - nbefore:peak['sample_ind'] + nafter, :][:, chan_inds]

        wf_ptp = wf.ptp(axis=0)
        x0, bounds = make_initial_guess_and_bounds(wf_ptp, local_contact_locations, max_distance_um)

        args = (wf_ptp, local_contact_locations)
        output = scipy.optimize.least_squares(estimate_distance_error, x0=x0, bounds=bounds, args=args)

        peak_locations[i] = tuple(output['x'])

    return peak_locations
c751ec223423007b170ce0020f6c21c72cff3cc2
20,387
def format_dict_with_indention(data):
    """Return a formatted string of key value pairs

    :param data: a dict
    :rtype: a string formatted to key='value'
    """
    if data is None:
        return None
    return jsonutils.dumps(data, indent=4)
085ac029aa73e5049eeec12e021997a5067966ce
20,388
def _Rx(c, s):
    """Construct a rotation matrix around X-axis given cos and sin.

    The `c` and `s` MUST satisfy c^2 + s^2 = 1 and have the same shape.

    See https://en.wikipedia.org/wiki/Rotation_matrix#Basic_rotations.
    """
    o = np.zeros_like(c)
    i = np.ones_like(o)
    return _tailstack2([[i, o, o], [o, c, -s], [o, s, c]])
01d436bee07458ede0484ed745ccf72568214240
20,390
def is_fraud(data):
    """
    Identifies if the transaction was fraud
    :param data: the data in the transaction
    :return: true if the transaction was fraud, false otherwise
    """
    return data[1] == 1
115e45a10f3429b9c33bc81fd94c24eff712f618
20,391
import scipy


def EvalBinomialPmf(k, n, p):
    """Evaluates the binomial pmf.

    Returns the probability of k successes in n trials with probability p.
    """
    return scipy.stats.binom.pmf(k, n, p)
0720359be48b514465eb3a10d3f271ac3c25dfb9
20,392
def reformat_icd_code(icd_code: str, is_diag: bool = True) -> str:
    """Put a period in the right place because the MIMIC-III data files exclude them.

    Generally, procedure ICD codes have dots after the first two digits, while
    diagnosis ICD codes have dots after the first three digits.

    Adopted from: https://github.com/jamesmullenbach/caml-mimic
    """
    icd_code = "".join(icd_code.split("."))
    if is_diag:
        if icd_code.startswith("E"):
            if len(icd_code) > 4:
                icd_code = icd_code[:4] + "." + icd_code[4:]
        else:
            if len(icd_code) > 3:
                icd_code = icd_code[:3] + "." + icd_code[3:]
    else:
        icd_code = icd_code[:2] + "." + icd_code[2:]
    return icd_code
4992886b257dab5361f84dd0c7774f677466437f
20,394
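A few illustrative calls (the codes below are arbitrary examples): E-prefixed diagnosis codes get the dot after four characters, other diagnosis codes after three, and procedure codes after two.

assert reformat_icd_code("E8109") == "E810.9"                # E-prefixed diagnosis code
assert reformat_icd_code("25000") == "250.00"                # ordinary diagnosis code
assert reformat_icd_code("0331", is_diag=False) == "03.31"   # procedure code
assert reformat_icd_code("V10") == "V10"                     # too short to need a dot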
def parse_parent(self): """Parse enclosing arglist of self""" gtor_left = tokens_leftwards(self.begin) gtor_right = tokens_rightwards(self.end) enc = Arglist() enc.append_subarglist_right(self) # _left could have worked equally well try: parse_left(enc, gtor_left) parse_right(enc, gtor_right) except StopIteration: return None return enc.complete()
aeae5b7d56614299d0d95d52a1cd7e8cecd036ff
20,395
def dump_annotation(ribo_handle): """ Returns annotation of a ribo file in bed format in string form. Parameters ---------- ribo_handle : h5py.File hdf5 handle for the ribo file Returns ------- A string that can be output directly as a bed file. """ boundaries = get_region_boundaries(ribo_handle) names = get_reference_names(ribo_handle) bed_rows = list() for ref_name, ref_boundaries in zip(names, boundaries): for region_name, region_boundaries in zip(REGION_names, ref_boundaries ): if region_boundaries[1] <= region_boundaries[0]: continue bed_entries = tuple( map( str, [ref_name, region_boundaries[0], region_boundaries[1], region_name, 0, "+"] ) ) bed_rows.append( "\t".join(bed_entries) ) return "\n".join(bed_rows)
54c98527d02ad5a136c0bc29bc794c3819fe5426
20,396
def calculate_ani(blast_results, fragment_length): """ Takes the input of the blast results, and calculates the ANI versus the reference genome """ sum_identity = float(0) number_hits = 0 # Number of hits that passed the criteria total_aligned_bases = 0 # Total of DNA bases that passed the criteria total_unaligned_fragments = 0 total_unaligned_bases = 0 conserved_dna_bases = 0 for query in blast_results: identity = blast_results[query][2] queryEnd = blast_results[query][7] queryStart = blast_results[query][6] perc_aln_length = (float(queryEnd) - float(queryStart)) / fragment_length[query] if float(identity) > float(69.9999) and float(perc_aln_length) > float(0.69999): sum_identity += float(identity) number_hits += 1 total_aligned_bases += fragment_length[query] else: total_unaligned_fragments += 1 total_unaligned_bases += fragment_length[query] if float(identity) > float(89.999): conserved_dna_bases += fragment_length[query] return sum_identity, number_hits, total_aligned_bases, total_unaligned_fragments, total_unaligned_bases
09b649dda337d2b812f5c5fd9ec75b34737e3f15
20,399
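A toy invocation, assuming the per-query values follow BLAST outfmt-6 column order so that index 2 is percent identity and indices 6/7 are the query start/end. Note that conserved_dna_bases is tallied inside the function but not included in the returned tuple.

blast_results = {
    "frag_1": ["frag_1", "ref", 98.5, 1000, 10, 0, 1, 1000, 500, 1499, 0.0, 1800.0],
    "frag_2": ["frag_2", "ref", 65.0, 400, 80, 5, 1, 400, 900, 1299, 1e-20, 300.0],
}
fragment_length = {"frag_1": 1020, "frag_2": 1020}

sum_identity, hits, aligned, unaligned_frags, unaligned_bases = calculate_ani(
    blast_results, fragment_length)
ani = sum_identity / hits if hits else 0.0
print(ani, hits, aligned, unaligned_frags, unaligned_bases)  # 98.5 1 1020 1 1020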
from typing import List from typing import Dict def process_line(line: str, conditional_chain: List[str], fields: Dict[str, str]): """ Processes a line in the template, i.e. returns the output html code after evaluating all if statements and filling the fields. Since we oftentimes are in the middle of several if statements, we need to pass the current conditional_chain (i.e. the list of if statments the following line will be subject to) on (and also need to use it). Args: line: Line we are processing conditional_chain: In which conditionals are we currently enclosed? fields: field values Returns: (html output, conditional_chain) """ after = line out = "" while after: before, enclosed, after = next_braces(after) if evaluate_conditional_chain(conditional_chain, fields): out += before if is_pos_conditional(enclosed) or is_neg_conditional(enclosed): conditional_chain.append(enclosed) elif is_close_conditional(enclosed): if not len(conditional_chain) >= 1: _ = "Closing conditional '{}' found, but we didn't encounter" \ " a conditional before.".format(enclosed) logger.error(_) else: field_name = get_field_name(enclosed) if field_name not in conditional_chain[-1]: _ = "Closing conditional '{}' found, but the last opened" \ " conditional was {}. I will " \ "ignore this.".format(enclosed, field_name) logger.error(_) else: conditional_chain.pop() elif is_field(enclosed): field_name = get_field_name(enclosed) if field_name in fields: out += fields[field_name] else: _ = "Could not find value for field '{}'".format(field_name) logger.error(_) return out, conditional_chain
d84a679d1dc292f3f9cccaf7b6f6718c1ff7cbfc
20,400
def get_groundstation_code(gsi): """ Translate a GSI code into an EODS domain code. Domain codes are used in dataset_ids. It will also translate common gsi aliases if needed. :type gsi: str :rtype: str >>> get_groundstation_code('ASA') '002' >>> get_groundstation_code('HOA') '011' >>> # Aliases should work too. >>> get_groundstation_code('ALSP') '002' """ groundstation = metadata.get_groundstation(gsi) if not groundstation: return None return groundstation.eods_domain_code
a9a04935cfceeb4ca4b90f7ba05ff5c7076ff917
20,401
def mixed_social_welfare(game, mix): """Returns the social welfare of a mixed strategy profile""" return game.expected_payoffs(mix).dot(game.num_role_players)
72c465211bdc79c9fcf2b1b9d8c7dd5abae5d8df
20,403
import re


def isValid(text):
    """
    Return True if the text mentions "blackjack" as a whole word,
    e.g. "Play Blackjack".
    """
    return bool(re.search(r'\bblackjack\b', text, re.IGNORECASE))
c1960a9683bde9701b4e3900edd41e4d6e5444ac
20,405
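The word boundary keeps partial-word matches out while the flag makes the match case-insensitive:

assert isValid("Let's play Blackjack tonight")
assert not isValid("He collects blackjacks")   # no whole-word match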
def init_app(app):
    """Initialize the Flask application.

    :param app: the Flask application instance
    :return: the same application instance
    """
    return app
6e460eb1fdc19553c6c4139e60db06daec507a2d
20,406
def bounds(geometry, **kwargs): """Computes the bounds (extent) of a geometry. For each geometry these 4 numbers are returned: min x, min y, max x, max y. Parameters ---------- geometry : Geometry or array_like **kwargs For other keyword-only arguments, see the `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_. Examples -------- >>> bounds(Geometry("POINT (2 3)")).tolist() [2.0, 3.0, 2.0, 3.0] >>> bounds(Geometry("LINESTRING (0 0, 0 2, 3 2)")).tolist() [0.0, 0.0, 3.0, 2.0] >>> bounds(Geometry("POLYGON EMPTY")).tolist() [nan, nan, nan, nan] >>> bounds(None).tolist() [nan, nan, nan, nan] """ # We need to provide the `out` argument here for compatibility with # numpy < 1.16. See https://github.com/numpy/numpy/issues/14949 geometry_arr = np.asarray(geometry, dtype=np.object_) out = np.empty(geometry_arr.shape + (4,), dtype="float64") return lib.bounds(geometry_arr, out=out, **kwargs)
bdf90b760fc7c62d66596159136961e7840077c4
20,407
import numpy as np


def getRidgeEdge(distComponent, maxCoord, direction):
    """
    Within the band of distance values between the maximum and (maximum - 1),
    find the point farthest from and the point nearest to the given direction.

    Used on a distance map from the green region: among the pixels whose
    distance is close to the maximum, this finds the points farthest from and
    nearest to the camera, which gives the centre of the stone's top face and
    the centre of its bottom face.
    """
    # Maximum distance value
    maxValue = distComponent[maxCoord]

    # Coordinates of all points whose value is >= (maximum - 1)
    ridge = np.array(np.where(distComponent >= maxValue - 1)).T

    # To avoid picking up a neighbouring stone, keep only points whose
    # distance from maxCoord is at most maxValue
    ridge = ridge[np.apply_along_axis(
        lambda pt: np.linalg.norm(np.array(pt) - maxCoord) <= maxValue,
        axis=1, arr=ridge)]

    # Dot product of each ridge point (relative to maxCoord) with the direction
    dotValue = np.apply_along_axis(
        lambda pt: np.dot(np.array(pt) - maxCoord, direction),
        axis=1, arr=ridge
    )

    # Return the coordinates of the points with the largest and smallest dot product
    maxEdgePoint = np.array(ridge[np.argmax(dotValue)])
    minEdgePoint = np.array(ridge[np.argmin(dotValue)])
    return maxEdgePoint, minEdgePoint
b22b592ee9467f1205d49e5c83dfe978b5dc2f35
20,408
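A synthetic demonstration on the distance transform of a disk (standing in for the stone's distance-from-green map); the two returned points are the ridge pixels farthest along and against the chosen viewing direction.

import numpy as np
from scipy import ndimage

mask = np.zeros((60, 60), dtype=bool)
yy, xx = np.ogrid[:60, :60]
mask[(yy - 30) ** 2 + (xx - 30) ** 2 <= 20 ** 2] = True      # a filled disk
dist = ndimage.distance_transform_edt(mask)

max_coord = np.unravel_index(np.argmax(dist), dist.shape)    # roughly (30, 30)
direction = np.array([0, 1])                                 # "camera" looking along +columns

far_point, near_point = getRidgeEdge(dist, max_coord, direction)
print(far_point, near_point)                                 # opposite ends of the central ridge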
def map_vL(X, w): """ Maps a random sample drawn from vector Langevin with orientation u = [0,...,0,1] to a sample that follows vector Langevin with orientation w. """ assert w.shape[0] == X.shape[0] #assert np.linalg.norm(w) == 1. #print('Orientation vector length : ' + str(np.linalg.norm(w))) d = w.shape[0] w = w.reshape(w.shape[0],1) H = np.eye(d) - 2 * np.dot(w, w.T) [l, v] = np.linalg.eigh(H) V = v[:,::-1] if np.sum( w.flatten()*V[:,-1] ) < 0: V[:,-1] = -V[:,-1].copy() return np.dot(V, X)
a7d06a295569bb08d800c46c302c5a38ef2c2f52
20,409
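A quick sanity check: samples clustered around the canonical orientation e_d = [0, ..., 0, 1] (here just noisy unit vectors rather than true vector-Langevin draws) should end up oriented along w after the mapping.

import numpy as np

rng = np.random.default_rng(0)
d, n = 3, 2000

# Noisy unit vectors clustered around e_3 (columns are samples)
X = np.array([0.0, 0.0, 1.0])[:, None] + 0.1 * rng.standard_normal((d, n))
X /= np.linalg.norm(X, axis=0, keepdims=True)

w = np.array([1.0, 2.0, 2.0])
w /= np.linalg.norm(w)

Y = map_vL(X, w)
mean_dir = Y.mean(axis=1)
mean_dir /= np.linalg.norm(mean_dir)
print(mean_dir @ w)   # close to 1.0: the mapped sample points along w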
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """
    Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
    increases linearly between 0 and the initial lr set in the optimizer.

    Args:
        optimizer (:class:`~torch.optim.Optimizer`):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (:obj:`int`):
            The number of steps for the warmup phase.
        last_epoch (:obj:`int`, `optional`, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
825c4e14f91992c39d5be37b814e5c3bd7177c50
20,410
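A minimal usage sketch with a throwaway model: the learning rate ramps linearly from 0 to the optimizer's base lr over the first 100 steps and stays constant afterwards.

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=100)

for step in range(300):
    optimizer.step()       # a real training loop would compute a loss and backprop first
    scheduler.step()
    if step in (0, 49, 99, 299):
        print(step, scheduler.get_last_lr())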
def find_bigrams(textdict, threshold=0.1): """ find bigrams in the texts Input: - textdict: a dict with {docid: preprocessed_text} - threshold: for bigrams scores Returns: - bigrams: a list of "word1 word2" bigrams """ docids = set(textdict.keys()) # to identify bigrams, transform the texts into lists of words (assume texts are preprocessed) text_words = [textdict[did].split() for did in docids] bigram_scores = get_bigram_scores(text_words) return [bigram for bigram in bigram_scores if bigram_scores[bigram] > threshold]
4f3d6e3b4b62e42a98ab8bc3f823853680ca9e6f
20,412
def _rec_filter_to_info(line): """Move a DKFZBias filter to the INFO field, for a record. """ parts = line.rstrip().split("\t") move_filters = {"bSeq": "strand", "bPcr": "damage"} new_filters = [] bias_info = [] for f in parts[6].split(";"): if f in move_filters: bias_info.append(move_filters[f]) elif f not in ["."]: new_filters.append(f) if bias_info: parts[7] += ";DKFZBias=%s" % ",".join(bias_info) parts[6] = ";".join(new_filters or ["PASS"]) return "\t".join(parts) + "\n"
496056126bdf390a6213dfad5c40c4a14ec35caa
20,413
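A before/after illustration on a single made-up tab-separated VCF record: the DKFZ bias filters move into INFO and the remaining filters are kept (or replaced by PASS if none remain).

rec = "chr1\t12345\t.\tC\tT\t30\tbSeq;bPcr;LowQual\tDP=42;AF=0.12\n"
print(_rec_filter_to_info(rec), end="")
# chr1  12345  .  C  T  30  LowQual  DP=42;AF=0.12;DKFZBias=strand,damage
# (fields are tab-separated; shown here with collapsed whitespace)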
import ast

import astor


def ast_node_to_source(ast_node: ast.AST) -> str:
    """
    Uses the astor package to produce source code from an AST.

    Also handles low-level ast details, such as wrapping the node in a module
    if necessary and fixing line numbers for modified/extracted AST.

    Args:
        ast_node: the AST node to convert back to source.

    Returns:
        The generated source code as a string.
    """
    # Must be a module to output to source. Wrap in module if not already
    if not isinstance(ast_node, ast.Module):
        ast_node = ast.Module([ast_node])

    # Fix line numbers
    ast.fix_missing_locations(ast_node)
    return astor.to_source(ast_node)
bc4bb2a9f09907e2c9ab8a8f1629135695da6aa9
20,415
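Round-trip example, assuming astor is installed: extract a FunctionDef from a parsed module and regenerate its source.

import ast

src = "def greet(name):\n    return 'hello ' + name\n"
func_node = ast.parse(src).body[0]          # the FunctionDef, no longer wrapped in a Module
print(ast_node_to_source(func_node))
# def greet(name):
#     return 'hello ' + name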
import typing


def _get_base(*, name: str, schemas: oa_types.Schemas) -> typing.Type:
    """
    Retrieve the base class of a schema considering inheritance.

    If x-inherits is True, retrieve the parent. If it is a string, verify that the
    parent is valid. In either case, the model for that schema is used as the base
    instead of the usual base.

    If x-inherits is not present or False, return the usual base.

    Raise InheritanceConstructionOrderError if the parent of the schema has not been
    constructed when attempting to construct the child.

    Args:
        name: The name of the schema to determine the base for.
        schemas: All the schemas.

    Returns:
        The base of the model. Either the usual base or the model parent in the case
        of inheritance.

    """
    schema = schemas.get(name)
    if schema is None:
        raise exceptions.SchemaNotFoundError(f"Could not find schema {name}.")
    if _schema_helper.inherits(schema=schema, schemas=schemas):
        parent = _inheritance.retrieve_parent(schema=schema, schemas=schemas)
        try:
            return getattr(models, parent)
        except AttributeError as exc:
            raise exceptions.InheritanceError(
                "Any parents of a schema must be constructed before the schema can be "
                "constructed."
            ) from exc
    return getattr(models, "Base")
47bcca20c82078cd3bd821a0d10e951b346a1e87
20,417
def classNumber(A): """ Returns the number of transition classes in the matrix A """ cos = 0 if type(A[0][0]) == list: cos = len(A) else: cos = 1 return cos
a71bce468f7429746bfe246d94f5dcebb85c41d4
20,418
def lz4_decompress_c(src, dlen, dst=None):
    """
    Decompresses src, a bytearray of compressed data.

    The dst argument can be an optional bytearray which will have the output
    appended. If it's None, a new bytearray is created.

    The output bytearray is returned.
    """
    if dst is None:
        dst = bytearray()

    d = lz4zfs.decompress(bytes(src), dlen)
    l = len(d)
    if dlen != l:
        raise RuntimeError("decompressed size differs from %d, got %d" % (dlen, l))
    dst[0:l] = d
    return dst
5b2b15f323c00a8cedec4b9caee0fea8dda89f76
20,419
def format_coordinates(obj, no_seconds=True, wgs_link=True): """Format WGS84 coordinates as HTML. .. seealso:: https://en.wikipedia.org/wiki/ISO_6709#Order.2C_sign.2C_and_units """ def degminsec(dec, hemispheres): _dec = abs(dec) degrees = int(floor(_dec)) _dec = (_dec - int(floor(_dec))) * 60 minutes = int(floor(_dec)) _dec = (_dec - int(floor(_dec))) * 60 seconds = _dec if no_seconds: if seconds > 30: if minutes < 59: minutes += 1 else: minutes = 0 degrees += 1 fmt = "{0}\xb0" if minutes: fmt += "{1:0>2d}'" if not no_seconds and seconds: fmt += '{2:0>2f}"' fmt += hemispheres[0] if dec > 0 else hemispheres[1] return str(fmt).format(degrees, minutes, seconds) if not isinstance(obj.latitude, float) or not isinstance(obj.longitude, float): return '' return HTML.div( HTML.table( HTML.tr( HTML.td( 'Coordinates ', external_link( 'https://en.wikipedia.org/wiki/World_Geodetic_System_1984', label="WGS84") if wgs_link else ''), HTML.td( HTML.span('%s, %s' % ( degminsec(obj.latitude, 'NS'), degminsec(obj.longitude, 'EW'))), HTML.br(), HTML.span( '{0.latitude:.2f}, {0.longitude:.2f}'.format(obj), class_='geo'))), class_="table table-condensed"))
1fc6a151f73e8836ee935db3cf265438e597fec2
20,420
def delete_keys_on_selected(): """ deletes set driven keys from selected controllers. :return: <bool> True for success. """ s_ctrls = object_utils.get_selected_node(single=False) if not s_ctrls: raise IndexError("[DeleteKeysOnSelectedError] :: No controllers are selected.") selected_ctrls = s_ctrls[:-1] interface_ctrl = s_ctrls[-1] for c_ctrl in selected_ctrls: if not check_if_object_is_control(c_ctrl): continue print('[DeleteKeysOnSelected] :: Deleting keys on {}.'.format(c_ctrl)) delete_keys_on_controller(c_ctrl, interface_ctrl) return True
5d1b55bdcefcd8b4997432851e015e2a875eb80a
20,422
def scan_continuation(curr, prompt_tag, look_for=None, escape=False): """ Segment a continuation based on a given continuation-prompt-tag. The head of the continuation, up to and including the desired continuation prompt is reversed (in place), and the tail is returned un-altered. The hint value |look_for| is used to determine when the continuation being installed is a prefix of the extant continuation. In this case, installing the continuation is much simpler, as the expensive merge operation needed to find common substructure is the two continuation is not needed. """ handlers = False xs = [] while isinstance(curr, Cont): if curr is look_for: return None, handlers handlers |= isinstance(curr, DynamicWindValueCont) xs.append(curr) if isinstance(curr, Prompt) and curr.tag is prompt_tag: break curr = curr.prev if not escape and not jit.isvirtual(curr): return _scan_continuation(curr, prompt_tag, look_for, xs, handlers) return xs, handlers
1182394c26b6d9e745f469f5006b5269e5854db8
20,423
def solar_wcs_frame_mapping(wcs): """ This function registers the coordinates frames to their FITS-WCS coordinate type values in the `astropy.wcs.utils.wcs_to_celestial_frame` registry. """ dateobs = wcs.wcs.dateobs if wcs.wcs.dateobs else None # SunPy Map adds 'heliographic_observer' and 'rsun' attributes to the WCS # object. We check for them here, and default to None. if hasattr(wcs, 'heliographic_observer'): observer = wcs.heliographic_observer else: observer = None if hasattr(wcs, 'rsun'): rsun = wcs.rsun else: rsun = None # First we try the Celestial sub, which rectifies the order. # It will return anything matching ??LN*, ??LT* wcss = wcs.sub([WCSSUB_CELESTIAL]) # If the SUB works, use it. if wcss.naxis == 2: wcs = wcss xcoord = wcs.wcs.ctype[0][0:4] ycoord = wcs.wcs.ctype[1][0:4] if xcoord == 'HPLN' and ycoord == 'HPLT': return Helioprojective(obstime=dateobs, observer=observer, rsun=rsun) if xcoord == 'HGLN' and ycoord == 'HGLT': return HeliographicStonyhurst(obstime=dateobs) if xcoord == 'CRLN' and ycoord == 'CRLT': return HeliographicCarrington(obstime=dateobs) if xcoord == 'SOLX' and ycoord == 'SOLY': return Heliocentric(obstime=dateobs, observer=observer)
dd6a52e8356a242c5f2dfb4808e0a3d1ed57073f
20,425
def toa_error_peak_detection(snr): """ Computes the error in time of arrival estimation for a peak detection algorithm, based on input SNR. Ported from MATLAB Code Nicholas O'Donoughue 11 March 2021 :param snr: Signal-to-Noise Ratio [dB] :return: expected time of arrival error variance [s^2] """ # Convert SNR to linear units snr_lin = utils.unit_conversions.db_to_lin(snr) # Compute Error return 1/(2*snr_lin)
4ab2f653c81a29484d96aed58cb73ca4327dbde0
20,426
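Worked example, assuming db_to_lin(x) is the usual 10**(x/10) conversion: an SNR of 10 dB gives a linear SNR of 10 and therefore a variance of 1/(2*10) = 0.05 s^2.

snr_db = 10.0
snr_lin = 10 ** (snr_db / 10)        # assumed behaviour of utils.unit_conversions.db_to_lin
print(1 / (2 * snr_lin))             # 0.05, matching toa_error_peak_detection(10.0)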
def statCellFraction(gridLimit, gridSpace, valueFile): """ Calculate the fractional value of each grid cell, based on the values stored in valueFile. :param dict gridLimit: Dictionary of bounds of the grid. :param dict gridSpace: Resolution of the grid to calculate values. :param str valueFile: Path to the ascii grid file containing values to sample. :returns: :class:`numpy.ndarray` of fractional values, with length equal to the number of cells Notes: Still need to include bounds checking to ensure the valueFile data actually covers the gridLimits. """ gLon, gLat, gData = grdRead(valueFile) nCells = maxCellNum(gridLimit, gridSpace) + 1 output = np.zeros(nCells) for cellNum in range(nCells): cellLon, cellLat = getCellLonLat(cellNum, gridLimit, gridSpace) wLon = cellLon eLon = cellLon + gridSpace['x'] nLat = cellLat sLat = cellLat - gridSpace['y'] ii = np.where((gLon <= eLon) & (gLon >= wLon)) jj = np.where((gLat <= nLat) & (gLat >= sLat)) cellValues = gData[np.meshgrid(jj[0], ii[0])] if abs(cellValues).max() == 0: output[cellNum] = np.average(cellValues) else: output[cellNum] = np.average(cellValues) / abs(cellValues).max() return output
e26f011c10d94435b134e9f1b3adb2d1b1cd88ce
20,428
def relative_url_functions(current_url, course, lesson): """Return relative URL generators based on current page. """ def lesson_url(lesson, *args, **kwargs): if not isinstance(lesson, str): lesson = lesson.slug if course is not None: absolute = url_for('course_page', course=course, lesson=lesson, *args, **kwargs) else: absolute = url_for('lesson', lesson=lesson, *args, **kwargs) return get_relative_url(current_url, absolute) def subpage_url(page_slug): if course is not None: absolute = url_for('course_page', course=course, lesson=lesson, page=page_slug) else: absolute = url_for('lesson', lesson=lesson, page=page_slug) return get_relative_url(current_url, absolute) def static_url(path): absolute = url_for('lesson_static', lesson=lesson, path=path, course=course) return get_relative_url(current_url, absolute) return lesson_url, subpage_url, static_url
b49009cfdb8e9095c9ca17fee39feb8689624bd4
20,430
def fix_CompanySize(r): """ Fix the CompanySize column """ if type(r.CompanySize) != str: if r.Employment == "Independent contractor, freelancer, or self-employed": r.CompanySize = "0 to 1 Employees" elif r.Employment in [ "Not employed, but looking for work", "full-time", "Not employed, and not looking for work", "part-time", "Retired", ]: r.CompanySize = "Not Applicable" return r
bd34bb3e72920fb7ef37279a743198387b1c4717
20,431
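Intended row-wise usage with pandas (the survey strings below are illustrative):

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "Employment": [
        "Independent contractor, freelancer, or self-employed",
        "Retired",
        "Employed full-time",
    ],
    "CompanySize": [np.nan, np.nan, "20 to 99 employees"],
})

df = df.apply(fix_CompanySize, axis=1)
print(df["CompanySize"].tolist())
# ['0 to 1 Employees', 'Not Applicable', '20 to 99 employees']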
import yaml
from pathlib import Path


def write_thermo_yaml(phases=None, species=None, reactions=None,
                      lateral_interactions=None, units=None,
                      filename=None, T=300., P=1., newline='\n',
                      ads_act_method='get_H_act',
                      yaml_options={'default_flow_style': None, 'indent': 2,
                                    'sort_keys': False, 'width': 79}):
    """Writes the units, phases, species, lateral interactions, reactions and
    additional options in the CTI format for OpenMKM

    Parameters
    ----------
        phases : list of :class:`~pmutt.omkm.phase.Phase` objects
            Phases to write in YAML file. The species should already be
            assigned.
        species : list of :class:`~pmutt.empirical.nasa.Nasa`,
        :class:`~pmutt.empirical.nasa.Nasa9` or
        :class:`~pmutt.empirical.shomate.Shomate`
            Species to write in YAML file.
        reactions : list of :class:`~pmutt.omkm.reaction.SurfaceReaction`
            Reactions to write in YAML file.
        lateral_interactions : list of
        :class:`~pmutt.mixture.cov.PiecewiseCovEffect` objects, optional
            Lateral interactions to include in YAML file. Default is None.
        units : dict or :class:`~pmutt.omkm.units.Unit` object, optional
            Units to write file. If a dict is inputted, the key is the
            quantity and the value is the unit. If not specified, uses the
            default units of :class:`~pmutt.omkm.units.Unit`.
        filename : str, optional
            Filename for the input.yaml file. If not specified, returns file
            as str.
        T : float, optional
            Temperature in K. Default is 300 K.
        P : float, optional
            Pressure in atm. Default is 1 atm.
        newline : str, optional
            Type of newline to use. Default is Linux newline ('\\n')
        ads_act_method : str, optional
            Activation method to use for adsorption reactions. Accepted
            options include 'get_H_act' and 'get_G_act'.
            Default is 'get_H_act'.
    Returns
    -------
        lines_out : str
            If ``filename`` is None, CTI file is returned.
    """
    lines = [
        _get_file_timestamp(comment_char='# '),
        '# See documentation for OpenMKM YAML file here:',
        '# https://vlachosgroup.github.io/openmkm/input',
    ]
    yaml_dict = {}

    '''Organize units'''
    if units is None:
        units = Units()
    elif isinstance(units, dict):
        units = Units(**units)
    units_out = units.to_omkm_yaml()

    '''Pre-assign IDs for lateral interactions so phases can be written'''
    if lateral_interactions is not None:
        interactions_out = []
        i = 0
        for lat_interaction in lateral_interactions:
            if lat_interaction.name is None:
                lat_interaction.name = 'i_{:04d}'.format(i)
                i += 1

            interaction_dict = lat_interaction.to_omkm_yaml(units=units)
            interactions_out.append(interaction_dict)

    '''Pre-assign IDs for reactions so phases can be written'''
    beps = []
    if reactions is not None:
        reactions_out = []
        i = 0
        for reaction in reactions:
            # Assign reaction ID if not present
            if reaction.id is None:
                reaction.id = 'r_{:04d}'.format(i)
                i += 1
            # Write reaction
            reaction_dict = reaction.to_omkm_yaml(units=units, T=T)
            reactions_out.append(reaction_dict)

            # Add unique BEP relationship if any
            try:
                bep = reaction.bep
            except AttributeError:
                pass
            else:
                if bep is not None and bep not in beps:
                    beps.append(bep)

    '''Write phases'''
    if phases is not None:
        phases_out = []
        for phase in phases:
            phase_dict = _force_pass_arguments(phase.to_omkm_yaml, units=units)
            phases_out.append(phase_dict)
        # yaml_dict['phases'] = phases_out

    '''Write species'''
    if species is not None:
        species_out = []
        for ind_species in species:
            ind_species_dict = _force_pass_arguments(ind_species.to_omkm_yaml,
                                                     units=units)
            species_out.append(ind_species_dict)
        # yaml_dict['species'] = species_out

    '''Organize BEPs'''
    if len(beps) > 0:
        beps_out = []
        i = 0
        for bep in beps:
            # Assign name if necessary
            if bep.name is None:
                bep.name = 'b_{:04d}'.format(i)
                i += 1
            bep_dict = _force_pass_arguments(bep.to_omkm_yaml, units=units)
            beps_out.append(bep_dict)
        # yaml_dict['beps'] = beps_out

    '''Organize fields'''
    fields = ('units', 'phases', 'species', 'reactions', 'beps',
              'interactions',)
    for field in fields:
        try:
            val = locals()['{}_out'.format(field)]
        except KeyError:
            pass
        else:
            # Create a YAML string
            yaml_str = yaml.dump(data={field: val}, stream=None, **yaml_options)
            lines.extend(
                ['',
                 '#' + '-' * 79,
                 '# {}'.format(field.upper()),
                 '#' + '-' * 79,
                 yaml_str])
            # yaml_dict[field] = val

    # Convert to YAML format
    # yaml_str = yaml.dump(data=yaml_dict, stream=None, **yaml_options)
    # Remove redundant quotes
    # yaml_str = yaml_str.replace('\'', '')

    # lines.append(yaml_str)
    lines_out = '\n'.join(lines)
    # Remove redundant quotes
    lines_out = lines_out.replace('\'', '')
    # Add spacing between list elements
    lines_out = lines_out.replace('\n-', '\n\n-')

    if filename is not None:
        filename = Path(filename)
        with open(filename, 'w', newline=newline) as f_ptr:
            f_ptr.write(lines_out)
    else:
        # Or return as string
        return lines_out
f8d12226a137d2cf3b9ddecb427dc51257498145
20,432
import random def move_weighted(state: State, nnet: NNet) -> tuple: """ Returns are random move with weighted probabilities from the neural network. :param state: State to evaluate :param nnet: Neural network used for evaluation :return: Move as ((origin_row, origin_column),(target_row,target_column) """ policy = nnet.prediction(state)[0] moves = list(policy.keys()) weights = list(policy.values()) return random.choices(moves, weights=weights)[0]
0d9ad3e6344c3c24e71530bd7bbe6dc5a0b9a254
20,433