Columns: content (string, lengths 35 – 762k), sha1 (string, length 40), id (int64, 0 – 3.66M)
def stopping_player(bot, state):
    """ A Player that just stands still. """
    return bot.position
72628e39d26760eedc9a0e85a8279ac530ab851d
24,461
def check_continue(transformer: transformer_class.Transformer, check_md: dict,
                   transformer_md: dict, full_md: dict) -> tuple:
    """Checks if conditions are right for continuing processing
    Arguments:
        transformer: instance of transformer class
    Return:
        Returns a tuple containing the return code for continuing or not, and
        an error message if there's an error
    """
    have_file = False
    for one_file in check_md['list_files']():
        if one_file.endswith('.bin'):
            have_file = True
            break
    return (0, "") if have_file else (-1, "Missing raw image bin file from list of files")
78348046acde489a129fc8a4426a9b11ee2e2238
24,462
def getFlatten(listToFlat):
    """
    :param listToFlat: anything, preferably a list of strings
    :return: flattened list (list of strings) #sacred
    """
    preSelect = mc.ls(sl=True, fl=True)
    mc.select(cl=1)
    mc.select(listToFlat)
    flatten = mc.ls(sl=True, fl=True)
    mc.select(preSelect)
    return flatten
91d1376d81140fd258c80bcc23cb220ce0f99926
24,463
def can_exit_room(state: State, slot: int) -> bool:
    """
    Return True if the amphipod may leave its room because not every amphipod
    in the room is in its place.
    Not exhaustive! If there are amphipods above it, it may still be stuck.
    """
    amphipod = state[slot]
    assert amphipod != EMPTY_SLOT
    room = slot // 4
    bottom_amphipod = state[room * 4 + 3]
    if bottom_amphipod != room:
        return True
    assert bottom_amphipod != EMPTY_SLOT
    for i in range(room * 4 + 2, room * 4 - 1, -1):
        if state[i] == EMPTY_SLOT:
            return False
        if state[i] != bottom_amphipod:
            return True
    return False
914881e90c2e9b357d49fb44d56b7f864b4973c0
24,464
def square_matrix(square):
    """ This function will calculate the value x
        (i.e. blurred pixel value) for each 3*3 blur image.
    """
    tot_sum = 0
    # Calculate the sum of all the pixels in a 3*3 matrix
    for i in range(3):
        for j in range(3):
            tot_sum += square[i][j]
    return tot_sum // 9
4f378736c19c33f104be462939b834ece403f713
24,465
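A quick check of the 3*3 averaging in `square_matrix` above (the window values are hypothetical):

window = [[10, 20, 30],
          [40, 50, 60],
          [70, 80, 90]]
assert square_matrix(window) == 50  # 450 // 9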
def before_after_text(join_set, index, interval_list):
    """
    Extracts any preceding or following markup to be joined to an interval's text.
    """
    before_text, after_text = '', ''
    # Check if we have some preceding or following markup to join with.
    if join_set:
        if index > 0:
            before_text = ''.join(character for character in interval_list[index - 1][2]
                                  if character in join_set)
        if index < len(interval_list) - 1:
            after_text = ''.join(character for character in interval_list[index + 1][2]
                                 if character in join_set)
    return before_text, after_text
b2c63fe1e7ea5bb204e41b27bc79d2c81964369a
24,466
import socket
import ssl


def create_server_ssl(addr, port, backlog):
    """Create a listening TLS server socket wrapped as a SuperSocketSSL."""
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((addr, port))
    server.listen(backlog)
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.load_default_certs()
    wrap = context.wrap_socket(server, do_handshake_on_connect=False, server_side=True)
    sserver = SuperSocketSSL(wrap)
    ServerSSL(sserver)
    sserver.add_map(SSL_ACCEPT, handles_wrapper)
    return sserver
3bcd3d8a157401f23c50e6c35fe9b8e45f4659d6
24,468
def other_ops(request):
    """ Other Operations View """
    args = {
        'pending': OtherOperation.objects.filter(status=0).count(),
        'active': OtherOperation.objects.filter(status=1).count(),
        'done': OtherOperation.objects.filter(status=2).count(),
        'cancelled': OtherOperation.objects.filter(status=3).count(),
        'passed': OtherOperation.objects.filter(status=4).count(),
        'failed': OtherOperation.objects.filter(status=5).count(),
    }
    args['other_ops'] = OtherOperation.objects.all()
    args['a'] = 'other-ops'
    return render(request, 'operations/other-ops.html', args)
727e620d0ba5798eb0bcdc31e31a831a9332e802
24,469
def distance_point_2_line(point, seg):
    """Finds the minimum distance and closest point between a point and a line

    Args:
        point ([float, float]): (x,y) point to test
        seg ([[float, float], [float, float]]): two points defining the line

    Returns:
        A list of two items:
        * Distance between the point and line
        * The (x,y) value on the line that is the closest point
    """
    dseg = seg[1] - seg[0]
    dpt = point - seg[0]
    proj = (np.dot(dpt, dseg) / np.dot(dseg, dseg)) * dseg
    # distance is the norm of the rejection (point minus its projection)
    dist = np.linalg.norm(dpt - proj)
    return dist, seg[0] + proj
4627639f4b900b72a0b88104df44e498ef123cb4
24,470
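A minimal check of the corrected `distance_point_2_line` above (the point and segment values are hypothetical):

import numpy as np

point = np.array([0.0, 1.0])
seg = np.array([[-1.0, 0.0], [1.0, 0.0]])  # segment along the x-axis
dist, closest = distance_point_2_line(point, seg)
# the perpendicular foot is the origin, one unit away
assert np.isclose(dist, 1.0) and np.allclose(closest, [0.0, 0.0])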
def load_glove_from_file(glove_filepath):
    """
    Load the GloVe embeddings

    Args:
        glove_filepath (str): path to the glove embeddings file
    Returns:
        word_to_index (dict), embeddings (numpy.ndarray)
    """
    word_to_index = {}
    embeddings = []
    with open(glove_filepath, "r") as fp:
        for index, line in enumerate(fp):
            line = line.split(" ")  # each line: word num1 num2 ...
            word_to_index[line[0]] = index  # word = line[0]
            embedding_i = np.array([float(val) for val in line[1:]])
            embeddings.append(embedding_i)
    return word_to_index, np.stack(embeddings)
30d8a0fb8e1b0728ae9943dd0f5c2387dbcdb778
24,471
def make_pd(space: gym.Space):
    """Create `ProbabilityDistribution` from gym.Space"""
    if isinstance(space, gym.spaces.Discrete):
        return CategoricalPd(space.n)
    elif isinstance(space, gym.spaces.Box):
        assert len(space.shape) == 1
        return DiagGaussianPd(space.shape[0])
    elif isinstance(space, gym.spaces.MultiBinary):
        return BernoulliPd(space.n)
    else:
        raise TypeError(space)
0849e947061221ba08bf113f6576c531ca2df2cd
24,472
import typing

import requests


def download_file_from_google_drive(
        gdrive_file_id: typing.AnyStr,
        destination: typing.AnyStr,
        chunk_size: int = 32768,
) -> typing.AnyStr:
    """
    Downloads a file from google drive, bypassing the confirmation prompt.

    Args:
        gdrive_file_id: ID string of the file to download from google drive.
        destination: where to save the file.
        chunk_size: chunk size for gradual downloads.

    Returns:
        The path to the downloaded file.
    """
    # taken from this StackOverflow answer: https://stackoverflow.com/a/39225039
    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(URL, params={'id': gdrive_file_id}, stream=True)
    token = None
    for key, value in response.cookies.items():
        if key.startswith('download_warning'):
            token = value
    if token:
        params = {'id': gdrive_file_id, 'confirm': token}
        response = session.get(URL, params=params, stream=True)
    with open(destination, "wb") as f:
        for chunk in response.iter_content(chunk_size):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
    return destination
29cdcc509aa21a6f2ae14ed18f2c0523bbdbd5a4
24,473
import inspect
import functools


def attach(func, params):
    """
    Given a function and a namespace of possible parameters,
    bind any params matching the signature of the function
    to that function.
    """
    sig = inspect.signature(func)
    params = Projection(sig.parameters.keys(), params)
    return functools.partial(func, **params)
35116b9b3be12f1e19789e2b1c36b7c34b6138ea
24,474
def question_route():
    """ Question bank page """
    # Get the user ID
    useruid = current_user.get_id()
    # Try to keep the session logged in
    if not keep_active(useruid):
        logout_user()
    return question_page(useruid)
1b752709aa8264fdc19aaa44f2233b2e0382e1b5
24,475
import base64
from io import BytesIO

import qrcode
import qrcode.image.svg


def generate_qrcode(url: str, should_cache: bool = True) -> str:
    """
    Generate a QR code (as data URI) to a given URL.

    :param url: the url the QR code should reference
    :param should_cache: whether or not the QR code should be cached
    :return: a data URI to a base64 encoded SVG image
    """
    if should_cache and url in qrcode_cache:
        return qrcode_cache[url]
    image = qrcode.make(url, image_factory=qrcode.image.svg.SvgPathFillImage)
    image_stream = BytesIO()
    image.save(image_stream)
    image_stream.seek(0)
    qrcode_url = ('data:image/svg+xml;base64,'
                  + base64.b64encode(image_stream.read()).decode('utf-8'))
    if should_cache:
        qrcode_cache[url] = qrcode_url
    return qrcode_url
ab89cf09d7d50217960f48f75ff17b1d46513f52
24,476
def hz2mel(f):
    """Convert an array of frequency in Hz into mel."""
    return 1127.01048 * np.log(f / 700 + 1)
84522419c972bf9b78c9931aef871f97a8a0d292
24,478
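A spot check of `hz2mel` above — the scale is constructed so that 1000 Hz maps to roughly 1000 mel (assuming numpy is imported as np, as the snippet does):

print(float(hz2mel(1000.0)))  # ~999.99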
def figure(figsize=None, logo="iem", title=None, subtitle=None, **kwargs):
    """Return an opinionated matplotlib figure.

    Parameters:
      figsize (width, height): in inches for the figure, defaults to something
        good for twitter.
      dpi (int): dots per inch
      logo (str): Currently, 'iem', 'dep' is supported. `None` disables.
      title (str): Title to place on the figure.
      subtitle (str): SubTitle to place on the figure.
    """
    if figsize is None:
        figsize = TWITTER_RESOLUTION_INCH
    fig = plt.figure(figsize=figsize, **kwargs)
    draw_logo(fig, logo)
    titlebounds = [0.1, 0.9, 0.91, 0.98]
    if subtitle is not None:
        titlebounds[2] = 0.94
    fitbox(fig, title, *titlebounds)
    fitbox(fig, subtitle, 0.1, 0.9, 0.91, 0.935)
    return fig
fd89e550a891ccf6f639f8c981215aa25fa0ad06
24,479
def run_epoch(session, model, eval_op=None, verbose=False):
    """Runs the model on the given data."""
    costs = 0.0
    iters = 0
    state = session.run(model.initial_state)
    fetches = {
        "cost": model.cost,
        "final_state": model.final_state,
        "accuracy": model.accuracy,
        "y_new": model.y_new,
        "y_target": model.y_target,
    }
    accuracys = 0.0
    if eval_op is not None:
        fetches["eval_op"] = eval_op
    output_y = []
    for step in range(model.input.epoch_size):
        feed_dict = {}
        feed_dict[model.initial_state] = state
        vals = session.run(fetches, feed_dict)
        cost = vals["cost"]
        state = vals["final_state"]
        accuracy = vals["accuracy"]
        y_new = vals["y_new"]
        y_target = vals["y_target"]
        costs += cost
        accuracys += accuracy
        # iters += model.input.num_steps
        iters = iters + 1
        for i in range(model.input.batch_size):
            if y_new[i, 0] == 0:
                output_y.append(1)
            else:
                output_y.append(0)
    return costs, accuracys / iters, output_y
a69ed33e930245118e0d4054a10d6c1fd61cc0da
24,480
from typing import Any


def is_scoo(x: Any) -> bool:
    """check if an object is an `SCoo` (a SAX sparse S-matrix representation in COO-format)"""
    return isinstance(x, (tuple, list)) and len(x) == 4
96d3937d9884198b75440e3de75949c713b8e16a
24,481
def project_rename_folder(object_id, input_params={}, always_retry=False, **kwargs):
    """
    Invokes the /project-xxxx/renameFolder API method.

    For more info, see:
    https://documentation.dnanexus.com/developer/api/data-containers/folders-and-deletion#api-method-class-xxxx-renamefolder
    """
    return DXHTTPRequest('/%s/renameFolder' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
60bfe648eb9846bf06125fd65436e9c7cf5c2fd6
24,483
def is_seq(a):
    """Return `True` if `a` is a Z3 sequence expression.

    >>> print (is_seq(Unit(IntVal(0))))
    True
    >>> print (is_seq(StringVal("abc")))
    True
    """
    return isinstance(a, SeqRef)
1429fb3fd800a3688700a62dd0665df7536b56d9
24,485
from typing import TypeVar

T = TypeVar("T")


def identity(__obj: T, /) -> T:
    """Identity function"""
    return __obj
8c96839e48e1ec270bd57616abcc3234b6f0958f
24,486
def lines_in_file(filename: str) -> int:
    """
    Count the number of lines in a file

    :param filename: A string containing the relative or absolute path to a file
    :returns: The number of lines in the file
    """
    with open(filename, "r") as f:
        return len(f.readlines())
d71b5c8de1b4eb9a45988e06c17a129f4a19f221
24,487
import click


def validate_input_parameters(live_parameters, original_parameters):
    """Return validated input parameters."""
    parsed_input_parameters = dict(live_parameters)
    for parameter in parsed_input_parameters.keys():
        if parameter not in original_parameters:
            click.echo(
                click.style('Given parameter - {0}, is not in '
                            'reana.yaml'.format(parameter), fg='red'),
                err=True)
            del live_parameters[parameter]
    return live_parameters
226b95d0d9b42e586e395107def239d4e61c057a
24,489
def _upper_zero_group(match: ty.Match, /) -> str:
    """
    Uppercases the characters of the captured group `let`.

    Used for converting snake_case to camelCase.

    Arguments:
        match: Regex match object produced by `re.sub`

    Returns:
        The same letter from the group, but in upper case
    """
    return match.group("let").upper()
311dbc41c17b1c6fde39b30d8126eb4c867d7a6f
24,490
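A usage sketch for the snake_case -> camelCase conversion mentioned in the docstring above; the wrapper `to_camel_case` and its pattern are assumptions for illustration, not taken from the source:

import re

def to_camel_case(name: str) -> str:
    # each "_x" match is replaced by the uppercased captured letter
    return re.sub(r"_(?P<let>[a-z])", _upper_zero_group, name)

assert to_camel_case("snake_case_name") == "snakeCaseName"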
def _concatenate_shapes(shapes, axis):
    """Given array shapes, return the resulting shape and slices prefixes.

    These help in nested concatenation.

    Returns
    -------
    shape: tuple of int
        This tuple satisfies:
        ```
        shape, _ = _concatenate_shapes([arr.shape for shape in arrs], axis)
        shape == concatenate(arrs, axis).shape
        ```

    slice_prefixes: tuple of (slice(start, end), )
        For a list of arrays being concatenated, this returns the slice
        in the larger array at axis that needs to be sliced into.

        For example, the following holds:
        ```
        ret = concatenate([a, b, c], axis)
        _, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis)

        ret[(slice(None),) * axis + sl_a] == a
        ret[(slice(None),) * axis + sl_b] == b
        ret[(slice(None),) * axis + sl_c] == c
        ```

        These are called slice prefixes since they are used in the recursive
        blocking algorithm to compute the left-most slices during the
        recursion. Therefore, they must be prepended to rest of the slice
        that was computed deeper in the recursion.

        These are returned as tuples to ensure that they can quickly be added
        to existing slice tuple without creating a new tuple every time.
    """
    # Cache a result that will be reused.
    shape_at_axis = [shape[axis] for shape in shapes]

    # Take a shape, any shape
    first_shape = shapes[0]
    first_shape_pre = first_shape[:axis]
    first_shape_post = first_shape[axis + 1:]

    if any(shape[:axis] != first_shape_pre or shape[axis + 1:] != first_shape_post
           for shape in shapes):
        raise ValueError("Mismatched array shapes in block along axis {}.".format(axis))

    shape = first_shape_pre + (sum(shape_at_axis),) + first_shape[axis + 1:]

    offsets_at_axis = _accumulate(shape_at_axis)
    slice_prefixes = [
        (slice(start, end),)
        for start, end in zip([0] + offsets_at_axis, offsets_at_axis)
    ]
    return shape, slice_prefixes
2ca93f3c656f1629fa3fdb7f5c8cb325abd40cf2
24,491
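The helper `_accumulate` is not shown in the snippet above; a minimal sketch consistent with how `_concatenate_shapes` uses it (a running sum of the per-array sizes along the axis) could be:

from itertools import accumulate

def _accumulate(values):
    # running sum: [2, 3, 4] -> [2, 5, 9]
    return list(accumulate(values))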
import re


def md_changes(seq, md_tag):
    """Recreates the reference sequence of a given alignment to the extent that the
    MD tag can represent.

    Note:
        Used in conjunction with `cigar_changes` to recreate the complete reference
        sequence

    Args:
        seq (str): aligned segment sequence
        md_tag (str): MD tag for associated sequence

    Returns:
        ref_seq (str): a version of the aligned segment's reference sequence given \
            the changes reflected in the MD tag

    Raises:
        ValueError: if MD tag is None

    Example:
        >>> md_changes('CTTATATTGGCCTT', '3C4AT4')
        'CTTCTATTATCCTT'
    """
    if md_tag is None:
        raise ValueError('No MD tag found or given for sequence')
    ref_seq = ''
    last_md_pos = 0
    for mo in re.finditer(r'(?P<matches>\d+)|(?P<del>\^\w+?(?=\d))|(?P<sub>\w)', md_tag):
        mo_group_dict = mo.groupdict()
        if mo_group_dict['matches'] is not None:
            matches = int(mo_group_dict['matches'])
            ref_seq += seq[last_md_pos:last_md_pos + matches]
            last_md_pos += matches
        elif mo_group_dict['del'] is not None:
            deletion = mo_group_dict['del']
            ref_seq += deletion[1:]
        elif mo_group_dict['sub'] is not None:
            substitution = mo_group_dict['sub']
            ref_seq += substitution
            last_md_pos += 1
        else:
            pass
    return ref_seq
f8591d0084f6c10c9bbd1a39b3f9e13cfe952e68
24,492
def get_cs_token(accesskey="", secretkey="", identity_url="", tenant_id=""):
    """
    Pass our accesskey and secretkey to keystone for tokenization.
    """
    identity_request_json = json.dumps({
        'auth': {
            'apiAccessKeyCredentials': {
                'accessKey': accesskey,
                'secretKey': secretkey
            },
            "tenantId": tenant_id
        }
    })
    identity_req = urllib2.Request(identity_url + "/tokens",
                                   identity_request_json,
                                   {'Content-type': 'application/json'})
    try:
        response = urllib2.urlopen(identity_req).read()
    except urllib2.HTTPError as e:
        log("HTTP Error: " + str(e))
        return False
    response_json = json.loads(response)
    if response_json['access']['token']['tenant']['id'] == tenant_id:
        return response_json['access']['token']['id']
    return False
a14324651039687bb52e47f4068fcee74c34aa65
24,494
def get_auto_scaling_group(asg, asg_name: str):
    """Get boto3 Auto Scaling Group by name or raise exception"""
    result = asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])
    groups = result["AutoScalingGroups"]
    if not groups:
        raise Exception("Auto Scaling Group {} not found".format(asg_name))
    return groups[0]
07176e538cdb265ae86b16a5d36bf1b274f45c19
24,495
def guiraud_r(txt_len: int, vocab_size: int) -> np.float64:
    """
    The TTR formula underwent simple corrections:
    RTTR (root type-token ratio), Guiraud, 1960.
    """
    return vocab_size / np.sqrt(txt_len)
9c054d6d741fabb64ec0659b280474385b5cfa79
24,496
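For example, with hypothetical counts of 100 running tokens and 40 distinct types, `guiraud_r` above gives 40 / sqrt(100) (assuming numpy is imported as np, as the snippet does):

print(guiraud_r(txt_len=100, vocab_size=40))  # 4.0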
def serialize_dagster_namedtuple(nt: tuple, **json_kwargs) -> str:
    """Serialize a whitelisted named tuple to a json encoded string"""
    check.tuple_param(nt, "nt")
    return _serialize_dagster_namedtuple(nt, whitelist_map=_WHITELIST_MAP, **json_kwargs)
fbe6606d0001d425593c0f4f880a6b314f69b94b
24,497
def join_epiweek(year, week):
    """ return an epiweek from the (year, week) pair """
    return year * 100 + week
fdbc50f8a953ef7307e9558019b3c2b50bc65be4
24,498
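A quick check of the packed (year, week) encoding from `join_epiweek` above:

assert join_epiweek(2020, 5) == 202005
assert join_epiweek(2019, 52) == 201952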
def get_or_create_api_key(datastore: data_store.DataStore, project_id: str) -> str:
    """Return API key of existing project or create a new project and API key.

    If the project exists, return its API key, otherwise create a new project
    with the provided project ID and return its API key.

    Args:
        datastore: The datastore used for reading / writing the project.
        project_id: The ID of the project to get or write.

    Returns:
        The API key associated with the project.
    """
    try:
        return datastore.read_by_proto_ids(project_id=project_id).api_key
    except data_store.NotFoundError:
        # Project not found, create it.
        api_key = unique_id.generate_base64_id()
        project = data_store_pb2.Project(
            project_id=project_id, name=project_id, api_key=api_key)
        datastore.write(project)
        return api_key
2cb5b04dcf44b0e39d171683a0bd184d582eaf34
24,499
def map_cosh(process):
    """Map the `cosh` process using the default `apply` mapping."""
    return map_default(process, 'cosh', 'apply')
fe853e23f8008bc5e767ef5af8b4efc6a04de407
24,500
def cleanline(line):
    """Remove the newline characters and trailing ',' from a line of input data."""
    line = line.strip('\n')
    line = line.strip(',')
    return line
a4149663e2c3966c5d9be22f4aa009109e4a67ca
24,502
from onnx.helper import make_node
import logging

import numpy as np


def convert_contrib_box_nms(node, **kwargs):
    """Map MXNet's _contrib_box_nms operator to ONNX
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)
    input_dtypes = get_input_dtypes(node, kwargs)
    dtype = input_dtypes[0]
    # dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
    opset_version = kwargs['opset_version']
    if opset_version < 11:
        raise AttributeError('ONNX opset 11 or greater is required to export this operator')
    overlap_thresh = float(attrs.get('overlap_thresh', '0.5'))
    valid_thresh = float(attrs.get('valid_thresh', '0'))
    topk = int(attrs.get('topk', '-1'))
    coord_start = int(attrs.get('coord_start', '2'))
    score_index = int(attrs.get('score_index', '1'))
    id_index = int(attrs.get('id_index', '-1'))
    force_suppress = attrs.get('force_suppress', 'True')
    background_id = int(attrs.get('background_id', '-1'))
    in_format = attrs.get('in_format', 'corner')
    out_format = attrs.get('out_format', 'corner')
    center_point_box = 0 if in_format == 'corner' else 1
    if topk == -1:
        topk = 2**31 - 1
    if in_format != out_format:
        raise NotImplementedError('box_nms does not currently support in_format != out_format')
    if background_id != -1:
        raise NotImplementedError('box_nms does not currently support background_id != -1')
    if id_index != -1 or force_suppress == 'False':
        logging.warning('box_nms: id_index != -1 or/and force_suppress == False detected. '
                        'However, due to ONNX limitations, boxes of different categories will NOT '
                        'be exempted from suppression. This might lead to different behavior than '
                        'native MXNet')
    create_tensor([coord_start], name+'_cs', kwargs['initializer'])
    create_tensor([coord_start+4], name+'_cs_p4', kwargs['initializer'])
    create_tensor([score_index], name+'_si', kwargs['initializer'])
    create_tensor([score_index+1], name+'_si_p1', kwargs['initializer'])
    create_tensor([topk], name+'_topk', kwargs['initializer'])
    create_tensor([overlap_thresh], name+'_ot', kwargs['initializer'], dtype=np.float32)
    create_tensor([valid_thresh], name+'_vt', kwargs['initializer'], dtype=np.float32)
    create_tensor([-1], name+'_m1', kwargs['initializer'])
    create_tensor([-1], name+'_m1_f', kwargs['initializer'], dtype=dtype)
    create_tensor([0], name+'_0', kwargs['initializer'])
    create_tensor([1], name+'_1', kwargs['initializer'])
    create_tensor([2], name+'_2', kwargs['initializer'])
    create_tensor([3], name+'_3', kwargs['initializer'])
    create_tensor([0, 1, -1], name+'_scores_shape', kwargs['initializer'])
    create_tensor([0, 0, 1, 0], name+'_pad', kwargs['initializer'])
    create_tensor([0, -1], name+'_bat_spat_helper', kwargs['initializer'])
    create_const_scalar_node(name+"_0_s", np.int64(0), kwargs)
    create_const_scalar_node(name+"_1_s", np.int64(1), kwargs)
    nodes = [
        make_node('Shape', [input_nodes[0]], [name+'_shape']),
        make_node('Shape', [name+'_shape'], [name+'_dim']),
        make_node('Sub', [name+'_dim', name+'_2'], [name+'_dim_m2']),
        make_node('Slice', [name+'_shape', name+'_dim_m2', name+'_dim'], [name+'_shape_last2']),
        make_node('Concat', [name+'_m1', name+'_shape_last2'], [name+'_shape_3d'], axis=0),
        make_node('Reshape', [input_nodes[0], name+'_shape_3d'], [name+'_data_3d']),
        make_node('Slice', [name+'_data_3d', name+'_cs', name+'_cs_p4', name+'_m1'],
                  [name+'_boxes']),
        make_node('Slice', [name+'_data_3d', name+'_si', name+'_si_p1', name+'_m1'],
                  [name+'_scores_raw']),
        make_node('Reshape', [name+'_scores_raw', name+'_scores_shape'], [name+'_scores']),
        make_node('Shape', [name+'_scores'], [name+'_scores_shape_actual']),
        make_node('NonMaxSuppression',
                  [name+'_boxes', name+'_scores', name+'_topk', name+'_ot', name+'_vt'],
                  [name+'_nms'], center_point_box=center_point_box),
        make_node('Slice', [name+'_nms', name+'_0', name+'_3', name+'_m1', name+'_2'],
                  [name+'_nms_sliced']),
        make_node('GatherND', [name+'_data_3d', name+'_nms_sliced'], [name+'_candidates']),
        make_node('Pad', [name+'_candidates', name+'_pad', name+'_m1_f'], [name+'_cand_padded']),
        make_node('Shape', [name+'_nms'], [name+'_nms_shape']),
        make_node('Slice', [name+'_nms_shape', name+'_0', name+'_1'], [name+'_cand_cnt']),
        make_node('Squeeze', [name+'_cand_cnt'], [name+'_cc_s'], axes=[0]),
        make_node('Range', [name+'_0_s', name+'_cc_s', name+'_1_s'], [name+'_cand_indices']),
        make_node('Slice', [name+'_scores_shape_actual', name+'_0', name+'_3',
                            name+'_m1', name+'_2'], [name+'_shape_bat_spat']),
        make_node('Slice', [name+'_shape_bat_spat', name+'_1', name+'_2'], [name+'_spat_dim']),
        make_node('Expand', [name+'_cand_cnt', name+'_shape_bat_spat'], [name+'_base_indices']),
        make_node('ScatterND', [name+'_base_indices', name+'_nms_sliced', name+'_cand_indices'],
                  [name+'_indices']),
        make_node('TopK', [name+'_indices', name+'_spat_dim'],
                  [name+'_indices_sorted', name+'__'], largest=0, axis=-1, sorted=1),
        make_node('Gather', [name+'_cand_padded', name+'_indices_sorted'], [name+'_gather']),
        make_node('Reshape', [name+'_gather', name+'_shape'], [name+'0'])
    ]
    return nodes
22bc975bc35ebe8e50f4749f981859460f695596
24,503
def fill76(text):
    """Any text. Wraps the text to fit in 76 columns."""
    return fill(text, 76)
953ed87d8cfbee7a10c752082783469e866e8540
24,504
def current_object(cursor_offset, line):
    """If in attribute completion, the object on which attribute should be
    looked up."""
    match = current_word(cursor_offset, line)
    if match is None:
        return None
    start, end, word = match
    matches = current_object_re.finditer(word)
    s = ""
    for m in matches:
        if m.end(1) + start < cursor_offset:
            if s:
                s += "."
            s += m.group(1)
    if not s:
        return None
    return LinePart(start, start + len(s), s)
cba608811a2081b382a2c522bb9d0651569739dd
24,505
def _is_match(option, useful_options, find_perfect_match):
    """ returns True if 'option' is among the useful_options """
    for useful_option in useful_options:
        if len(option) == sum(1 for o in option if o in useful_option):
            if not find_perfect_match or len(set(useful_option)) == len(set(option)):
                return True
    return False
bff60e1320744c16747926071afb3ee02022c55c
24,507
def pass_aligned_filtering(left_read, right_read, counter):
    """
    Test if the two reads pass the additional filters such as check for soft-clipped
    end next to the variant region, or overlapping region between the two reads.

    :param left_read: the left (or 5') most read
    :param right_read: the right (or 3') most read
    :param counter: Counter to report the number of reads filtered.
    :return: True or False
    """
    # in CIGAR tuples the operation is coded as an integer
    # https://pysam.readthedocs.io/en/latest/api.html#pysam.AlignedSegment.cigartuples
    if left_read.cigartuples[-1][0] == pysam.CSOFT_CLIP \
            or right_read.cigartuples[0][0] == pysam.CSOFT_CLIP:
        counter['Soft-clipped alignments'] += 1
    elif left_read.reference_end > right_read.reference_start:
        counter['Overlapping alignment'] += 1
    elif left_read.is_reverse != right_read.is_reverse:
        counter['Unexpected orientation'] += 1
    else:
        return True
    return False
78849f12541510216407b7b40fb29a0befc920d7
24,508
def detect_slow_oscillation(data: Dataset, algo: str = 'AASM/Massimini2004',
                            start_offset: float = None) -> pd.DataFrame:
    """
    Detect slow waves (slow oscillations) locations for each channel

    :param data: dataset holding the recording to analyse
    :param algo: which algorithm to use to detect slow waves. See wonambi methods:
        https://wonambi-python.github.io/gui/methods.html
    :param start_offset: offset between first epoch and edf - onset is measured from this
    :return: returns dataframe of slow-wave locations, with columns for chan, onset,
        duration and other properties, sorted by onset
    """
    detection = DetectSlowWave(algo)
    sos_detected = detection(data)
    sos_df = pd.DataFrame(sos_detected.events, dtype=float)
    col_map = {'start': 'onset',
               'end': None,
               'trough_time': 'trough_time',
               'zero_time': 'zero_time',
               'peak_time': 'peak_time',
               'trough_val': 'trough_uV',
               'peak_val': 'peak_uV',
               'dur': 'duration',
               'ptp': None,
               'chan': 'chan'}
    cols_to_keep = set(sos_df.columns) - set(k for k, v in col_map.items() if v is None)
    sos_df = sos_df.loc[:, cols_to_keep]
    sos_df.columns = [col_map[k] for k in sos_df.columns]
    if sos_df.shape[0] == 0:
        return None  # empty df
    sos_df['peak_time'] = sos_df['peak_time'] - sos_df['onset']
    sos_df['trough_time'] = sos_df['trough_time'] - sos_df['onset']
    sos_df['zero_time'] = sos_df['zero_time'] - sos_df['onset']
    sos_df['description'] = 'slow_osc'
    if start_offset is not None:
        sos_df['onset'] = sos_df['onset'] - start_offset
        sos_df = sos_df.loc[sos_df['onset'] >= 0, :]
    return sos_df.sort_values('onset')
a241196b56b6fb426fc9949ee82fca40c0c854f2
24,510
def _map_channels_to_measurement_lists(snirf):
    """Returns a map of measurementList index to measurementList group name."""
    prefix = "measurementList"
    data_keys = snirf["nirs"]["data1"].keys()
    mls = [k for k in data_keys if k.startswith(prefix)]

    def _extract_channel_id(ml):
        return int(ml[len(prefix):])

    return {_extract_channel_id(ml): ml for ml in mls}
d6d83c01baec5f345d58fff8a0d0107a40b8db37
24,511
def is_not_applicable_for_questionnaire(
    value: QuestionGroup, responses: QuestionnaireResponses
) -> bool:
    """Returns true if the given group's questions are not answerable for the given responses.

    That is, for all the questions in the given question group, only not applicable
    answers have been provided for the provided questionnaire response.
    """
    return value.is_not_applicable_for_responses(responses)
a534ca5560193c81e18f4028bd032b4a8e5adf8a
24,512
def _chebnodes(a, b, n):
    """Chebyshev nodes of rank n on interval [a,b]."""
    if not a < b:
        raise ValueError('Lower bound must be less than upper bound.')
    return np.array([1/2*((a+b)+(b-a)*np.cos((2*k-1)*np.pi/(2*n))) for k in range(1, n+1)])
4378468aac0642f15b64dcdee75dcb970aab11f7
24,513
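For instance, the three nodes from `_chebnodes` above on [-1, 1] are cos(pi/6), cos(pi/2), cos(5*pi/6):

import numpy as np

print(np.round(_chebnodes(-1.0, 1.0, 3), 3))  # [ 0.866  0.    -0.866]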
def Rx_matrix(theta):
    """Rotation matrix around the X axis"""
    return np.array([
        [1, 0, 0],
        [0, np.cos(theta), -np.sin(theta)],
        [0, np.sin(theta), np.cos(theta)]
    ])
c7b689b9e6042aa84689003e2de6ffff2229eb69
24,515
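A sanity check of `Rx_matrix` above: rotating the Y unit vector by 90 degrees about X yields the Z unit vector:

import numpy as np

v = Rx_matrix(np.pi / 2) @ np.array([0.0, 1.0, 0.0])
print(np.round(v, 6))  # [0. 0. 1.]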
def spawn_actor(world: carla.World, blueprint: carla.ActorBlueprint,
                spawn_point: carla.Transform, attach_to: carla.Actor = None,
                attachment_type=carla.AttachmentType.Rigid) -> carla.Actor:
    """Tries to spawn an actor in a CARLA simulator.

    :param world: a carla.World instance.
    :param blueprint: specifies which actor has to be spawned.
    :param spawn_point: where to spawn the actor. A transform specifies the location and rotation.
    :param attach_to: whether the spawned actor has to be attached (linked) to another one.
    :param attachment_type: the kind of the attachment. Can be 'Rigid' or 'SpringArm'.
    :return: a carla.Actor instance.
    """
    actor = world.try_spawn_actor(blueprint, spawn_point, attach_to, attachment_type)
    if actor is None:
        raise ValueError(f'Cannot spawn actor. Try changing the spawn_point '
                         f'({spawn_point.location}) to something else.')
    return actor
83d29b21e76f52f1928009e22cee6a635ef4d025
24,516
def partition(lst, size):
    """Split list @lst into @size interleaved sublists (see the example below)."""
    return [lst[i::size] for i in range(size)]
af7071a5aac36a51f449f153df145d9218808a4a
24,517
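Note the interleaving in `partition` above — each sublist takes every size-th element, so the sublists are only roughly even in length:

print(partition([1, 2, 3, 4, 5, 6, 7], 3))
# [[1, 4, 7], [2, 5], [3, 6]]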
from django.utils.safestring import mark_safe


def form_errors_json(form=None):
    """It prints form errors as JSON."""
    if form:
        return mark_safe(dict(form.errors.items()))  # noqa: S703, S308
    return {}
d9748d5ce4578855775af24d1a758030ad3fa432
24,518
def get_semantic_ocs_version_from_config():
    """
    Returning OCS semantic version from config.

    Returns:
        semantic_version.base.Version: Object of semantic version for OCS.
    """
    return get_semantic_version(config.ENV_DATA["ocs_version"], True)
346aa6aacff9a758cf06b4a3dc4977e98e9ca501
24,519
from typing import List

import pandas as pd


def get_non_ntile_cols(frame: pd.DataFrame) -> List[str]:
    """
    :param frame: data frame to get columns of
    :return: all columns in the frame that don't contain 'Ntile'
    """
    return [col for col in frame.columns if 'Ntile' not in col]
93970b576381aa668ce75d77f03793380445d9e4
24,521
import datetime
from typing import Any, Optional

import ciso8601


def deserialize_date(value: Any) -> Optional[datetime.datetime]:
    """A flexible converter for str -> datetime.datetime"""
    if value is None:
        return None
    if isinstance(value, datetime.datetime):
        return value
    if isinstance(value, str):
        # datetime.datetime.fromisoformat(...) can't parse Notion's dates,
        # and, anyway, this is faster
        return ciso8601.parse_datetime(value)
    raise TypeError(f'Invalid type {type(value)} for date property')
15cdd07ad4bd5873d8ed01d3eb9ce3b4e780ca44
24,522
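A usage sketch of `deserialize_date` above (the timestamp is hypothetical; assumes the ciso8601 package is installed):

import datetime

dt = deserialize_date("2021-06-01T12:00:00+00:00")
assert isinstance(dt, datetime.datetime)
assert deserialize_date(None) is None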
def intersect(x1, x2, y1, y2, a1, a2, b1, b2):
    """
    Return True if the (x1,x2,y1,y2) and (a1,a2,b1,b2) rectangles intersect.
    """
    return overlap(x1, x2, a1, a2) & overlap(y1, y2, b1, b2)
1e9c530b1d5e085df073b8c32d874ef457e2246a
24,523
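The `overlap` helper is not shown in the `intersect` snippet above; a minimal 1-D sketch consistent with the call sites (an assumption, not the source's definition) could be:

def overlap(lo1, hi1, lo2, hi2):
    # True when the closed intervals [lo1, hi1] and [lo2, hi2] share any point
    return (lo1 <= hi2) & (lo2 <= hi1)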
from typing import List

import numpy as np


def recording_to_chunks(fingerprints: np.ndarray,
                        samples_per_chunk: int) -> List[np.ndarray]:
    """Breaks fingerprints of a recording into fixed-length chunks."""
    chunks = []
    for pos in range(0, len(fingerprints), samples_per_chunk):
        chunk = fingerprints[pos:pos + samples_per_chunk]
        # exclude partial chunks (at end)
        if chunk.shape[0] == samples_per_chunk:
            chunks.append(chunk)
    return chunks
eae1a3b882e545a8dc08f029ddb5113dcdf1bca4
24,525
def coset_enumeration_c(fp_grp, Y):
    """
    >>> from sympy.combinatorics.free_group import free_group
    >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_c
    >>> F, x, y = free_group("x, y")
    >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
    >>> C = coset_enumeration_c(f, [x])
    >>> C.table
    [[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1]]
    """
    # Initialize a coset table C for < X|R >
    C = CosetTable(fp_grp, Y)
    X = fp_grp.generators
    R = fp_grp.relators()
    A = C.A
    # replace all the elements by cyclic reductions
    R_cyc_red = [rel.identity_cyclic_reduction() for rel in R]
    R_c = list(chain.from_iterable((rel.cyclic_conjugates(), (rel**-1).cyclic_conjugates())
                                   for rel in R_cyc_red))
    R_set = set()
    for conjugate in R_c:
        R_set = R_set.union(conjugate)
    # a list of subsets of R_c whose words start with "x".
    R_c_list = []
    for x in C.A:
        r = set([word for word in R_set if word[0] == x])
        R_c_list.append(r)
        R_set.difference_update(r)
    for w in Y:
        C.scan_and_fill_f(0, w)
    for x in A:
        C.process_deductions(R_c_list[C.A_dict[x]], R_c_list[C.A_dict_inv[x]])
    i = 0
    while i < len(C.omega):
        alpha = C.omega[i]
        i += 1
        for x in C.A:
            if C.table[alpha][C.A_dict[x]] is None:
                C.define_f(alpha, x)
                C.process_deductions(R_c_list[C.A_dict[x]], R_c_list[C.A_dict_inv[x]])
    return C
0efeacfeeb2b20275378c58a3aacaed07ade57be
24,526
def slr_pulse(num=N, time_bw=TBW, ptype=PULSE_TYPE, ftype=FILTER_TYPE, d_1=PBR, d_2=SBR,
              root_flip=ROOT_FLIP, multi_band=MULTI_BAND, n_bands=N_BANDS,
              phs_type=PHS_TYPE, band_sep=BAND_SEP):
    """Use Shinnar-Le Roux algorithm to generate pulse"""
    if root_flip is False:
        complex_pulse = rf.dzrf(n=num, tb=time_bw, ptype=ptype, ftype=ftype, d1=d_1, d2=d_2)
        amp_arr = complex_pulse
    else:
        amp_arr, b_rootflip = slr_rootflip(ROOT_FLIP_ANGLE)
    phs_arr = np.zeros(num)
    for idx in range(num):
        if amp_arr[idx] < 0:
            phs_arr[idx] = 180
        else:
            phs_arr[idx] = 0
    if multi_band is True:
        amp_arr = rf.multiband.mb_rf(amp_arr, n_bands, band_sep, phs_type)
    # prepare pulse for instrument, which takes absolute only
    # cast negative values to positive
    amp_arr_abs = np.abs(amp_arr)
    # shift amplitude such that the lowest value is 0
    amp_arr_abs = amp_arr_abs - amp_arr_abs.min()
    # fold back phase when it exceeds 360
    phs_arr = phs_arr % 360
    freq_arr = (np.diff(phs_arr) / num) / 360
    return amp_arr, freq_arr, phs_arr, amp_arr_abs
0986b6ea8adffd90c108308365ebf3172a6459d0
24,527
def policy_options(state, Q_omega, epsilon=0.1):
    """ Epsilon-greedy policy used to select options """
    if np.random.uniform() < epsilon:
        return np.random.choice(range(Q_omega.shape[1]))
    else:
        return np.argmax(Q_omega[state])
66e36b81fdec06822ebb958611deca23bd64191b
24,528
import tempfile
import time


def test_ps_s3_creation_triggers_on_master():
    """ test object creation s3 notifications in using put/copy/post on master"""
    if skip_push_tests:
        return SkipTest("PubSub push tests don't run in teuthology")
    hostname = get_ip()
    proc = init_rabbitmq()
    if proc is None:
        return SkipTest('end2end amqp tests require rabbitmq-server installed')
    zones, _ = init_env(require_ps=False)
    realm = get_realm()
    zonegroup = realm.master_zonegroup()

    # create bucket
    bucket_name = gen_bucket_name()
    bucket = zones[0].create_bucket(bucket_name)
    topic_name = bucket_name + TOPIC_SUFFIX

    # start amqp receiver
    exchange = 'ex1'
    task, receiver = create_amqp_receiver_thread(exchange, topic_name)
    task.start()

    # create s3 topic
    endpoint_address = 'amqp://' + hostname
    endpoint_args = 'push-endpoint=' + endpoint_address + \
                    '&amqp-exchange=' + exchange + '&amqp-ack-level=broker'
    topic_conf = PSTopicS3(zones[0].conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
    topic_arn = topic_conf.set_config()
    # create s3 notification
    notification_name = bucket_name + NOTIFICATION_SUFFIX
    topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn,
                        'Events': ['s3:ObjectCreated:Put', 's3:ObjectCreated:Copy']
                        }]
    s3_notification_conf = PSNotificationS3(zones[0].conn, bucket_name, topic_conf_list)
    response, status = s3_notification_conf.set_config()
    assert_equal(status / 100, 2)

    # create objects in the bucket using PUT
    key = bucket.new_key('put')
    key.set_contents_from_string('bar')
    # create objects in the bucket using COPY
    bucket.copy_key('copy', bucket.name, key.name)
    # create objects in the bucket using multi-part upload
    fp = tempfile.TemporaryFile(mode='w')
    fp.write('bar')
    fp.close()
    uploader = bucket.initiate_multipart_upload('multipart')
    fp = tempfile.NamedTemporaryFile(mode='r')
    uploader.upload_part_from_file(fp, 1)
    uploader.complete_upload()
    fp.close()

    print('wait for 5sec for the messages...')
    time.sleep(5)

    # check amqp receiver
    keys = list(bucket.list())
    receiver.verify_s3_events(keys, exact_match=True)

    # cleanup
    stop_amqp_receiver(receiver, task)
    s3_notification_conf.del_config()
    topic_conf.del_config()
    for key in bucket.list():
        key.delete()
    # delete the bucket
    zones[0].delete_bucket(bucket_name)
    clean_rabbitmq(proc)
bb0770cd80968d8878f0a3c379f5ce2da9863c8f
24,529
import math


def weights_init(init_type='gaussian'):
    """ from https://github.com/naoto0804/pytorch-inpainting-with-partial-conv/blob/master/net.py
    """
    def init_fun(m):
        classname = m.__class__.__name__
        if (classname.find('Conv') == 0 or classname.find('Linear') == 0) and hasattr(m, 'weight'):
            if init_type == 'gaussian':
                nn.init.normal_(m.weight, 0.0, 0.02)
            elif init_type == 'xavier':
                nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                assert 0, "Unsupported initialization: {}".format(init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                nn.init.constant_(m.bias, 0.0)

    return init_fun
d65dee3744daf59a2db832b5c4866bee2131b4d6
24,530
def title(default=None, level="header"):
    """
    A decorator that adds an optional title argument to a component.
    """

    def decorator(fn):
        loc = get_argument_default(fn, "where", None) or st

        @wraps(fn)
        def wrapped(*args, title=default, level=level, header=None,
                    subheader=None, where=loc, **kwargs):
            if header:
                where.header(str(header))
            elif subheader:
                where.subheader(str(subheader))
            elif title:
                if level == "header":
                    where.header(str(title))
                elif level == "subheader":
                    where.subheader(str(title))
                elif level == "bold":
                    where.markdown(f"**{title}**")
                else:
                    raise ValueError(f"invalid title level: {level!r}")
            kwargs["where"] = where
            return fn(*args, **kwargs)

        return wrapped

    return decorator
c11a3ee7ccff5e6934fba857d438743464dd653e
24,531
def _rect_to_css(rect):
    """
    Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order

    :param rect: a dlib 'rect' object
    :return: a plain tuple representation of the rect in (top, right, bottom, left) order
    """
    return rect.top(), rect.right(), rect.bottom(), rect.left()
e3439cc0eb30186b8fc905f518ff21883175b3e2
24,533
def client():
    """Client Fixture."""
    client_obj = Client(base_url=BASE_URL)
    return client_obj
bac2ccd038eb587b4dd67ce0cc63bef63af9c365
24,534
def encode_one_hot(s):
    """One-hot encode all characters of the given string."""
    all = []
    for c in s:
        x = np.zeros((INPUT_VOCAB_SIZE))
        index = char_indices[c]
        x[index] = 1
        all.append(x)
    return all
e4bc2b02cea4dbf74346cbd672cb58246abe4edc
24,535
import datetime


def date_to_datetime(date, time_choice='min'):
    """
    Convert date to datetime.

    :param date: date to convert
    :param time_choice: max or min
    :return: datetime
    """
    choice = getattr(datetime.datetime, 'min' if time_choice == 'min' else 'max').time()
    return timezone.make_aware(
        datetime.datetime.combine(date, choice),
        timezone.get_current_timezone(),
    )
9e429bf71288ffc3bd56b682f2e24fceb0ff49d4
24,536
def standardize_cell(atoms, cell_type):
    """Standardize the cell of the atomic structure.

    Parameters:

    atoms: `ase.Atoms`
        Atomic structure.

    cell_type: { 'standard', 'standard_no_symmetries', 'primitive', None}
        Starting from the input cell, creates a standard cell according to same standards
        before the supercell generation. \n
        `cell_type` = 'standard' creates a standard conventional cell.
        See :py:mod:`ai4materials.utils.utils_crystals.get_conventional_std_cell`. \n
        `cell_type` = 'standard_no_symmetries' creates a standard conventional cell
        without using symmetries.
        See :py:mod:`ai4materials.utils.utils_crystals.get_conventional_std_cell_no_sym`. \n
        `cell_type` = 'primitive' creates a standard primitive cell.
        See :py:mod:`ai4materials.utils.utils_crystals.get_primitive_std_cell`. \n
        `cell_type` = `None` does not create any cell. It simply uses the unit cell as
        input for the supercell generation.

    Returns:

    `ase.Atoms`
        Atomic structure in the standard cell of the selected type.

    .. codeauthor:: Angelo Ziletti <[email protected]>
    """
    if cell_type == 'standard':
        atoms = get_conventional_std_cell(atoms)
    elif cell_type == 'standard_no_symmetries':
        atoms = get_conventional_std_cell_no_sym(atoms)
    elif cell_type == 'primitive':
        atoms = get_primitive_std_cell(atoms)
    elif cell_type is None:
        pass
    else:
        raise ValueError("Unrecognized cell_type value.")
    return atoms
4005cf7afd6f4992f3cc271608f0b8c84649d6b1
24,537
def get_biggan_stats():
    """ precomputed biggan statistics """
    center_of_mass = [137 / 255., 127 / 255.]
    object_size = [213 / 255., 210 / 255.]
    return center_of_mass, object_size
6576e13b7a68369e90b2003171d946453bafd212
24,538
def get_input_var_value(soup, var_id):
    """Get the value from text input variables.

    Use when you see this HTML format:
        <input id="wired_config_var" ... value="value">

    Args:
        soup (soup): soup pagetext that will be searched.
        var_id (string): The id of a var, used to find its value.

    Returns:
        (string): The value of the variable
    """
    try:
        var_value = soup.find('input', {'id': var_id}).get('value')
        return var_value
    except AttributeError:
        print('\nERROR: <' + var_id + '> not found!\nPagesoup:\n\n', soup)
        raise LookupError
5a9dd65a285c62e0e5e79584858634cb7b0ece75
24,539
from typing import List
from typing import Any
import logging


def get_top(metric: str, limit: int) -> List[List[Any]]:
    """Get top stocks based on metric from sentimentinvestor [Source: sentimentinvestor]

    Parameters
    ----------
    metric : str
        Metric to get top tickers for
    limit : int
        Number of tickers to get

    Returns
    -------
    List[List[Any]]
        List of tickers and scores
    """
    data = sentipy.sort(metric, limit)

    table: List[List[Any]] = []
    for index, stock in enumerate(data):
        if not hasattr(stock, "symbol") or not hasattr(stock, metric):
            logging.warning("data for stock %s is incomplete, ignoring", index + 1)
            table.append([])
        else:
            table.append([index + 1, stock.symbol, stock.__getattribute__(metric)])

    return table
c203fcbe24ccf3d0c2253961d36ec7b556c8651c
24,541
def test_add_single_entities(
    reference_data: np.ndarray,
    upper_bound: np.ndarray,
    lower_bound: np.ndarray,
    ishan: Entity,
) -> None:
    """Test the addition of SEPTs"""
    tensor1 = SEPT(
        child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
    )
    tensor2 = SEPT(
        child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
    )

    result = tensor2 + tensor1
    assert isinstance(result, SEPT), "Addition of two SEPTs is wrong type"
    assert (
        result.max_vals == 2 * upper_bound
    ).all(), "Addition of two SEPTs results in incorrect max_val"
    assert (
        result.min_vals == 2 * lower_bound
    ).all(), "Addition of two SEPTs results in incorrect min_val"

    # Try with negative values
    tensor3 = SEPT(
        child=reference_data * -1.5,
        entity=ishan,
        max_vals=upper_bound,
        min_vals=lower_bound,
    )

    result = tensor3 + tensor1
    assert isinstance(result, SEPT), "Addition of two SEPTs is wrong type"
    assert (
        result.max_vals == tensor3.max_vals + tensor1.max_vals
    ).all(), "SEPT + SEPT results in incorrect max_val"
    assert (
        result.min_vals == tensor3.min_vals + tensor1.min_vals
    ).all(), "SEPT + SEPT results in incorrect min_val"
    return None
48531867a74d7267ae65d4350e82d26cae8bef44
24,542
def prob_get_expected_after_certain_turn(turns_later: int, turns_remain: int,
                                         tiles_expect: int) -> float:
    """The probability of getting an expected tile after `turns_later` sets of turns.

    :param turns_later: Get the expected tile after `turns_later` sets of turns
    :param turns_remain: The remaining turns
    :param tiles_expect: The number of expected tiles
    :return: Probability
    """
    tiles_remain = 4 * turns_remain + 14
    if tiles_expect > turns_later:
        greater = tiles_remain - turns_later
        less = tiles_remain - tiles_expect
    else:
        greater = tiles_remain - tiles_expect
        less = tiles_remain - turns_later
    numerator, denominator = 1, 1
    i, j = less, greater
    while i > tiles_remain - turns_later - tiles_expect:
        numerator = numerator * i
        i = i - 1
    while j > greater:
        denominator = denominator * j
        j = j - 1
    return numerator / denominator
6575c22302b73b58b2bd9aad5068ffe723fb5fe3
24,543
def get_gpcr_calpha_distances(pdb, xtc, gpcr_name, res_dbnum,
                              first_frame=0, last_frame=-1, step=1):
    """
    Load distances between all selected atoms.

    Parameters
    ----------
    pdb : str
        File name for the reference file (PDB or GRO format).
    xtc : str
        File name for the trajectory (xtc format).
    gpcr_name : str
        Name of the GPCR as in the GPCRdb.
    res_dbnum : list
        Relative GPCR residue numbers.
    first_frame : int, default=0
        First frame to return of the features. Zero-based.
    last_frame : int, default=-1
        Last frame to return of the features. Zero-based.
    step : int, default=1
        Subsampling step width when reading the frames.

    Returns
    -------
    feature_names : list of str
        Names of all C-alpha distances.
    feature_labels : list of str
        Labels containing GPCRdb numbering of the residues.
    features_data : numpy array
        Data for all C-alpha distances [Å].
    """
    # Select residues from relative residue numbers
    resnums, reslabels = select_gpcr_residues(gpcr_name, res_dbnum)
    # Create the selection string
    selection = 'name CA and resid'
    for rn in resnums:
        selection += ' %i' % rn
    # Create the GPCRdb distance labels
    distlabels = []
    k = -1
    for i in range(len(reslabels)):
        for j in range(i + 1, len(reslabels)):
            k += 1
            _dl = 'CA DIST: %s - %s' % (reslabels[i], reslabels[j])
            distlabels.append(_dl)
    # Calculate the distances and get the sequential names
    names, data = get_atom_self_distances(pdb, xtc, selection=selection,
                                          first_frame=first_frame,
                                          last_frame=last_frame, step=step)
    return names, distlabels, data
3465246d610510f2976813fcc69c394e98452292
24,544
def main(yumrepomap=None, **kwargs):
    """
    Checks the distribution version and installs yum repo definition files
    that are specific to that distribution.

    :param yumrepomap: list of dicts, each dict contains two or three keys.
        'url': the url to the yum repo definition file
        'dist': the linux distribution to which the repo should be installed.
            one of 'amazon', 'redhat', 'centos', or 'all'. 'all' is a special
            keyword that maps to all distributions.
        'epel_version': optional. match the major version of the epel-release
            that applies to the system. one of '6' or '7'. if not specified,
            the repo is installed to all systems.
        Example:
            [
                {
                    'url' : 'url/to/the/yum/repo/definition.repo',
                    'dist' : 'amazon' or 'redhat' or 'centos' or 'all',
                    'epel_version' : '6' or '7',
                },
            ]
    """
    scriptname = __file__
    print('+' * 80)
    print('Entering script -- {0}'.format(scriptname))
    print('Printing parameters...')
    print('    yumrepomap = {0}'.format(yumrepomap))

    if not yumrepomap:
        print('`yumrepomap` is empty. Nothing to do!')
        return None

    if not isinstance(yumrepomap, list):
        raise SystemError('`yumrepomap` must be a list!')

    # Read first line from /etc/system-release
    release = None
    try:
        with open(name='/etc/system-release', mode='rb') as f:
            release = f.readline().strip()
    except Exception as exc:
        raise SystemError('Could not read /etc/system-release. '
                          'Error: {0}'.format(exc))

    # Search the release file for a match against _supported_dists
    m = _match_supported_dist.search(release.lower())
    if m is None:
        # Release not supported, exit with error
        raise SystemError('Unsupported OS distribution. OS must be one of: '
                          '{0}.'.format(', '.join(_supported_dists)))

    # Assign dist,version from the match groups tuple, removing any spaces
    dist, version = (x.translate(None, ' ') for x in m.groups())

    # Determine epel_version
    epel_version = None
    if 'amazon' == dist:
        epel_version = _amazon_epel_versions.get(version, None)
    else:
        epel_version = version.split('.')[0]

    if epel_version is None:
        raise SystemError('Unsupported OS version! dist = {0}, version = {1}.'
                          .format(dist, version))

    for repo in yumrepomap:
        # Test whether this repo should be installed to this system
        if repo['dist'] in [dist, 'all'] and repo.get('epel_version', 'all') \
                in [epel_version, 'all']:
            # Download the yum repo definition to /etc/yum.repos.d/
            url = repo['url']
            repofile = '/etc/yum.repos.d/{0}'.format(url.split('/')[-1])
            download_file(url, repofile)

    print('{0} complete!'.format(scriptname))
    print('-' * 80)
1caed81f53cd0dc2e1963aa1b53bc48c1ef71dd3
24,545
def zero_pad1d(inputs, padding=0):
    """Zero padding for 1d tensor

    Args:
    -----------------------------
    inputs : tvm.te.tensor.Tensor
        shape [batch, channel, length]
    padding: (optional:0) int or tuple
    -----------------------------

    Returns:
    -----------------------------
    tvm.te.tensor.Tensor
        shape [batch, channel, padded_length]
    -----------------------------
    """
    padding = (padding, padding) if isinstance(padding, (int, tvm.tir.IntImm)) else padding
    assert_print(isinstance(padding, tuple), "type(padding)={}".format(type(padding)))
    assert_print(len(padding) == 2)

    padding_zero = tvm.tir.expr.const(0, inputs.dtype)

    batch_size, in_channel, in_len = inputs.shape
    return tvm.te.compute(
        (batch_size, in_channel, in_len + padding[0] + padding[1]),
        lambda b, c, l: tvm.te.if_then_else(
            tvm.te.all(l >= padding[0], l < in_len + padding[0]),
            inputs[b, c, l - padding[0]],
            padding_zero
        )
    )
8135ffd8447d5fbc84988953a2bfca14b51d3f83
24,546
import torch
import math


def gelu(x):
    """gelu activation function copied from pytorch-pretrained-BERT."""
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
35c0f45f904b2381acc95f5a2b4f28cec9fa924b
24,547
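A spot check of `gelu` above against known values (gelu(0) = 0; erf(1/sqrt(2)) ≈ 0.6827):

import torch

print(gelu(torch.tensor([-1.0, 0.0, 1.0])))
# tensor([-0.1587,  0.0000,  0.8413])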
import pandas as pd
import requests


def stock_fund_stock_holder(stock: str = "600004") -> pd.DataFrame:
    """
    Sina Finance - equity and shareholders - fund holdings
    https://vip.stock.finance.sina.com.cn/corp/go.php/vCI_FundStockHolder/stockid/600004.phtml

    :param stock: stock code
    :type stock: str
    :return: Sina Finance fund-holdings table
    :rtype: pandas.DataFrame
    """
    url = f"https://vip.stock.finance.sina.com.cn/corp/go.php/vCI_StockStructure/stockid/{stock}.phtml"
    r = requests.get(url)
    temp_df = pd.read_html(r.text)[13].iloc[:, :5]
    temp_df.columns = [*range(5)]
    big_df = pd.DataFrame()
    need_range = temp_df[temp_df.iloc[:, 0].str.find("截止日期") == 0].index.tolist() + [len(temp_df)]
    for i in range(len(need_range) - 1):
        truncated_df = temp_df.iloc[need_range[i]:need_range[i + 1], :]
        truncated_df = truncated_df.dropna(how="all")
        temp_truncated = truncated_df.iloc[2:, :]
        temp_truncated.reset_index(inplace=True, drop=True)
        concat_df = pd.concat([temp_truncated, truncated_df.iloc[0, 1:]], axis=1)
        concat_df.columns = truncated_df.iloc[1, :].tolist() + ["截止日期"]
        concat_df["截止日期"] = concat_df["截止日期"].fillna(method="ffill")
        concat_df["截止日期"] = concat_df["截止日期"].fillna(method="bfill")
        big_df = pd.concat([big_df, concat_df], axis=0, ignore_index=True)
    big_df.dropna(inplace=True)
    big_df.reset_index(inplace=True, drop=True)
    return big_df
acde3d06b9fabd9a22223401b6b9b947a1e248ff
24,548
def set_to_available(request, slug, version):
    """
    Updates the video status. Sets the version already encoded to available.
    """
    video = get_object_or_404(Video, slug=slug)
    status, created = VideoStatus.objects.get_or_create(video_slug=slug)
    if version == 'web':
        status.web_available = True
    elif version == 'cocreate':
        status.cocreate_available = True
    else:
        status.mobile_available = True
    status.is_encoding = False
    status.encode_duration = Decimal(str(status.encode_duration))
    status.save()

    # If the video is part of a cocreate project, auto-compile the cocreate project.
    try:
        if video.section and video.section.cocreate:
            cocreate_obj = video.section.cocreate
            init_cocreate(cocreate_obj, generate_slug)
    except Section.DoesNotExist:
        pass

    return HttpResponse("OK")
ead832327d733b82b0d1bc38efd241baab039ed2
24,549
def generate_solve_c():
    """Generate C source string for the recursive solve() function."""
    piece_letters = 'filnptuvwxyz'
    stack = []
    lines = []
    add = lines.append

    add('#define X_PIECE_NUM {}'.format(piece_letters.index('x')))
    add("""
void solve(char* board, int pos, unsigned int used) {
    if (used == (1 << NUM_PIECES) - 1) {
        display_solution(board);
        return;
    }
    while (board[pos]) {
        pos++;
    }
""")
    indent = ' ' * 4
    for c in ORIENTATIONS:
        if c == '.':
            indent = indent[:-4]
            add(indent + '}')
            stack.pop()
        elif c > 'a':
            # Found a piece that fits: if it's not yet used, place it and
            # solve rest of board recursively
            piece_num = piece_letters.index(c)
            add(indent + 'if ((used & (1<<{})) == 0) {{'.format(piece_num))
            add(indent + '    _num_tries++;')
            add(indent + '    used ^= 1<<{};'.format(piece_num))
            for offset in stack:
                add(indent + '    board[pos + {}] = {!r};'.format(offset, c))
            add(indent + '    solve(board, pos, used);')
            for offset in stack:
                add(indent + '    board[pos + {}] = 0;'.format(offset))
            add(indent + '    used ^= 1<<{};'.format(piece_num))
            add(indent + '}')
            indent = indent[:-4]
            add(indent + '}')
            stack.pop()
        else:
            i = ord(c) - ord('A') + 3
            x, y = i % 8, i // 8
            offset = y * TOTAL_WIDTH + x - 3
            add(indent + 'if (board[pos + {}] == 0) {{'.format(offset))
            indent += ' ' * 4
            stack.append(offset)

    add('}')
    return '\n'.join(lines)
dde70d4cdbeb8b691c1ffcb61ba524b2c1df9b2c
24,551
def get_permission_info(room):
    """
    Fetches permissions about the room, like ban info etc.

    # Return Value
    dict of session_id to current permissions, a dict containing the name of
    the permission mapped to a boolean value.
    """
    return jsonify({k: addExtraPermInfo(v) for k, v in room.permissions.items()})
aab7aa691e1e34e1bf20e3de744f8d4352a2421e
24,552
def ravel(m):
    """ravel(m) returns a 1d array corresponding to all the elements of its argument."""
    return reshape(m, (-1,))
728204f77737750783fef9818c102522f17c472e
24,553
def parse_index_file(filename):
    """Parse index file."""
    index = []
    for line in open(filename):
        # My additions
        print("Printing this unstripped text:", line)
        index.append(int(line.strip()))
    return index
a76c4e94c593a234fd858d369f0133a5170ec8bf
24,554
import click
import socket


def init():
    """Top level command handler."""

    @click.command()
    @click.option('--port', type=int, help='Port to listen.', default=0)
    @click.option('--tun-dev', type=str, required=True,
                  help='Device to use when establishing tunnels.')
    @click.option('--tun-addr', type=str, required=False,
                  help='Local IP address to use when establishing tunnels.')
    @click.option('--tun-cidrs', type=cli.LIST, required=True,
                  help='CIDRs block assigned to the tunnels.')
    @click.option('--policies-dir', type=str, required=True,
                  help='Directory where to look for policies')
    @click.option('--state-dir', type=str, required=False,
                  default='/var/run/warpgate',
                  help='Directory where running state is kept')
    def warpgate_policy_server(port, tun_dev, tun_addr, tun_cidrs,
                               policies_dir, state_dir):
        """Run warpgate policy server."""
        myhostname = socket.getfqdn()
        policy_server.run_server(
            admin_address=myhostname,
            admin_port=port,
            tun_devname=tun_dev,
            tun_address=(
                tun_addr if tun_addr else socket.gethostbyname(myhostname)
            ),
            tun_cidrs=tun_cidrs,
            policies_dir=policies_dir,
            state_dir=state_dir
        )

    return warpgate_policy_server
ba660e7f6698457951e766ce402857a6a5e4bc86
24,555
def check_collision(bird_rect: object, pipes: list, collide_sound: object):
    """ Checks for collision with the Pipe and the Base """
    for pipe in pipes:
        if bird_rect.colliderect(pipe):
            collide_sound.play()
            return False
    if bird_rect.bottom >= gv.BASE_TOP:
        return False
    return True
080c8a6142397e3c1b91b0e3a4dfbd3ed7f1acde
24,556
def compute_ranking_scores(ranking_scores, global_ranks_to_save, rank_per_query):
    """Compute ranking scores (MRR and MAP) and a bunch of interesting ranks to save
    to file from a list of ranks.

    Args:
        ranking_scores: Ranking scores previously computed
        global_ranks_to_save: Global interesting ranks to save to file
        rank_per_query: List of ranks computed by the model evaluation procedure

    Returns:
        ranking scores (in a dict) and a dict of global interesting ranks to save to file
    """
    # compute binarized (0/1) relevance scores
    rs = [np.asarray([i == rank['ground_truth_label'] for i in rank['rank_labels']],
                     dtype=np.dtype(int))
          for rank in rank_per_query]

    # compute and log MRR and MAP scores
    ranking_scores['MRR'].append(mean_reciprocal_rank(rs))
    ranking_scores['MAP'].append(mean_average_precision(rs))

    # compute a bunch of indexes for interesting queries to save in csv files as examples
    max_rr, max_rr_idx = max_reciprocal_rank(rs)
    min_rr, min_rr_idx = min_reciprocal_rank(rs)
    max_ap, max_ap_idx = max_average_precision(rs)
    min_ap, min_ap_idx = min_average_precision(rs)
    # save indexes (and values) just computed to a dict
    queries_indexes = {
        'max_rr': {'value': max_rr, 'index': max_rr_idx},
        'min_rr': {'value': min_rr, 'index': min_rr_idx},
        'max_ap': {'value': max_ap, 'index': max_ap_idx},
        'min_ap': {'value': min_ap, 'index': min_ap_idx}
    }

    # get interesting queries
    ranks_to_save = {
        key: {
            'value': scores['value'],
            'rank': rank_per_query[scores['index']]
        } for key, scores in queries_indexes.items()
    }

    # if the global ranks to save dict is none set it to the current ranks to save
    if global_ranks_to_save is None:
        global_ranks_to_save = ranks_to_save
    else:
        # otherwise select from the current ranks to save the ones that are more
        # 'interesting' than those already in the global ranks to save dict
        if ranks_to_save['max_rr']['value'] > global_ranks_to_save['max_rr']['value']:
            global_ranks_to_save['max_rr']['value'] = ranks_to_save['max_rr']['value']
            global_ranks_to_save['max_rr']['rank'] = ranks_to_save['max_rr']['rank']
        if ranks_to_save['min_rr']['value'] < global_ranks_to_save['min_rr']['value']:
            global_ranks_to_save['min_rr']['value'] = ranks_to_save['min_rr']['value']
            global_ranks_to_save['min_rr']['rank'] = ranks_to_save['min_rr']['rank']
        if ranks_to_save['max_ap']['value'] > global_ranks_to_save['max_ap']['value']:
            global_ranks_to_save['max_ap']['value'] = ranks_to_save['max_ap']['value']
            global_ranks_to_save['max_ap']['rank'] = ranks_to_save['max_ap']['rank']
        if ranks_to_save['min_ap']['value'] < global_ranks_to_save['min_ap']['value']:
            global_ranks_to_save['min_ap']['value'] = ranks_to_save['min_ap']['value']
            global_ranks_to_save['min_ap']['rank'] = ranks_to_save['min_ap']['rank']

    # return computed ranking scores and global ranks to save dict
    return ranking_scores, global_ranks_to_save
a25a664b67e35ff9b35327b364e84eaf9ae37aaa
24,557
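The metric helpers the record relies on (mean_reciprocal_rank, mean_average_precision, and the max/min variants) are not part of the snippet. A sketch of the two standard definitions, assuming each element of rs is a binarized relevance vector as built above:

import numpy as np

def mean_reciprocal_rank(rs):
    # 1 / (index of first relevant item + 1), or 0 if nothing is relevant
    firsts = (np.asarray(r).nonzero()[0] for r in rs)
    return np.mean([1. / (f[0] + 1) if f.size else 0. for f in firsts])

def mean_average_precision(rs):
    def average_precision(r):
        r = np.asarray(r) != 0
        # precision@k at every position k where the item is relevant
        precisions = [np.mean(r[:k + 1]) for k in range(r.size) if r[k]]
        return np.mean(precisions) if precisions else 0.
    return np.mean([average_precision(r) for r in rs])

rs = [np.array([0, 1, 0]), np.array([1, 0, 0])]
print(mean_reciprocal_rank(rs))    # (1/2 + 1/1) / 2 = 0.75
print(mean_average_precision(rs))  # (0.5 + 1.0) / 2 = 0.75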
import numpy as np


def AirAbsorptionRelaxationFrequencies(T, p, H, T0, p_r):
    """ Calculates the relaxation frequencies for air absorption conforming to
    ISO 9613-1. Called by :any:`AirAbsorptionCoefficient`.

    Parameters
    ----------
    T : float
        Temperature in K.
    p : float
        Pressure in Pa.
    H : float
        Humidity as molar concentration in percent.
    T0 : float
        Reference temperature in K, 293.15 K.
    p_r : float
        Reference ambient pressure in Pa, 101.325*10**3 Pa.

    Returns
    -------
    f_rO : float
        Relaxation frequency of oxygen.
    f_rN : float
        Relaxation frequency of nitrogen.
    """
    f_rO = p / p_r * (24 + 4.04 * 10**4 * H * (0.02 + H) / (0.391 + H))
    f_rN = p / p_r * (T / T0)**(-0.5) * (9 + 280 * H * np.exp(-4.17 * ((T / T0)**(-1 / 3) - 1)))
    return f_rO, f_rN
c8c047ed4d9a7fc62b2cdb6d19f0d3c8b1b4c570
24,558
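A quick sanity check of the record above at the ISO 9613-1 reference conditions; the humidity value of 1% is an arbitrary illustration:

f_rO, f_rN = AirAbsorptionRelaxationFrequencies(
    T=293.15, p=101325.0, H=1.0, T0=293.15, p_r=101325.0)
print(f_rO, f_rN)  # ~2.96e4 Hz (oxygen), 289 Hz exactly (nitrogen, since T == T0)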
def table_from_bool(ind1, ind2):
    """
    Given two boolean arrays, return the 2x2 contingency table

    ind1, ind2 : array-like
        Arrays of the same length
    """
    return [
        sum(ind1 & ind2),
        sum(ind1 & ~ind2),
        sum(~ind1 & ind2),
        sum(~ind1 & ~ind2),
    ]
497ce6ad1810386fedb6ada9ba87f0a5baa6318a
24,559
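A usage sketch with NumPy boolean arrays (plain Python lists would fail on the elementwise & and ~ operators):

import numpy as np

ind1 = np.array([True, True, False, False])
ind2 = np.array([True, False, True, False])
print(table_from_bool(ind1, ind2))  # [1, 1, 1, 1]: one pair in each cell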
import pandas as pd


def preprocess_skills(month_kpi_skills: pd.DataFrame, quarter_kpi_skills: pd.DataFrame) -> pd.DataFrame:
    """
    Takes two DataFrames as input:
        - KPI data for the ВЭД employees over the last month
        - KPI data for the ВЭД employees over the last quarter
    Returns a single DataFrame merging both tables, with additional features
    giving the ratios of completed work to the employees' norms.

    :param month_kpi_skills: pd.DataFrame
    :param quarter_kpi_skills: pd.DataFrame
    :return: pd.DataFrame
    """
    month_kpi_skills.fillna(0, inplace=True)
    quarter_kpi_skills.fillna(0, inplace=True)

    # Bring the monthly skills data into a single data frame
    month_kpi_skills.columns = month_skills_columns
    quarter_kpi_skills.columns = quarter_skills_columns

    assert sorted(month_kpi_skills['ВЭД'].unique()) == sorted(quarter_kpi_skills['ВЭД'].unique()), \
        'The monthly and quarterly KPI tables contain different ВЭД'

    kpi_skills = month_kpi_skills.merge(quarter_kpi_skills, on='ВЭД', how='inner')

    # Compute the ratios between the 3-month results and the norms
    kpi_skills['Звонки / Норма'] = kpi_skills['Звонки (3 мес)'] / kpi_skills['Звонки норма (3 мес)']
    kpi_skills['Обработанные заявки / Норма'] = kpi_skills['Обработанные заявки (3 мес)'] / kpi_skills['Норма 88% (3 мес)']
    kpi_skills['48 часов / Норма'] = kpi_skills['Обработка не позднее 48 часов (3 мес)'] / kpi_skills['Норма 85% (3 мес)']
    kpi_skills['Полнота сбора / Норма'] = kpi_skills['Полнота сбора (3 мес)'] / kpi_skills['Норма 95% (3 мес)']
    kpi_skills['Встречи / Норма'] = kpi_skills['Встречи (3 мес)'] / kpi_skills['Встречи норма (3 мес)']

    kpi_skills.fillna(0.0, inplace=True)  # Fill the NaN values produced by division by 0
    kpi_skills.drop(['Звонки норма', 'Встречи норма', 'Звонки норма (3 мес)', 'Встречи норма (3 мес)'],
                    axis=1, inplace=True)
    kpi_skills = kpi_skills.reindex(columns=skills_final_columns)

    return kpi_skills
6bcbc1b93c99acbef04bf0962678c35a3abd3faa
24,560
import numpy as np
from scipy import interpolate


def bias_col_spline(im, overscan, dymin=5, dymax=2, statistic=np.mean, **kwargs):
    """Compute the offset by fitting a spline to the mean of each column in the
    parallel overscan region.

    Args:
        im: A masked (lsst.afw.image.imageLib.MaskedImageF) or unmasked
            (lsst.afw.image.imageLib.ImageF) afw image.
        overscan: A bounding box for the parallel overscan region.
        dymin: The number of rows to skip at the beginning of the parallel
            overscan region.
        dymax: The number of rows to skip at the end of the parallel overscan
            region.
        statistic: The statistic to use to calculate the offset for each column.

    Keyword Arguments:
        k: The degree of the spline fit. The default is: 3.
        s: The amount of smoothing to be applied to the fit. The default is:
            18000.
        t: The number of knots. If None, finds the number of knots to use
            for a given smoothing factor, s. The default is: None.

    Returns:
        A tuple (t, c, k) containing the vector of knots, the B-spline
        coefficients, and the degree of the spline.
    """
    try:
        imarr = im.Factory(im, overscan).getArray()
    except AttributeError:
        # Dealing with a MaskedImage
        imarr = im.Factory(im, overscan).getImage().getArray()
    ny, nx = imarr.shape
    cols = np.arange(nx)
    values = np.array([statistic(imarr[dymin:-dymax, j]) for j in cols])
    rms = 7  # Expected read noise per pixel
    weights = np.ones(nx) * (rms / np.sqrt(nx))
    return interpolate.splrep(cols, values, w=1/weights, k=kwargs.get('k', 3),
                              s=kwargs.get('s', 18000), t=kwargs.get('t', None))
d157275dd8337b81c9f4c67efe1c033512f963d3
24,561
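The record returns a raw (t, c, k) spline tuple rather than bias values. A sketch of how the result would typically be consumed, with scipy.interpolate.splev evaluating the fitted spline at each column; the synthetic per-column means below stand in for the real afw image data:

import numpy as np
from scipy import interpolate

# Synthetic per-column overscan means: a gentle ramp plus read noise
cols = np.arange(512)
values = 1000.0 + 0.05 * cols + np.random.normal(0, 7 / np.sqrt(512), 512)
tck = interpolate.splrep(cols, values, k=3, s=18000)

bias_per_column = interpolate.splev(cols, tck)  # smoothed bias level per column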
import json
import os


def read_config():
    """ Returns the decoded config data in 'db_config.json'

    Will return the decoded config file if 'db_config.json' exists and is in a
    valid JSON format. Otherwise, it will return False.
    """
    # Check if the file exists
    if not os.path.isfile('db_config.json'):
        return False

    # Check if the file is in a valid JSON format.
    try:
        with open('db_config.json') as json_data:
            config = json.load(json_data)
    except ValueError:
        print('[WARN] Error decoding db_config.json')
        return False

    return config
36b0ccdbd653b654663c7a3c6cf47cb3f68bc399
24,562
import pandas


def get_sub_title_from_series(ser: pandas.Series, decimals: int = 3) -> str:
    """Build a subtitle string containing the mean, standard deviation and
    sample size of a pandas.Series."""
    mean = round(ser.mean(), decimals)
    std = round(ser.std(), decimals)
    sub_title = f"μ={mean}, σ={std}, N={len(ser)}"
    return sub_title
45c227e7ddd203872f015e4a95532c8acb80d54f
24,563
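For example:

ser = pandas.Series([1.0, 2.0, 3.0])
print(get_sub_title_from_series(ser))  # "μ=2.0, σ=1.0, N=3"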
import numpy
from numpy.typing import ArrayLike


def atand2(delta_y: ArrayLike, delta_x: ArrayLike) -> ArrayLike:
    """Return the four-quadrant arctangent of delta_y / delta_x, in degrees.

    Parameters
    ----------
    delta_y : ArrayLike
        The y-component(s) of the vector(s).
    delta_x : ArrayLike
        The x-component(s) of the vector(s).

    Returns
    -------
    ArrayLike
        An angle (or array of angles), in degrees.
    """
    return numpy.degrees(numpy.arctan2(delta_y, delta_x))
14d825d9886a2a62e36748eb9660ee27e6ba6827
24,564
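For example:

print(atand2(1.0, 1.0))   # 45.0
print(atand2(1.0, -1.0))  # 135.0
print(atand2(-1.0, 0.0))  # -90.0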
from typing import Union

import xarray as xr


def adjust_doy_calendar(
    source: xr.DataArray, target: Union[xr.DataArray, xr.Dataset]
) -> xr.DataArray:
    """Interpolate from one set of dayofyear range to another calendar.

    Interpolate an array defined over a `dayofyear` range (say 1 to 360) to
    another `dayofyear` range (say 1 to 365).

    Parameters
    ----------
    source : xr.DataArray
        Array with `dayofyear` coordinate.
    target : xr.DataArray or xr.Dataset
        Array with `time` coordinate.

    Returns
    -------
    xr.DataArray
        Interpolated source array over coordinates spanning the target
        `dayofyear` range.
    """
    doy_max_source = source.dayofyear.max()
    doy_max = max_doy[get_calendar(target)]
    if doy_max_source == doy_max:
        return source
    return _interpolate_doy_calendar(source, doy_max)
d55da217c6b6e3b2947e992611da4e1fdacf7f5f
24,565
import numpy as np


def iou(box_a, box_b):
    """Calculates intersection area / union area for two bounding boxes."""
    assert area(box_a) > 0
    assert area(box_b) > 0
    intersect = np.array(
        [[max(box_a[0][0], box_b[0][0]), max(box_a[0][1], box_b[0][1])],
         [min(box_a[1][0], box_b[1][0]), min(box_a[1][1], box_b[1][1])]])
    return area(intersect) / (area(box_a) + area(box_b) - area(intersect))
9722673c7cc5b636d698453224cf3f06d1aa3678
24,566
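The area helper the record depends on is not included. A minimal sketch, assuming boxes are [[x0, y0], [x1, y1]] corner pairs, which is consistent with the indexing above:

def area(box):
    # Width times height for a [[x0, y0], [x1, y1]] box
    return (box[1][0] - box[0][0]) * (box[1][1] - box[0][1])

box_a = np.array([[0, 0], [2, 2]])
box_b = np.array([[1, 1], [3, 3]])
print(iou(box_a, box_b))  # 1 / 7 ≈ 0.1429 (intersection 1, union 4 + 4 - 1)

Note that this simple area() goes negative for non-overlapping boxes, so iou as written assumes the two boxes actually intersect; the asserts only guard the inputs.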
def poll():
    """ The send buffer is flushed and any outstanding CA background activity
    is processed.

    .. note:: same as pend_event(1e-12)
    """
    status = libca.ca_pend_event(1e-12)
    return ECA(status)
96052229179a0188a3bb63a6e3ab35aa3d6cc5f7
24,567
def TopLevelWindow_GetDefaultSize(*args):
    """TopLevelWindow_GetDefaultSize() -> Size"""
    return _windows_.TopLevelWindow_GetDefaultSize(*args)
e9a04052461bf64b7b3e4962a7df052e1f63de4b
24,568
def human_size(numbytes):
    """converts a number of bytes into a human-readable string"""
    KB = 1024
    MB = 1024 * KB
    GB = 1024 * MB
    TB = 1024 * GB
    if numbytes >= TB:
        amount = numbytes / TB
        unit = "TiB"
    elif numbytes >= GB:
        amount = numbytes / GB
        unit = "GiB"
    elif numbytes >= MB:
        amount = numbytes / MB
        unit = "MiB"
    elif numbytes >= KB:
        amount = numbytes / KB
        unit = "KiB"
    else:
        amount = numbytes
        unit = "B"
    return "%.3f%s" % (amount, unit)
733fdff47350072b9cfcaf72a2de85f8a1d58cc6
24,569
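For example:

print(human_size(512))          # 512.000B
print(human_size(1536))         # 1.500KiB
print(human_size(5 * 1024**3))  # 5.000GiB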
from typing import Callable
from typing import Any


def node_definitions(
    id_fetcher: Callable[[str, GraphQLResolveInfo], Any],
    type_resolver: GraphQLTypeResolver = None,
) -> GraphQLNodeDefinitions:
    """
    Given a function to map from an ID to an underlying object, and a function
    to map from an underlying object to the concrete GraphQLObjectType it
    corresponds to, constructs a `Node` interface that objects can implement,
    and a field object to be used as a `node` root field.

    If the type_resolver is omitted, object resolution on the interface will be
    handled with the `is_type_of` method on object types, as with any GraphQL
    interface without a provided `resolve_type` method.
    """
    node_interface = GraphQLInterfaceType(
        "Node",
        description="An object with an ID",
        fields=lambda: {
            "id": GraphQLField(
                GraphQLNonNull(GraphQLID), description="The id of the object."
            )
        },
        resolve_type=type_resolver,
    )

    # noinspection PyShadowingBuiltins
    node_field = GraphQLField(
        node_interface,
        description="Fetches an object given its ID",
        args={
            "id": GraphQLArgument(
                GraphQLNonNull(GraphQLID), description="The ID of an object"
            )
        },
        resolve=lambda _obj, info, id: id_fetcher(id, info),
    )

    nodes_field = GraphQLField(
        GraphQLNonNull(GraphQLList(node_interface)),
        args={
            "ids": GraphQLArgument(
                GraphQLNonNull(GraphQLList(GraphQLNonNull(GraphQLID))),
                description="The IDs of objects",
            )
        },
        resolve=lambda _obj, info, ids: [id_fetcher(id_, info) for id_ in ids],
    )

    return GraphQLNodeDefinitions(node_interface, node_field, nodes_field)
4e041edacbd7e5d6c82dd7df8616a694aa00181a
24,571
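A usage sketch, assuming graphql-core 3 and an in-memory photos dict as the backing store; the Photo type, its is_type_of check, and the example URL are illustrative only:

from graphql import (
    GraphQLField, GraphQLID, GraphQLNonNull, GraphQLObjectType,
    GraphQLSchema, GraphQLString, graphql_sync)

photos = {'1': {'id': '1', 'url': 'http://example.com/1.jpg'}}

node_interface, node_field, nodes_field = node_definitions(
    lambda id_, _info: photos.get(id_))

photo_type = GraphQLObjectType(
    'Photo',
    interfaces=[node_interface],
    fields={
        'id': GraphQLField(GraphQLNonNull(GraphQLID)),
        'url': GraphQLField(GraphQLString),
    },
    # With no type_resolver passed above, resolution falls back to is_type_of
    is_type_of=lambda obj, _info: 'url' in obj)

schema = GraphQLSchema(
    query=GraphQLObjectType('Query', {'node': node_field}),
    types=[photo_type])

result = graphql_sync(schema, '{ node(id: "1") { id ... on Photo { url } } }')
print(result.data)  # {'node': {'id': '1', 'url': 'http://example.com/1.jpg'}}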
def get_image_from_request(request):
    """
    Extracts the image from a POST or GET request. Usually it is a URL of the
    image and, in the case of a POST, it is possible to send it as multi-part
    data instead.
    Returns a tuple (ok: boolean, error: string, image: ndarray)
    """
    if request.method == 'POST':
        content_type = parse_content_type(request)
        if content_type == "multipart/form-data":
            if 'image' in request.files:
                try:
                    image = read_image_from_stream(request.files['image'])
                    return (True, '', image)
                except Exception:
                    return (False, "Unable to read uploaded file", None)
            else:
                return (False, "No image provided in form-data request", None)
        elif content_type == 'application/json':
            try:
                input_params = request.get_json(True)
            except Exception:
                return (False, 'No valid JSON present', None)
            if 'imageUrl' in input_params:
                image_url = input_params['imageUrl']
                try:
                    image = read_image_from_url(image_url)
                    return (True, '', image)
                except Exception:
                    return (False, 'Unable to read image from url', None)
            elif 'imageB64' in input_params:
                image_b64 = input_params['imageB64']
                try:
                    image = read_image_b64(image_b64)
                    return (True, '', image)
                except Exception:
                    return (False, 'Unable to read base 64 image', None)
            else:
                return (False, 'Image url or base 64 string not informed', None)
    elif request.method == 'GET':
        if request.args.get('imageUrl') is None:
            return (False, 'Image url not informed', None)
        else:
            image_url = request.args.get('imageUrl')
            try:
                image = read_image_from_url(image_url)
                return (True, '', image)
            except Exception:
                return (False, 'Unable to read image from url', None)
0af18d65664e1c7dc264ac112b42e001ac293fd6
24,572
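A sketch of how the record would typically be wired into a Flask route; the route name is hypothetical, and parse_content_type and the read_image_* helpers are assumed to be defined alongside it:

from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/image-info', methods=['GET', 'POST'])
def image_info():
    result = get_image_from_request(request)
    if result is None:  # an unsupported content type falls through in the helper
        return jsonify({'error': 'Unsupported content type'}), 415
    ok, error, image = result
    if not ok:
        return jsonify({'error': error}), 400
    return jsonify({'height': image.shape[0], 'width': image.shape[1]})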
def con_external():
    """Define a connection fixture.

    Returns
    -------
    ibis.omniscidb.OmniSciDBClient
    """
    omnisci_client = ibis.omniscidb.connect(
        user=EXT_OMNISCIDB_USER,
        password=EXT_OMNISCIDB_PASSWORD,
        host=EXT_OMNISCIDB_HOST,
        port=EXT_OMNISCIDB_PORT,
        database=EXT_OMNISCIDB_DATABASE,
        protocol=EXT_OMNISCIDB_PROTOCOL
    )
    return omnisci_client
e5a57ebdf8640bd96a2e28678fe4d0b285fe8408
24,573
import pandas as pd


def parse_risk(data_byte_d):
    """Parse and arrange risk lists.

    Parameters
    ----------
    data_byte_d : object
        Decoded StringIO object.

    Returns
    -------
    neocc_lst : *pandas.Series* or *pandas.DataFrame*
        Data frame with risk list data parsed.
    """
    # Read data as csv
    neocc_lst = pd.read_csv(data_byte_d, sep='|', skiprows=[3], header=2)
    # Remove redundant white spaces
    neocc_lst.columns = neocc_lst.columns.str.strip()
    neocc_lst = neocc_lst.replace(r'\s+', ' ', regex=True)
    df_obj = neocc_lst.select_dtypes(['object'])
    neocc_lst[df_obj.columns] = df_obj.apply(lambda x: x.str.strip())
    # Rename columns
    col_dict = {"Num/des. Name": 'Object Name',
                "m": 'Diameter in m',
                "Vel km/s": 'Vel in km/s'}
    neocc_lst.rename(columns=col_dict, inplace=True)
    # Remove last column
    neocc_lst = neocc_lst.drop(neocc_lst.columns[-1], axis=1)
    # Convert column with date to datetime variable
    neocc_lst['Date/Time'] = pd.to_datetime(neocc_lst['Date/Time'])
    # Split Years into 2 columns to avoid dashes between integers
    # Check dataframe is not empty (for special list)
    if len(neocc_lst.index.values) != 0:
        neocc_lst[['First year', 'Last year']] = neocc_lst['Years']\
            .str.split("-", expand=True)\
            .astype(int)
        # Drop split column
        neocc_lst = neocc_lst.drop(['Years'], axis=1)
        # Reorder columns
        neocc_lst = neocc_lst[['Object Name', 'Diameter in m', '*=Y',
                               'Date/Time', 'IP max', 'PS max', 'TS',
                               'Vel in km/s', 'First year', 'Last year',
                               'IP cum', 'PS cum']]
    # Adding metadata
    neocc_lst.help = ('Risk lists contain a data frame with the '
                      'following information:\n'
                      '-Object Name: name of the NEA\n'
                      '-Diameter in m: approximate diameter in meters\n'
                      '-*=Y: recording an asterisk if the value has '
                      'been estimated from the absolute magnitude\n'
                      '-Date/Time: predicted impact date in datetime '
                      'format\n'
                      '-IP max: Maximum Impact Probability\n'
                      '-PS max: Palermo scale rating\n'
                      '-Vel in km/s: Impact velocity at atmospheric entry'
                      ' in km/s\n'
                      '-First year: first year of possible impacts\n'
                      '-Last year: last year of possible impacts\n'
                      '-IP cum: Cumulative Impact Probability\n'
                      '-PS cum: Cumulative Palermo Scale')
    return neocc_lst
cf8761e46df621ffcf69dba9e2c359c25da02234
24,574
def plot_step_w_variable_station_filters(df, df_stations=None, options=None):
    """Plot step data with variable station-filter controls."""
    p = PlotStepWithControls(df, df_stations, options)
    return p.plot()
a1faa31c90f4c00103148aa50648f040849984b1
24,575
import numpy as np


def pick_random_element(count):
    """
    Parameters
    ----------
    count: {string: int}
        A dictionary of all transition counts from some state we're in
        to all other states

    Returns
    -------
    The next character, randomly sampled from the empirical probabilities
    determined from the counts
    """
    keys = list(count.keys())
    counts = np.array(list(count.values()))
    counts = np.cumsum(counts)
    r = np.random.rand() * counts[-1]
    idx = np.searchsorted(counts, r)
    return keys[idx]
90388526b0a3a663f4f8d2ef6530484ddcf6fde2
24,576
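A quick check that the sampling tracks the empirical distribution:

from collections import Counter

count = {'a': 3, 'b': 1}
draws = Counter(pick_random_element(count) for _ in range(10000))
print(draws)  # roughly 7500 'a' and 2500 'b'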