Dataset columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def get_post_count(user):
    """
    Get number of posts published by the request user.

    Parameters
    ----------
    user: The request user

    Returns
    -------
    count: int
        The number of posts published by the request user.
    """
    count = Post.objects.filter(publisher=user).count()
    return count
6000bcd43ef2b8edf3c1dd04df89dcef38f110d5
12,459
from config import employee_required_fields, employee_sections  # assumption: employee_sections also lives in config; it is used below but was never imported


def create_new_employee(employees):
    """
    Create a new employee record in the employees dictionary.

    Use the employee_sections dictionary template to create a new employee record.
    """
    subsidiary = input('Employee Subsidiary (SK, CZ):')
    employee_id = generate_employee_id(subsidiary, employees)
    employee = {}  # Storage for new employee
    print('Please, enter records for new employee ID: ' + employee_id)
    # Iterate over 'employee_sections'
    for section in employee_sections['<employee_id>']:
        # Insert empty section
        employee[section] = {}
        for field in employee_sections['<employee_id>'][section]:
            _input = ''
            while not _input:
                _input = input(section + '/' + field + ': ')
                if not _input and field in employee_required_fields:
                    print('This field is required, please enter the value.')
                else:
                    employee[section][field] = _input
                    break
    print(employee)
    employees[employee_id] = employee
    print('Thank you, entry has been completed for ID: ' + employee_id)
    input('Press ENTER to continue')
    commit_changes(file_with_employees, str(employees))
    return employees
aa5d0981c2b81ad65ed5ad0368fd1b3b79796a40
12,460
def gather_squares_triangles(p1,p2,depth): """ Draw Square and Right Triangle given 2 points, Recurse on new points args: p1,p2 (float,float) : absolute position on base vertices depth (int) : decrementing counter that terminates recursion return: squares [(float,float,float,float)...] : absolute positions of vertices of squares triangles [(float,float,float)...] : absolute positions of vertices of right triangles """ # Break Recursion if depth is met if depth == 0: return [],[] # Generate Points pd = (p2[0] - p1[0]),(p1[1] - p2[1]) p3 = (p2[0] - pd[1]),(p2[1] - pd[0]) p4 = (p1[0] - pd[1]),(p1[1] - pd[0]) p5 = (p4[0] + (pd[0] - pd[1])/2),(p4[1] - (pd[0] + pd[1])/2) # Gather Points further down the tree squares_left,triangles_left = gather_squares_triangles(p4,p5,depth-1) squares_right,triangles_right = gather_squares_triangles(p5,p3,depth-1) # Merge and Return squares = [[p1,p2,p3,p4]]+squares_left+squares_right triangles = [[p3,p4,p5]]+triangles_left+triangles_right return squares,triangles
de4e720eb10cb378f00086a6e8e45886746055c0
12,461
def update_node(node_name, node_type, root=None): """ ! Node is assumed to have only one input and one output port with a maximum of one connection for each. Returns: NodegraphAPI.Node: newly created node """ new = NodegraphAPI.CreateNode(node_type, root or NodegraphAPI.GetRootNode()) if new.getType() == "Group": new_in = new.addInputPort("in") new_out = new.addOutputPort("out") else: new_in = new.getInputPortByIndex(0) new_out = new.getOutputPortByIndex(0) existingn = NodegraphAPI.GetNode(node_name) if existingn: # we assume there is only 1 input/output port with only one connection in_port = existingn.getInputPorts()[0] in_port = in_port.getConnectedPort(0) out_port = existingn.getOutputPorts()[0] out_port = out_port.getConnectedPort(0) pos = NodegraphAPI.GetNodePosition(existingn) # type: tuple existingn.delete() NodegraphAPI.SetNodePosition(new, pos) if in_port: in_port.connect(new_in) if out_port: out_port.connect(new_out) logger.info("[update_node] Found existing node, it has been updated.") new.setName(node_name) logger.info("[update_node] Finished for node <{}>".format(node_name)) return new
916beec7de527ee56d5326061aa2c367af17434f
12,462
def dan_acf(x, axis=0, fast=False):
    """
    Estimate the autocorrelation function of a time series using the FFT.

    Args:
        x (array): The time series. If multidimensional, set the time axis
            using the ``axis`` keyword argument and the function will be
            computed for every other axis.
        axis (Optional[int]): The time axis of ``x``. Assumed to be the
            first axis if not specified.
        fast (Optional[bool]): If ``True``, only use the largest ``2^n``
            entries for efficiency. (default: False)

    Returns:
        acf (array): The acf array.
    """
    x = np.atleast_1d(x)
    m = [slice(None), ] * len(x.shape)

    # For computational efficiency, crop the chain to the largest power of
    # two if requested.
    if fast:
        n = int(2 ** np.floor(np.log2(x.shape[axis])))
        m[axis] = slice(0, n)
        x = x[tuple(m)]
    else:
        n = x.shape[axis]

    # Compute the FFT and then (from that) the auto-correlation function.
    f = np.fft.fft(x - np.mean(x, axis=axis), n=2 * n, axis=axis)
    m[axis] = slice(0, n)
    acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
    m[axis] = 0
    return acf / acf[tuple(m)]
85273d95564f0e8c0afb9ff00ac23dc04539f291
12,463
from datetime import datetime


def schedule_decision():
    """Run the optimization and display the result."""
    # Show the top page (when a GET request comes in)
    if request.method == "GET":
        return render_template("scheduler/schedule_decision.html", solution_html=None)

    # Runs on the POST request sent when the "run optimization" button is pressed
    # Check that the data has been uploaded; if it is not valid, return to the original page
    if not check_request(request):
        return redirect(request.url)

    # Preprocessing (load the data)
    df_kagisime, df_gomisute = preprocess(request)

    # Run the optimization
    prob = KandGProblem(df_kagisime, df_gomisute)
    solution_df = prob.solve()
    L_gomisute_members = list(prob.L_gomisute_members)

    # If the user is logged in, add the decided schedule to the DB.
    if current_user.is_authenticated:
        yyyy, mm, _ = solution_df.index[0].split("/")
        user_id = session["_user_id"]
        print(user_id)
        print("currentuser:", current_user)
        is_new_schedule = not ScheduleLists.query.filter_by(
            user_id=user_id, yyyymm=yyyy + mm
        ).all()
        if is_new_schedule:
            schedule_list = ScheduleLists(user_id=user_id, yyyymm=yyyy + mm)
            db.session.add(schedule_list)
            db.session.commit()
        schedulelist_id = (
            ScheduleLists.query.filter_by(user_id=user_id, yyyymm=yyyy + mm)
            .group_by("id")
            .first()
        )
        print(schedulelist_id.id)
        for row in solution_df.itertuples():
            if not is_new_schedule:
                print(datetime.strptime(row[0], "%Y/%m/%d"))
                old_schedule = Schedules.query.filter_by(
                    schedulelist_id=schedulelist_id.id,
                    date=datetime.strptime(row[0], "%Y/%m/%d"),
                ).first()
                print(old_schedule)
                if old_schedule:
                    old_schedule.k_members = row[1]
                    old_schedule.g_members = row[2]
                    db.session.add(old_schedule)
                    db.session.commit()
            else:
                schedule = Schedules(
                    schedulelist_id=schedulelist_id.id,
                    date=datetime.strptime(row[0], "%Y/%m/%d"),
                    k_members=row[1],
                    g_members=row[2],
                )
                db.session.add(schedule)
                db.session.commit()

    # Postprocessing (convert the optimization result into a form that can be shown as HTML)
    solution_html = postprocess(solution_df)
    return render_template(
        "scheduler/schedule_decision.html",
        solution_html=solution_html,
        solution_df=solution_df,
        L_gomisute_members=" ".join(L_gomisute_members),
    )
6f259961d027b6e4a3dc88289a5ba62b162705f6
12,464
def infection_rate_asymptomatic_30x40(): """ Real Name: b'infection rate asymptomatic 30x40' Original Eqn: b'contact infectivity asymptomatic 30x40*(social distancing policy SWITCH self 40*social distancing policy 40\\\\ +(1-social distancing policy SWITCH self 40))*Infected asymptomatic 30x40*Susceptible 40\\\\ /non controlled pop 30x40' Units: b'person/Day' Limits: (None, None) Type: component b'' """ return contact_infectivity_asymptomatic_30x40() * ( social_distancing_policy_switch_self_40() * social_distancing_policy_40() + (1 - social_distancing_policy_switch_self_40()) ) * infected_asymptomatic_30x40() * susceptible_40() / non_controlled_pop_30x40()
16aebdca2259933dcdab1a00ed8d37b10d5b8714
12,465
def slug(hans, style=Style.NORMAL, heteronym=False, separator='-',
         errors='default', strict=True):
    """Convert Chinese characters to pinyin, then build a slug string.

    :param hans: A string of Chinese characters (e.g. ``'你好吗'``) or a list
                 (e.g. ``['你好', '吗']``). You can segment the string with your
                 favourite word-segmentation module first and simply pass the
                 resulting list of segments here.
    :type hans: unicode string or list of strings
    :param style: The pinyin style to use; defaults to
                  :py:attr:`~pypinyin.Style.NORMAL`.
                  See :class:`~pypinyin.Style` for all available styles.
    :param heteronym: Whether to enable heteronym (multiple readings) support
    :param separator: Separator/joiner placed between two pinyin syllables
    :param errors: How to handle characters that have no pinyin; see
                   :py:func:`~pypinyin.pinyin` for details
    :param strict: Whether initial-only or final-only styles strictly follow
                   the《汉语拼音方案》when splitting initials and finals;
                   see :ref:`strict`
    :return: the slug string.
    :raise AssertionError: raised when the input string is not unicode

    ::

      >>> import pypinyin
      >>> from pypinyin import Style
      >>> pypinyin.slug('中国人')
      'zhong-guo-ren'
      >>> pypinyin.slug('中国人', separator=' ')
      'zhong guo ren'
      >>> pypinyin.slug('中国人', style=Style.FIRST_LETTER)
      'z-g-r'
      >>> pypinyin.slug('中国人', style=Style.CYRILLIC)
      'чжун1-го2-жэнь2'
    """
    return separator.join(
        chain(
            *_default_pinyin.pinyin(
                hans, style=style, heteronym=heteronym,
                errors=errors, strict=strict
            )
        )
    )
124431e3ea8747dfdc024f93e88f692746797013
12,466
def A_weight(signal, fs):
    """
    Return the given signal after passing through an A-weighting filter

    signal : array_like
        Input signal
    fs : float
        Sampling frequency
    """
    b, a = A_weighting(fs)
    return lfilter(b, a, signal)
1c6abdd90b85762db4383972de7508d00b561065
12,467
from typing import Tuple from typing import Union import traceback def send_task_to_executor(task_tuple: TaskInstanceInCelery) \ -> Tuple[TaskInstanceKey, CommandType, Union[AsyncResult, ExceptionWithTraceback]]: """Sends task to executor.""" key, _, command, queue, task_to_run = task_tuple try: with timeout(seconds=OPERATION_TIMEOUT): result = task_to_run.apply_async(args=[command], queue=queue) except Exception as e: # pylint: disable=broad-except exception_traceback = "Celery Task ID: {}\n{}".format(key, traceback.format_exc()) result = ExceptionWithTraceback(e, exception_traceback) return key, command, result
cbc93ac3a3c146b748c0ec88eaa9cb2cd631ac85
12,470
def geometries_from_bbox(north, south, east, west, tags): """ Create a GeoDataFrame of OSM entities within a N, S, E, W bounding box. Parameters ---------- north : float northern latitude of bounding box south : float southern latitude of bounding box east : float eastern longitude of bounding box west : float western longitude of bounding box tags : dict Dict of tags used for finding objects in the selected area. Results returned are the union, not intersection of each individual tag. Each result matches at least one given tag. The dict keys should be OSM tags, (e.g., `building`, `landuse`, `highway`, etc) and the dict values should be either `True` to retrieve all items with the given tag, or a string to get a single tag-value combination, or a list of strings to get multiple values for the given tag. For example, `tags = {'building': True}` would return all building footprints in the area. `tags = {'amenity':True, 'landuse':['retail','commercial'], 'highway':'bus_stop'}` would return all amenities, landuse=retail, landuse=commercial, and highway=bus_stop. Returns ------- gdf : geopandas.GeoDataFrame Notes ----- You can configure the Overpass server timeout, memory allocation, and other custom settings via ox.config(). """ # convert bounding box to a polygon polygon = utils_geo.bbox_to_poly(north, south, east, west) # create GeoDataFrame of geometries within this polygon gdf = geometries_from_polygon(polygon, tags) return gdf
32aeebe7f644df00b613ef6e0d4f30baef1a5743
12,473
def dBzdtAnalCircT(a, t, sigma): """ Hz component of analytic solution for half-space (Circular-loop source) Src and Rx are on the surface and receiver is located at the center of the loop. Src waveform here is step-off. .. math:: \\frac{\partial h_z}{\partial t} = -\\frac{I}{\mu_0\sigma a^3} \ \left( 3erf(\\theta a) - \\frac{2}{\sqrt{\pi}}\\theta a (3+2\\theta^2 a^2) e^{-\\theta^2a^2}\\right) .. math:: \\theta = \sqrt{\\frac{\sigma\mu}{4t}} """ theta = np.sqrt((sigma*mu_0)/(4*t)) const = -1/(mu_0*sigma*a**3) ta = theta*a eta = erf(ta) t1 = 3*eta t2 = -2/(np.pi**0.5)*ta*(3+2*ta**2)*np.exp(-ta**2) dhzdt = const*(t1+t2) return mu_0*dhzdt
18b9428528ed11a121ad01578d2bfc35faceae21
12,474
def count_increasing(ratings, n):
    """ Only considering the increasing case """
    arr = [1] * n
    cnt = 1
    for i in range(1, n):
        cnt = cnt + 1 if ratings[i - 1] < ratings[i] else 1
        arr[i] = cnt
    return arr
9fe274527fbba505467a195bf555c77d2f3e6aed
12,475
import copy def load_train_data_frame(train_small, target, keras_options, model_options, verbose=0): """ ### CAUTION: TF2.4 Still cannot load a DataFrame with Nulls in string or categoricals! ############################################################################ #### TF 2.4 still cannot load tensor_slices into ds if an object or string column #### that has nulls in it! So we need to find other ways to load tensor_slices by #### first filling dataframe with pandas fillna() function! ############################################################################# """ train_small = copy.deepcopy(train_small) DS_LEN = model_options['DS_LEN'] #### do this for dataframes ################## try: batch_size = keras_options["batchsize"] if isinstance(keras_options["batchsize"], str): batch_size = find_batch_size(DS_LEN) except: #### If it is not given find it here #### batch_size = find_batch_size(DS_LEN) ######### Modify or Convert column names to fit tensorflow rules of no space in names! sel_preds = ["_".join(x.split(" ")) for x in list(train_small) ] #### This can also be a problem with other special characters ### sel_preds = ["_".join(x.split("(")) for x in sel_preds ] sel_preds = ["_".join(x.split(")")) for x in sel_preds ] sel_preds = ["_".join(x.split("/")) for x in sel_preds ] sel_preds = ["_".join(x.split("\\")) for x in sel_preds ] sel_preds = ["_".join(x.split("?")) for x in sel_preds ] sel_preds = [x.lower() for x in sel_preds ] if isinstance(target, str): target = "_".join(target.split(" ")) target = "_".join(target.split("(")) target = "_".join(target.split(")")) target = "_".join(target.split("/")) target = "_".join(target.split("\\")) target = "_".join(target.split("?")) target = target.lower() model_label = 'Single_Label' else: target = ["_".join(x.split(" ")) for x in target ] target = ["_".join(x.split("(")) for x in target ] target = ["_".join(x.split(")")) for x in target ] target = ["_".join(x.split("/")) for x in target ] target = ["_".join(x.split("\\")) for x in target ] target = ["_".join(x.split("?")) for x in target ] target = [x.lower() for x in target ] model_label = 'Multi_Label' train_small.columns = sel_preds print('Alert! Modified column names to satisfy rules for column names in Tensorflow...') #### if target is changed you must send that modified target back to other processes ###### ### usecols is basically target in a list format. Very handy to know when target is a list. try: modeltype = model_options["modeltype"] if model_options["modeltype"] == '': ### usecols is basically target in a list format. Very handy to know when target is a list. 
modeltype, model_label, usecols = find_problem_type(train_small, target, model_options, verbose) else: if isinstance(target, str): usecols = [target] else: usecols = copy.deepcopy(target) except: ### if modeltype is given, then do not find the model type using this function modeltype, model_label, usecols = find_problem_type(train_small, target, model_options, verbose) ### Cat_Vocab_Dict contains all info about vocabulary in each variable and their size print(' Classifying variables using data sample in pandas...') train_small, var_df, cat_vocab_dict = classify_features_using_pandas(train_small, target, model_options, verbose=verbose) ########## Just transfer all the values from var_df to cat_vocab_dict ################################## for each_key in var_df: cat_vocab_dict[each_key] = var_df[each_key] ############################################################################################################ model_options['modeltype'] = modeltype model_options['model_label'] = model_label cat_vocab_dict['target_variables'] = usecols cat_vocab_dict['modeltype'] = modeltype model_options['batch_size'] = batch_size ########## Find small details about the data to help create the right model ### target_transformed = False if modeltype != 'Regression': if isinstance(target, str): #### This is for Single Label Problems ###### if train_small[target].dtype == 'object' or str(train_small[target].dtype).lower() == 'category': target_transformed = True target_vocab = train_small[target].unique() num_classes = len(target_vocab) else: if 0 not in np.unique(train_small[target]): target_transformed = True ### label encoding must be done since no zero class! target_vocab = train_small[target].unique() num_classes = len(train_small[target].value_counts()) elif isinstance(target, list): #### This is for Multi-Label Problems ####### copy_target = copy.deepcopy(target) num_classes = [] for each_target in copy_target: if train_small[target[0]].dtype == 'object' or str(train_small[target[0]].dtype).lower() == 'category': target_transformed = True target_vocab = train_small[target].unique().tolist() num_classes_each = len(target_vocab) else: if 0 not in np.unique(train_small[target[0]]): target_transformed = True ### label encoding must be done since no zero class! target_vocab = train_small[target[0]].unique() num_classes_each = train_small[target].apply(np.unique).apply(len).max() num_classes.append(int(num_classes_each)) else: num_classes = 1 target_vocab = [] ########### find the number of labels in data #### if isinstance(target, str): num_labels = 1 elif isinstance(target, list): if len(target) == 1: num_labels = 1 else: num_labels = len(target) #### This is where we set the model_options for num_classes and num_labels ######### model_options['num_labels'] = num_labels model_options['num_classes'] = num_classes cat_vocab_dict['num_labels'] = num_labels cat_vocab_dict['num_classes'] = num_classes cat_vocab_dict["target_transformed"] = target_transformed #### fill missing values using this function ############## train_small = fill_missing_values_for_TF2(train_small, cat_vocab_dict) ##### Do the deletion of cols after filling with missing values since otherwise fill errors! drop_cols = var_df['cols_delete'] cat_vocab_dict['columns_deleted'] = drop_cols if len(drop_cols) > 0: ### drop cols that have been identified for deletion ### print(' Dropping %s columns marked for deletion...' 
%drop_cols) train_small.drop(drop_cols,axis=1,inplace=True) ######### Now load the train Dataframe into a tf.data.dataset ############# if target_transformed: ####################### T R A N S F O R M I N G T A R G E T ######################## train_small[target], cat_vocab_dict = transform_train_target(train_small, target, modeltype, model_label, cat_vocab_dict) if isinstance(target, str): #### For single label do this: labels can be without names since there is only one label if target != '': labels = train_small[target] features = train_small.drop(target, axis=1) ds = tf.data.Dataset.from_tensor_slices((dict(features), labels)) else: print('target variable is blank - please fix input and try again') return elif isinstance(target, list): #### For multi label do this: labels must be dict and hence with names since there are many targets labels = train_small[target] features = train_small.drop(target, axis=1) ds = tf.data.Dataset.from_tensor_slices((dict(features), dict(labels))) else: ds = tf.data.Dataset.from_tensor_slices(dict(train_small)) ###### Now save some defaults in cat_vocab_dict ########################## try: keras_options["batchsize"] = batch_size cat_vocab_dict['batch_size'] = batch_size except: batch_size = find_batch_size(DS_LEN) keras_options["batchsize"] = batch_size cat_vocab_dict['batch_size'] = batch_size ########################################################################## #### C H E C K F O R I N F I N I T E V A L U E S H E R E ########## ########################################################################## cols_with_infinity = find_columns_with_infinity(train_small) if cols_with_infinity: train_small = drop_rows_with_infinity(train_small, cols_with_infinity, fill_value=True) return train_small, ds, var_df, cat_vocab_dict, keras_options, model_options
85c496b485bbc26afbadf181a2231e3f5bd93706
12,476
def stat_float_times(space, newval=-1): """stat_float_times([newval]) -> oldval Determine whether os.[lf]stat represents time stamps as float objects. If newval is True, future calls to stat() return floats, if it is False, future calls return ints. If newval is omitted, return the current setting. """ state = space.fromcache(StatState) if newval == -1: return space.newbool(state.stat_float_times) else: state.stat_float_times = (newval != 0)
e183f0cc2ce56bc7b4ac6ce95d8cb671a963422f
12,477
def decorate(rvecs): """Output range vectors into some desired string format""" return ', '.join(['{%s}' % ','.join([str(x) for x in rvec]) for rvec in rvecs])
31a3d4414b0b88ffd92a5ddd8eb09aaf90ef3742
12,478
def update_topic_collection_items(request_ctx, collection_item_id, topic_id, **request_kwargs): """ Accepts the same parameters as create :param request_ctx: The request context :type request_ctx: :class:RequestContext :param collection_item_id: (required) ID :type collection_item_id: string :param topic_id: (required) ID :type topic_id: string :return: Update a topic :rtype: requests.Response (with void data) """ path = '/v1/collection_items/{collection_item_id}/discussion_topics/{topic_id}' url = request_ctx.base_api_url + path.format(collection_item_id=collection_item_id, topic_id=topic_id) response = client.put(request_ctx, url, **request_kwargs) return response
06b0709f5fa4acf189baef8f2665bee81b3c4993
12,479
def upsample(inputs, factor=(2, 2), interpolation='nearest'): """ Upsampling layer by factor Parameters ---------- inputs: Input tensor factor: The upsampling factors for (height, width). One integer or tuple of two integers interpolation: A string, one of [`nearest`, `bilinear`, 'bicubic', 'area']. """ # get new_size _, height, width, _ = inputs.get_shape().as_list() factor = _make_pair(factor) new_height = height * factor[0] new_width = width * factor[1] new_size = (new_height, new_width) # get interpolation type interp_types = { 'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR, 'bilinear': tf.image.ResizeMethod.BILINEAR, 'bicubic': tf.image.ResizeMethod.BICUBIC, 'area': tf.image.ResizeMethod.AREA, } if interpolation not in interp_types.keys(): raise ValueError("interpolation must be one of " "['nearest', 'bilinear', 'bicubic', 'area']") interp_type = interp_types.get(interpolation) return tf.image.resize_images(inputs, size=new_size, method=interp_type)
dfbd42871e63cb685f9cfbf9185da38839a9ee4e
12,480
def root_mean_squared_error(*args, **kwargs): """ Returns the square-root of ``scikit-learn``'s ``mean_squared_error`` metric. All arguments are forwarded to that function. """ return np.sqrt(mean_squared_error(*args, **kwargs))
51084b2ec55d14657fa128f0df2bd3f438c2367b
12,481
def idwt2(Wimg, level=4): """ inverse 2d wavelet transform :param Wimg: 2d array wavelet coefficients :param level: int level of wavelet transform - image shape has to be multiples of 2**level :return: 2d array image """ coeffs = _from_img_to_coeffs(Wimg, levels=level) return pywt.waverec2(coeffs, wavelet='db4', mode='per')
521ceca879b0961730b1efd6dac54772a2b41ca3
12,482
def get_color(card): """Returns the card's color Args: card (webelement): a visible card Returns: str: card's color """ color = card.find_element_by_xpath(".//div/*[name()='svg']/*[name()='use'][2]").get_attribute("stroke") # both light and dark theme if (color == "#ff0101" or color == "#ffb047"): color = "red" elif (color == "#800080" or color == "#ff47ff"): color = "purple" else: color = "green" return color
452266b81d70973149fed4ab2e6cbc9c93591180
12,483
from typing import Any, Dict


def is_valid_path(parameters: Dict[str, Any]) -> bool:
    """Single "." chars and empty strings "" are excluded from the path by urllib3.

    A path containing "/" or "%2F" will lead to ambiguous path resolution in many frameworks and
    libraries; such behaviour has been observed in both WSGI and ASGI applications. In this case one
    variable in the path template will be empty, which will lead to 404 in most cases. Because of
    this, the case doesn't bring much value and might lead to false positive results in Schemathesis runs.
    """
    path_parameter_blacklist = (".", SLASH, "")
    return not any(
        (value in path_parameter_blacklist or is_illegal_surrogate(value) or isinstance(value, str) and SLASH in value)
        for value in parameters.values()
    )
5f80ff76c535b3913efc7ba83e04c4c049a9e50b
12,484
import numpy as np
import torch


def to_tensor(x):
    """
    Arguments:
        x: an instance of PIL image.
    Returns:
        a float tensor with shape [1, 3, h, w], it represents
        an RGB image with pixel values in [0, 1] range.
    """
    x = np.array(x)
    x = torch.FloatTensor(x)
    return x.permute(2, 0, 1).unsqueeze(0).div(255.0)
6ff19bd7549a4fce455f03559420216020658c44
12,485
def fetch_data(fold_path): """Fetch data saving in fold path. Convert data into suitable format, using csv files in fold path. :param fold_path: String. The fold in which data files are saved. :return: training_data: Dataframe. Combined dataframe to create training data. testing_data: Dataframe. Combined dataframe to create testing data. """ # Read all the data from target fold path. pokemon = pd.read_csv(fold_path+'/pokemon.csv') combats = pd.read_csv(fold_path+'/combats.csv') test_data = pd.read_csv(fold_path+'/tests.csv') # Convert data into suitable format for training and testing. training_data = convert_data(combats, pokemon, win_column='Winner') testing_data = convert_data(test_data, pokemon) return training_data, testing_data
42ea9ea6d1d9d597acc4ed1a14099711642608f4
12,488
def add_chr_prefix(band): """ Return the band string with chr prefixed """ return ''.join(['chr', band])
08a99220023f10d79bdacdb062a27efcb51086ce
12,489
def disable_text_recog_aug_test(cfg, set_types=None): """Remove aug_test from test pipeline of text recognition. Args: cfg (mmcv.Config): Input config. set_types (list[str]): Type of dataset source. Should be None or sublist of ['test', 'val'] Returns: cfg (mmcv.Config): Output config removing `MultiRotateAugOCR` in test pipeline. """ assert set_types is None or isinstance(set_types, list) if set_types is None: set_types = ['val', 'test'] for set_type in set_types: if cfg.data[set_type].pipeline[1].type == 'MultiRotateAugOCR': cfg.data[set_type].pipeline = [ cfg.data[set_type].pipeline[0], *cfg.data[set_type].pipeline[1].transforms ] return cfg
bda3a5420d32d55062b23a6af27cee3e203b878c
12,490
def layer_svg(svg_bottom, svg_top, offset: list = [0.0, 0.0]): """ Adds one SVG over another. Modifies the bottom SVG in place. :param svg_bottom: The bottom SVG, in in xml.etree.ElementTree form :param svg_top: The top SVG, in in xml.etree.ElementTree form :param offset: How far to offset the top SVG elements """ if svg_top is None: return # print(svg_top.tag) for child in list(svg_top): apply_offset(child, offset, offset_children=True) svg_bottom.append(child) return svg_bottom
6c6a8151d17f4aff9f1491d1ed71772d9434ae4c
12,491
def utxo_cmd(ctx, dry_run): """Get the node's current UTxO with the option of filtering by address(es)""" try: CardanoCli.execute(cmd=["cardano-cli", "query", "utxo"], dry_run=dry_run, include_network=True) except CardanoPyError as cpe: ctx.fail(cpe.message) return cpe.return_code
52807294a445fc2f641c1b921807bba898ad8c34
12,493
def delta_in_ms(delta): """ Convert a timedelta object to milliseconds. """ return delta.seconds*1000.0+delta.microseconds/1000.0
4ed048155daf4a4891488e28c674e905e1bbe947
12,494
import slicer, collections, fnmatch def getNodes(pattern="*", scene=None, useLists=False): """Return a dictionary of nodes where the name or id matches the ``pattern``. By default, ``pattern`` is a wildcard and it returns all nodes associated with ``slicer.mrmlScene``. If multiple node share the same name, using ``useLists=False`` (default behavior) returns only the last node with that name. If ``useLists=True``, it returns a dictionary of lists of nodes. """ nodes = collections.OrderedDict() if scene is None: scene = slicer.mrmlScene count = scene.GetNumberOfNodes() for idx in range(count): node = scene.GetNthNode(idx) name = node.GetName() id = node.GetID() if (fnmatch.fnmatchcase(name, pattern) or fnmatch.fnmatchcase(id, pattern)): if useLists: nodes.setdefault(node.GetName(), []).append(node) else: nodes[node.GetName()] = node return nodes
6d6c44987a800f361d45f4538167acb65e738418
12,495
from typing import Mapping, Optional, Type, TypeVar, Union

X = TypeVar("X")  # generic type variable; the original's "from re import X" was a bogus auto-import


def get_cls(
    query: Union[None, str, Type[X]],
    base: Type[X],
    lookup_dict: Mapping[str, Type[X]],
    lookup_dict_synonyms: Optional[Mapping[str, Type[X]]] = None,
    default: Optional[Type[X]] = None,
    suffix: Optional[str] = None,
) -> Type[X]:
    """Get a class by string, default, or implementation."""
    if query is None:
        if default is None:
            raise ValueError(f'No default {base.__name__} set')
        return default
    elif not isinstance(query, (str, type)):
        raise TypeError(f'Invalid {base.__name__} type: {type(query)} - {query}')
    elif isinstance(query, str):
        key = normalize_string(query, suffix=suffix)
        if key in lookup_dict:
            return lookup_dict[key]
        if lookup_dict_synonyms is not None and key in lookup_dict_synonyms:
            return lookup_dict_synonyms[key]
        raise ValueError(f'Invalid {base.__name__} name: {query}')
    elif issubclass(query, base):
        return query
    raise TypeError(f'Not subclass of {base.__name__}: {query}')
e5f805df5ef19de9939344beee21834e3f2556ab
12,496
def selection_sort(data): """Sort a list of unique numbers in ascending order using selection sort. O(n^2). The process includes repeatedly iterating through a list, finding the smallest element, and sorting that element. Args: data: data to sort (list of int) Returns: sorted list """ sorted_data = data[:] for i, value in enumerate(sorted_data): # find smallest value in unsorted subset min_value = min(sorted_data[i:]) index_min = sorted_data.index(min_value) # place smallest value at start of unsorted subset sorted_data[i], sorted_data[index_min] = min_value, value return sorted_data
8b745be41c857669aedecb25b3006bbdc1ef04eb
12,497
def _conv(args, filter_size, num_features, bias, reuse, w_init=None, b_init=0.0, scope='_conv'): """convolution: Args: args: a Tensor or a list of Tensors of dimension 3D, 4D or 5D batch x n, Tensors. filter_size: int tuple of filter height and width. reuse: None/True, whether to reuse variables w_init: weights initializer object b_init: a `int`, bias initializer value num_features: int, number of features. bias_start: starting value to initialize the bias; 0 by default. Returns: A 3D, 4D, or 5D Tensor with shape [batch ... num_features] Raises: ValueError: if some of the arguments has unspecified or wrong shape. """ # Calculate the total size of arguments on dimension 1. total_arg_size_depth = 0 shapes = [a.get_shape().as_list() for a in args] shape_length = len(shapes[0]) for shape in shapes: if len(shape) not in [3, 4, 5]: raise ValueError("Conv Linear expects 3D, 4D or 5D arguments: %s" % str(shapes)) if len(shape) != len(shapes[0]): raise ValueError("Conv Linear expects all args to be of same Dimensiton: %s" % str(shapes)) else: total_arg_size_depth += shape[-1] dtype = [a.dtype for a in args][0] # determine correct conv operation if shape_length == 3: conv_op = tf.nn.conv1d strides = 1 elif shape_length == 4: conv_op = tf.nn.conv2d strides = shape_length * [1] elif shape_length == 5: conv_op = tf.nn.conv3d strides = shape_length * [1] # Now the computation. with tf.variable_scope(scope, reuse=reuse): kernel = tf.get_variable( "W", filter_size + [total_arg_size_depth, num_features], dtype=dtype, initializer=w_init) if len(args) == 1: res = conv_op(args[0], kernel, strides, padding='SAME') else: res = conv_op(tf.concat(axis=shape_length - 1, values=args), kernel, strides, padding='SAME') if not bias: return res bias_term = tf.get_variable( "biases", [num_features], dtype=dtype, initializer=tf.constant_initializer(b_init, dtype=dtype)) return res + bias_term
104d91623949e4506c4b72001c23b6ab7fb312ca
12,498
def _feature_normalization(features, method, feature_type): """Normalize the given feature vector `y`, with the stated normalization `method`. Args: features (np.ndarray): The signal array method (str): Normalization method. 'global': Uses global mean and standard deviation values from `train.txt`. The normalization is being applied element wise. ([sample] - [mean]^T) / [std]^T Where brackets denote matrices or vectors. 'local': Use local (in sample) mean and standard deviation values, and apply the normalization element wise, like in `global`. 'local_scalar': Uses only the mean and standard deviation of the current sample. The normalization is being applied by ([sample] - mean_scalar) / std_scalar 'none': No normalization is being applied. feature_type (str): Feature type, see `load_sample` for details. Returns: np.ndarray: The normalized feature vector. """ if method == 'none': return features elif method == 'global': # Option 'global' is applied element wise. if feature_type == 'mel': global_mean = __global_mean_mel global_std = __global_std_mel elif feature_type == 'mfcc': global_mean = __global_mean_mfcc global_std = __global_std_mfcc else: raise ValueError('Unsupported global feature type: {}'.format(feature_type)) return (features - global_mean) / global_std elif method == 'local': return (features - np.mean(features, axis=0)) / np.std(features, axis=0) elif method == 'local_scalar': # Option 'local' uses scalar values. return (features - np.mean(features)) / np.std(features) else: raise ValueError('Invalid normalization method: {}'.format(method))
0479363651a4bcf1622e7bdb0906b55e3adb1cce
12,500
def get_constraint(name):
    """
    Lookup table of default weight constraint functions.

    Parameters
    ----------
    name : Constraint, None, str
        Constraint to look up. Must be one of:

        - 'unit' : Unit-norm constraint.
        - 'maxnorm' : Max-norm constraint.
        - 'minmax' : Min-max-norm constraint.
        - Constraint : A custom implementation.
        - None : Return None.

        Custom Constraint must implement `constrain` function.

    Returns
    -------
    constraint : Constraint or None
        The constraint function.
    """
    if name == 'unit':
        return UnitNorm
    elif name == 'maxnorm':
        return MaxNorm
    elif name == 'minmax':
        return MinMaxNorm
    elif name is None or isinstance(name, Constraint):
        return name
    else:
        raise ValueError("Invalid constraint")
09927531f4c6770e86ad603063e4edb0b0c4ff48
12,501
def player_count(conn, team_id): """Returns the number of players associated with a particular team""" c = conn.cursor() c.execute("SELECT id FROM players WHERE team_id=?", (team_id,)) return len(c.fetchall())
cfced6da6c8927db2ccf331dca7d23bba0ce67e5
12,502
def _RedisClient(address): """ Return a connection object connected to the socket given by `address` """ h1, h2 = get_handle_pair(conn_type=REDIS_LIST_CONN) c = _RedisConnection(h1) #redis_client = util.get_redis_client() redis_client = util.get_cache_client() ip, port = address chan = '{}:{}'.format(ip, port) redis_client.publish(chan, bytes(h2, 'utf-8')) ack = c.recv() assert ack == 'OK' return c
fc8bab786bb521fbd0715da3ab690575d1df865e
12,503
import math def format_timedelta(value, time_format="{days} days, {hours2}:{minutes2}:{seconds2}"): """Format a datetie.timedelta. See """ if hasattr(value, 'seconds'): seconds = value.seconds + value.days * 24 * 3600 else: seconds = int(value) seconds_total = seconds minutes = int(math.floor(seconds / 60)) minutes_total = minutes seconds -= minutes * 60 hours = int(math.floor(minutes / 60)) hours_total = hours minutes -= hours * 60 days = int(math.floor(hours / 24)) days_total = days hours -= days * 24 years = int(math.floor(days / 365)) years_total = years days -= years * 365 return time_format.format( **{ 'seconds': seconds, 'seconds2': str(seconds).zfill(2), 'minutes': minutes, 'minutes2': str(minutes).zfill(2), 'hours': hours, 'hours2': str(hours).zfill(2), 'days': days, 'years': years, 'seconds_total': seconds_total, 'minutes_total': minutes_total, 'hours_total': hours_total, 'days_total': days_total, 'years_total': years_total, })
19dc2b175beb1d030f14ae7fe96cb16d66f6c219
12,504
def random_account_user(account): """Get a random user for an account.""" account_user = AccountUser.objects.filter(account=account).order_by("?").first() return account_user.user if account_user else None
5fe918af67710d0d1519f56eee15811430a0e139
12,505
def overwrite(main_config_obj, args): """ Overwrites parameters with input flags Args: main_config_obj (ConfigClass): config instance args (dict): arguments used to overwrite Returns: ConfigClass: config instance """ # Sort on nested level to override shallow items first args = dict(sorted(args.items(), key=lambda item: item[0].count('.'))) for argument_key, val in args.items(): # Seperate nested keys into outer and inner outer_keys = argument_key.split('.') inner_key = outer_keys.pop(-1) base_err_msg = f"Can't set '{argument_key} = {val}'" # Check that the nested config has the attribute and is a config class config_obj = main_config_obj config_class = type(config_obj).__name__ for key_idx, key_part in enumerate(argument_key.split('.')): err_msg = f"{base_err_msg}. '{key_part}' isn't an attribute in '{config_class}'" assert hasattr(config_obj, key_part), err_msg # Check if the config allows the argument figutils.check_allowed_input_argument(config_obj, key_part, argument_key) # Check if the outer attributes are config classes if key_idx < len(outer_keys): config_obj = getattr(config_obj, key_part) config_class = type(config_obj).__name__ err_msg = f"{base_err_msg}. '{'.'.join(outer_keys)}' isn't a registered Anyfig config class" assert figutils.is_config_class(config_obj), err_msg value_class = type(getattr(config_obj, inner_key)) base_err_msg = f"Input argument '{argument_key}' with value {val} can't create an object of the expected type" # Create new anyfig class object if figutils.is_config_class(value_class): value_obj = create_config(val) # Create new object that follows the InterfaceField's rules elif issubclass(value_class, fields.InterfaceField): field = getattr(config_obj, inner_key) if isinstance(value_class, fields.InputField): value_class = field.type_pattern else: value_class = type(field.value) try: val = value_class(val) except Exception as e: err_msg = f"{base_err_msg} {field.type_pattern}. {e}" raise RuntimeError(err_msg) from None field = field.update_value(inner_key, val, config_class) value_obj = field.finish_wrapping_phase(inner_key, config_class) # Create new object of previous value type with new value else: try: if isinstance(val, dict): # Keyword specified cli-arguments value_obj = value_class(**val) else: value_obj = value_class(val) except Exception as e: err_msg = f"{base_err_msg} {value_class}. {e}" raise RuntimeError(err_msg) from None # Overwrite old value setattr(config_obj, inner_key, value_obj) return main_config_obj
98ee9cf034a9b714ae18e737761b06bfd669bfa4
12,506
def max_delta(model, new_model): """Return the largest difference between any two corresponding values in the models""" return max( [(abs(model[i] - new_model[i])).max() for i in range(len(model))] )
faf4a9fb2b24f7e7b4f357eef195e435950ea218
12,507
def wiener_khinchin_transform(power_spectrum, frequency, time): """ A function to transform the power spectrum to a correlation function by the Wiener Khinchin transformation ** Input:** * **power_spectrum** (`list or numpy.array`): The power spectrum of the signal. * **frequency** (`list or numpy.array`): The frequency discretizations of the power spectrum. * **time** (`list or numpy.array`): The time discretizations of the signal. **Output/Returns:** * **correlation_function** (`list or numpy.array`): The correlation function of the signal. """ frequency_interval = frequency[1] - frequency[0] fac = np.ones(len(frequency)) fac[1: len(frequency) - 1: 2] = 4 fac[2: len(frequency) - 2: 2] = 2 fac = fac * frequency_interval / 3 correlation_function = np.zeros(len(time)) for i in range(len(time)): correlation_function[i] = 2 * np.dot(fac, power_spectrum * np.cos(frequency * time[i])) return correlation_function
3cf8916c75632e3a0db52f907ce180eb766f9f2e
12,508
def child_is_flat(children, level=1): """ Check if all children in section is in same level. children - list of section children. level - integer, current level of depth. Returns True if all children in the same level, False otherwise. """ return all( len(child) <= level + 1 or child[(level + 1) :][0].isalpha() for child in children )
e14f9210a90b40b419d21fffa1542212429d80be
12,509
from pathlib import Path def load_dataset(name, other_paths=[]): """Load a dataset with given (file) name.""" if isinstance(name, Dataset): return name path = Path(name) # First, try if you have passed a fully formed dataset path if path.is_file(): return _from_npy(name, classes=classes) # Go through the dataset paths, return the first dataset found all_paths = dataset_path + other_paths for p in all_paths: try: file = p / path return _from_npy(file, classes=classes) except FileNotFoundError: pass raise FileNotFoundError( "Could not find dataset {} in paths {}".format(name, all_paths) )
3f3d2e7e7ec577098e1a1599c74638ced5d3c103
12,510
def isqrtcovresnet101b(**kwargs): """ iSQRT-COV-ResNet-101 model with stride at the second convolution in bottleneck block from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,' https://arxiv.org/abs/1712.01034. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_isqrtcovresnet(blocks=101, conv1_stride=False, model_name="isqrtcovresnet101b", **kwargs)
fdf166fa3ce9e893e8e97d1057dac89d084d2217
12,511
def get_data(name: str, level: int, max_level: int) -> str:
    """Scrape data from the wiki page.

    Args:
        name: program or node name
        level: the level to look up

    Returns:
        the scraped data
    """
    reply_msg = []
    for dataframe in read_html(generate_url(name)):
        if (max_level < dataframe.shape[0] < max_level + 3
                and dataframe.iloc[level, 0].isdigit()
                and level == int(dataframe.iloc[level, 0])):
            reply_msg.append(zip(*dataframe.iloc[[0, level], 1:].values))
    return '\n'.join(':'.join(pair) for data in reply_msg for pair in data)
4e0f11a33c81993132d45f3fdad5f42c1288bbe5
12,512
def insert_data(context, data_dict): """ :raises InvalidDataError: if there is an invalid value in the given data """ data_dict['method'] = _INSERT result = upsert_data(context, data_dict) return result
c631016be36f1988bfa9c98cea42a7f63fddc276
12,514
import time


def timestamp():
    """Get the unix timestamp now and return it.

    Attention: It's a floating point number."""
    timestamp = time.time()
    return timestamp
8e56a61659da657da9d5dda364d4d9e8f3d58ed2
12,515
from datetime import datetime def _n64_to_datetime(n64): """Convert Numpy 64 bit timestamps to datetime objects. Units in seconds""" return datetime.utcfromtimestamp(n64.tolist() / 1e9)
a25327f2cd0093635f86f3145f5674cc1945d3f8
12,516
import itertools def cycle(iterable): """Make an iterator returning elements from the iterable and saving a copy of each. When the iterable is exhausted, return elements from the saved copy. Repeats indefinitely. This function uses single dispatch. .. seealso:: :func:`itertools.cycle` """ return itertools.cycle(iterable)
13f479fca709dffa77eeca3d32ff7265c81588bf
12,517
def get_availability_zone(name=None,state=None,zone_id=None,opts=None): """ `.getAvailabilityZone` provides details about a specific availability zone (AZ) in the current region. This can be used both to validate an availability zone given in a variable and to split the AZ name into its component parts of an AWS region and an AZ identifier letter. The latter may be useful e.g. for implementing a consistent subnet numbering scheme across several regions by mapping both the region and the subnet letter to network numbers. This is different from the `.getAvailabilityZones` (plural) data source, which provides a list of the available zones. :param str name: The full name of the availability zone to select. :param str state: A specific availability zone state to require. May be any of `"available"`, `"information"` or `"impaired"`. :param str zone_id: The zone ID of the availability zone to select. > This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/d/availability_zone.html.markdown. """ __args__ = dict() __args__['name'] = name __args__['state'] = state __args__['zoneId'] = zone_id if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = utilities.get_version() __ret__ = pulumi.runtime.invoke('aws:index/getAvailabilityZone:getAvailabilityZone', __args__, opts=opts).value return AwaitableGetAvailabilityZoneResult( name=__ret__.get('name'), name_suffix=__ret__.get('nameSuffix'), region=__ret__.get('region'), state=__ret__.get('state'), zone_id=__ret__.get('zoneId'), id=__ret__.get('id'))
6cb20524c1e0a2539e221711f1153949ab72f8e1
12,518
def _add_u_eq(blk, uex=0.8):
    """Add heat transfer coefficient adjustment for feed water flow rate.
    This is based on knowing the heat transfer coefficient at a particular
    flow and assuming the heat transfer coefficient is proportional to the
    feed water flow rate raised to a certain power (typically 0.8)

    Args:
        blk: Heat exchanger block to add correlation to
        uex: Correlation parameter value (default 0.8)

    Returns:
        None
    """
    ti = blk.flowsheet().time
    blk.U0 = pyo.Var(ti)
    blk.f0 = pyo.Var(ti)
    blk.uex = pyo.Var(ti, initialize=uex)
    for t in ti:
        blk.U0[t].value = blk.overall_heat_transfer_coefficient[t].value
        blk.f0[t].value = blk.tube.properties_in[t].flow_mol.value
    blk.overall_heat_transfer_coefficient.unfix()
    blk.U0.fix()
    blk.uex.fix()
    blk.f0.fix()

    @blk.Constraint(ti)
    def U_eq(b, t):
        return (
            b.overall_heat_transfer_coefficient[t]
            == b.U0[t] * (b.tube.properties_in[t].flow_mol / b.f0[t]) ** b.uex[t]
        )
f6b34a8e75367b43dbe759d273aa4be7dc371c12
12,519
def find_process_in_list( proclist, pid ): """ Searches for the given 'pid' in 'proclist' (which should be the output from get_process_list(). If not found, None is returned. Otherwise a list [ user, pid, ppid ] """ for L in proclist: if pid == L[1]: return L return None
19eab54b4d04b40a54a39a44e50ae28fbff9457c
12,520
def solution(s, start_pos, end_pos): """ Find the minimal nucleotide from a range of sequence DNA. :param s: String consisting of the letters A, C, G and T, which correspond to the types of successive nucleotides in the sequence :param start_pos: array with the start indexes for the intervals to check :param end_pos: array with the end indexes for the intervals to check :return: a list with the minimal nucleotide for each interval defined by start_pos and end_pos """ highest_class = 'T' highest_class_value = 4 # The array below must be in ascending order regarding the value assigned to the classes in the challenge description # (not necessarily in alphabetic order) other_classes = ['A', 'C', 'G'] other_classes_values = [1, 2, 3] # We create a prefix_sum list for each class, so we can identify when a range has that specific class prefix_sums = __class_based_prefix_sums(s, other_classes) result = [] for i in range(len(start_pos)): # We don't need to create a prefix_sum list for the class with highest value, # because we can always use it as a fallback current_result = highest_class_value for j in range(len(other_classes)): if __class_is_present(prefix_sums, j, start_pos[i], end_pos[i]): current_result = other_classes_values[j] break result.append(current_result) return result
25ef2f7e9b009de0534f8dde132c0eb44e3fe374
12,521
def validate_address(value: str, context: dict = {}) -> str: """ Default address validator function. Can be overriden by providing a dotted path to a function in ``SALESMAN_ADDRESS_VALIDATOR`` setting. Args: value (str): Address text to be validated context (dict, optional): Validator context data. Raises: ValidationError: In case address is not valid Returns: str: Validated value """ if not value: raise ValidationError(_("Address is required.")) return value
65e04a4780432608aa049687da98bd05a527fbad
12,522
from configparser import ConfigParser  # needed for ConfigParser below; missing from the original snippet
from pathlib import Path


def _get_hg_repo(path_dir):
    """Parse the repository's .hg/hgrc to find the remote (default) path."""
    if path_dir == "":
        return ""
    hgrc = Path(path_dir) / ".hg" / "hgrc"
    if hgrc.exists():
        config = ConfigParser()
        config.read(str(hgrc))
        if "paths" in config:
            return config["paths"].get("default", "hgrc: no default path?")
        else:
            return "hgrc: no [paths] section?"
    else:
        return "not a hg repo"
773ab4b45ba6883446c8e4a7725b7ac9d707440f
12,525
def array_to_string(array, col_delim=' ', row_delim='\n', digits=8, value_format='{}'): """ Convert a 1 or 2D array into a string with a specified number of digits and delimiter. The reason this exists is that the basic numpy array to string conversions are surprisingly bad. Parameters ------------ array : (n,) or (n, d) float or int Data to be converted If shape is (n,) only column delimiter will be used col_delim : str What string should separate values in a column row_delim : str What string should separate values in a row digits : int How many digits should floating point numbers include value_format : str Format string for each value or sequence of values If multiple values per value_format it must divide into array evenly. Returns ---------- formatted : str String representation of original array """ # convert inputs to correct types array = np.asanyarray(array) digits = int(digits) row_delim = str(row_delim) col_delim = str(col_delim) value_format = str(value_format) # abort for non-flat arrays if len(array.shape) > 2: raise ValueError('conversion only works on 1D/2D arrays not %s!', str(array.shape)) # allow a value to be repeated in a value format repeats = value_format.count('{}') if array.dtype.kind == 'i': # integer types don't need a specified precision format_str = value_format + col_delim elif array.dtype.kind == 'f': # add the digits formatting to floats format_str = value_format.replace( '{}', '{:.' + str(digits) + 'f}') + col_delim else: raise ValueError('dtype %s not convertible!', array.dtype.name) # length of extra delimiters at the end end_junk = len(col_delim) # if we have a 2D array add a row delimiter if len(array.shape) == 2: format_str *= array.shape[1] # cut off the last column delimiter and add a row delimiter format_str = format_str[:-len(col_delim)] + row_delim end_junk = len(row_delim) # expand format string to whole array format_str *= len(array) # if an array is repeated in the value format # do the shaping here so we don't need to specify indexes shaped = np.tile(array.reshape((-1, 1)), (1, repeats)).reshape(-1) # run the format operation and remove the extra delimiters formatted = format_str.format(*shaped)[:-end_junk] return formatted
9e7f189049b1ad3eff3679568a84e7151e2c643c
12,526
def get_dp_logs(logs): """Get only the list of data point logs, filter out the rest.""" filtered = [] compute_bias_for_types = [ "mouseout", "add_to_list_via_card_click", "add_to_list_via_scatterplot_click", "select_from_list", "remove_from_list", ] for log in logs: if log["type"] in compute_bias_for_types: filtered.append(log) return filtered
e0a7c579fa9218edbf942afdbdb8e6cf940d1a0c
12,527
from typing import List from typing import Dict def assign_reports_to_watchlist(cb: CbThreatHunterAPI, watchlist_id: str, reports: List[Dict]) -> Dict: """Set a watchlist report IDs attribute to the passed reports. Args: cb: Cb PSC object watchlist_id: The Watchlist ID to update. reports: The Intel Reports. Returns: The Watchlist in dict form. """ watchlist_data = get_watchlist(cb, watchlist_id) if not watchlist_data: return None watchlist_data["report_ids"] = [r["id"] for r in reports] watchlist_data = update_watchlist(cb, watchlist_data) if not watchlist_data: LOGGER.error(f"unexpected problem updating watchlist with report IDs.") return False return watchlist_data
92bb0369211c1720fa4d9baa7a4e3965851339f2
12,528
def visualize_filter( image, model, layer, filter_index, optimization_parameters, transformation=None, regularization=None, threshold=None, ): """Create a feature visualization for a filter in a layer of the model. Args: image (array): the image to be modified by the feature vis process. model (object): the model to be used for the feature visualization. layer (string): the name of the layer to be used in the visualization. filter_index (number): the index of the filter to be visualized. optimization_parameters (OptimizationParameters): the optimizer class to be applied. transformations (function): a function defining the transformations to be perfromed. regularization (function): customized regularizers to be applied. Defaults to None. threshold (list): Intermediate steps for visualization. Defaults to None. Returns: tuple: activation and result image for the process. """ image = tf.Variable(image) feature_extractor = get_feature_extractor(model, layer) _threshold_figures = figure(figsize=(15, 10), dpi=200) print("Starting Feature Vis Process") for iteration in range(optimization_parameters.iterations): pctg = int(iteration / optimization_parameters.iterations * 100) if transformation: if not callable(transformation): raise ValueError("The transformations need to be a function.") image = transformation(image) else: image = trans.standard_transformation(image) activation, image = gradient_ascent_step( image, feature_extractor, filter_index, regularization, optimization_parameters ) print('>>', pctg, '%', end="\r", flush=True) # Routine for creating a threshold image for Jupyter Notebooks if isinstance(threshold, list) and (iteration in threshold): threshold_image = _threshold_figures.add_subplot( 1, len(threshold), threshold.index(iteration) + 1 ) threshold_image.title.set_text(f"Step {iteration}") threshold_view(image) print('>> 100 %') if image.shape[1] < 299 or image.shape[2] < 299: image = tf.image.resize(image, [299, 299]) # Decode the resulting input image image = imgs.deprocess_image(image[0].numpy()) return activation, image
09940c0484361240929f61f04c9a96771b440033
12,529
def subtraction(x, y): """ Subtraction x and y >>> subtraction(-20, 80) -100 """ assert isinstance(x, (int, float)), "The x value must be an int or float" assert isinstance(y, (int, float)), "The y value must be an int or float" return x - y
203233897d31cb5bc79fca0f8c911b03d7deb5ba
12,530
import aiohttp async def paste(text: str) -> str: """Return an online bin of given text.""" session = aiohttp.ClientSession() async with session.post("https://hasteb.in/documents", data=text) as post: if post.status == 200: response = await post.text() return f"https://hasteb.in/{response[8:-2]}" post = await session.post("https://bin.drlazor.be", data={"val": text}) if post.status == 200: return post.url
d204f6f1db3aa33c98c4ebeae9888acc438f7dc3
12,531
def lr_step(base_lr, curr_iter, decay_iters, warmup_iter=0): """Stepwise exponential-decay learning rate policy. Args: base_lr: A scalar indicates initial learning rate. curr_iter: A scalar indicates current iteration. decay_iter: A list of scalars indicates the numbers of iteration when the learning rate is decayed. warmup_iter: A scalar indicates the number of iteration before which the learning rate is not adjusted. Return: A scalar indicates the current adjusted learning rate. """ if curr_iter < warmup_iter: alpha = curr_iter / warmup_iter return base_lr * (1 / 10.0 * (1 - alpha) + alpha) else: return base_lr * (0.1 ** get_step_index(curr_iter, decay_iters))
b8cfe670aba0bed1f84ae09c6271e681fad42864
12,532
def apo(coalg): """ Extending an anamorphism with the ability to halt. In this version, a boolean is paired with the value that indicates halting. """ def run(a): stop, fa = coalg(a) return fa if stop else fa.map(run) return run
a1e64d9ed49a8641095c8a8c20ae08c1cc6e9c19
12,533
def cat_sample(ps): """ sample from categorical distribution ps is a 2D array whose rows are vectors of probabilities """ r = nr.rand(len(ps)) out = np.zeros(len(ps),dtype='i4') cumsums = np.cumsum(ps, axis=1) for (irow,csrow) in enumerate(cumsums): for (icol, csel) in enumerate(csrow): if csel > r[irow]: out[irow] = icol break return out
30009b31dba0eff23010bfe6d531e8c55e46873c
12,534
def extract_text(text): """ """ l = [] res = [] i = 0 while i < len(text) - 2: h, i, _ = next_token(text, i) obj = text[h:i] l.append(obj) for j, tok in enumerate(l): if tok == b'Tf': font = l[j-2] fsize = float(l[j-1]) elif tok == b'Td': x = float(l[j-2]) y = float(l[j-1]) elif tok == b'Tj': text = l[j-1] res.append((x, y, font, fsize, text[1:-1])) return res
9b0746be6f6fa39548fd34f3bffda7e8baf4a6ef
12,536
def add_pruning_arguments_to_parser(parser): """Add pruning arguments to existing argparse parser""" parser.add_argument('--do_prune', action='store_true', help="Perform pruning when training a model") parser.add_argument('--pruning_config', type=str, default='', help="Path to a pruning config") parser.add_argument('--pruning_override', type=str, nargs='*', action=ConcatenateStringAction, default='', help="JSON string to override pruning configuration file") return parser
2a94e0986564f4af8fe580ca3500f06c04598f14
12,537
def read_ult_meta(filebase): """Convenience fcn for output of targeted metadata.""" meta = _parse_ult_meta(filebase) return (meta["NumVectors"], meta["PixPerVector"], meta["ZeroOffset"], meta["Angle"], meta["PixelsPerMm"], meta["FramesPerSec"], meta["TimeInSecsOfFirstFrame"])
b2237a2dab9faf98179f69de9e9a5f1dc7289f78
12,539
from typing import Iterable from typing import List def safe_identifiers_iterable(val_list: Iterable[str]) -> List[str]: """ Returns new list, all with safe identifiers. """ return [safe_identifier(val) for val in val_list]
6b80d90cfac2ea527ace38cc6550571b5f120a7f
12,540
def encode_varint(value, write):
    """ Encode an integer to a varint presentation. See
    https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
    on how those can be produced.

        Arguments:
            value (int): Value to encode
            write (function): Called per byte that needs to be written

        Returns:
            int: Number of bytes written
    """
    value = (value << 1) ^ (value >> 63)

    if value <= 0x7f:  # 1 byte
        write(value)
        return 1
    if value <= 0x3fff:  # 2 bytes
        write(0x80 | (value & 0x7f))
        write(value >> 7)
        return 2
    if value <= 0x1fffff:  # 3 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(value >> 14)
        return 3
    if value <= 0xfffffff:  # 4 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(0x80 | ((value >> 14) & 0x7f))
        write(value >> 21)
        return 4
    if value <= 0x7ffffffff:  # 5 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(0x80 | ((value >> 14) & 0x7f))
        write(0x80 | ((value >> 21) & 0x7f))
        write(value >> 28)
        return 5
    else:
        # Return to general algorithm
        bits = value & 0x7f
        value >>= 7
        i = 0
        while value:
            write(0x80 | bits)
            bits = value & 0x7f
            value >>= 7
            i += 1
        write(bits)
        # The trailing write emits one more byte than the loop counted.
        return i + 1
075286208008a0b7507eafe19158eebdb2af66b7
12,541
def heap_sort(li):
    """ [list of int] => [list of int]

    Heap sort: divides its input into a sorted and an unsorted region,
    and it iteratively shrinks the unsorted region by extracting the largest element from it
    and inserting it into the sorted region. It does not waste time
    with a linear-time scan of the unsorted region; rather, heap sort
    maintains the unsorted region in a heap data structure to more quickly
    find the largest element in each step.
    To implement a heap using arrays, we will use the rule
    li[k] >= li[2*k+1] and li[k] >= li[2*k+2] (left child and right child respectively).
    More generally, the array must satisfy the heap quality:
        For any given node C, if P is a parent node of C, then
        the value of P is greater than or equal to the key of C (for max heaps)
    Graphically, the array is read as a binary tree: index 0 is the root,
    indices 1 and 2 are its children, indices 3-6 the next level, and so on.
    """
    def heapify(lst, heap_size, root):
        """ ([list of int], int, int) => [list of int]
        Rearranges the list to satisfy the heap quality.
        Root is index of the largest element in the lst.
        """
        # the largest node
        largest = root
        left_child = 2 * largest + 1
        right_child = 2 * largest + 2

        # check if left_child and root need to be swapped
        if left_child < heap_size and lst[largest] < lst[left_child]:
            largest = left_child

        # check if right_child and root need to be swapped
        if right_child < heap_size and lst[largest] < lst[right_child]:
            largest = right_child

        # change root, if needed
        if largest != root:
            lst[root], lst[largest] = lst[largest], lst[root]
            # continue to heapify the root
            heapify(lst, heap_size, largest)

    # Build a maxheap by iterating through the list backwards
    for i in range(len(li), -1, -1):
        heapify(li, len(li), i)

    # extract elements one by one
    for i in range(len(li) - 1, 0, -1):
        # Heap sort differs from insertion sort in that it searches for the
        # maximum, rather than minimum, element. li[0:i] is a heap (like a
        # tree, but elements are not guaranteed to be sorted) and li[i:] is
        # in sorted order.
        li[i], li[0] = li[0], li[i]

        # return to heap, since the heap was messed up by swapping
        heapify(li, i, 0)

    return li
a72be31e5256c880c157636aa7a15df013ce651d
12,542
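# Small sanity check for heap_sort() above; the list is sorted in place and
# also returned.
data = [5, 1, 4, 2, 8, 0, 2]
print(heap_sort(data))  # [0, 1, 2, 2, 4, 5, 8]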
def vector_field(v, t, inf_mat, state_meta): """vector_field returns the temporal derivative of a flatten state vector :param v: array of shape (1,mmax+1+(nmax+1)**2) for the flatten state vector :param t: float for time (unused) :param inf_mat: array of shape (nmax+1,nmax+1) representing the infection rate :param state_meta: tuple of arrays encoding information of the structure. :returns vec_field: array of shape (1,(nmax+1)**2) for the flatten vector field. """ mmax = state_meta[0] nmax = state_meta[1] m = state_meta[2] gm = state_meta[3] pn = state_meta[4] imat = state_meta[5] nmat = state_meta[6] pnmat = state_meta[7] sm = v[:mmax+1] fni = v[mmax+1:].reshape(nmax+1,nmax+1) fni_field = np.zeros(fni.shape) #matrix field sm_field = np.zeros(sm.shape) #calculate mean-field quantities r = np.sum(inf_mat[2:,:]*(nmat[2:,:]-imat[2:,:])*fni[2:,:]*pnmat[2:,:]) r /= np.sum((nmat[2:,:]-imat[2:,:])*fni[2:,:]*pnmat[2:,:]) rho = r*excess_susceptible_membership(m,gm,sm) #contribution for nodes #------------------------ sm_field = 1 - sm - sm*m*r #contribution for groups #------------------------ #contribution from above fni_field[2:,:nmax] += imat[2:,1:]*fni[2:,1:] #contribution from equal fni_field[2:,:] += (-imat[2:,:] -(nmat[2:,:] - imat[2:,:]) *(inf_mat[2:,:] + rho))*fni[2:,:] #contribution from below fni_field[2:,1:nmax+1] += ((nmat[2:,:nmax] - imat[2:,:nmax]) *(inf_mat[2:,:nmax] + rho))*fni[2:,:nmax] return np.concatenate((sm_field,fni_field.reshape((nmax+1)**2)))
31c8023966fd3e5c35b734759a3747f0d2752390
12,543
def newton(start, loss_fn, *args, lower=0, upper=None, epsilon=1e-9): """ Newton's Method! """ theta, origin, destination = args[0], args[1], args[2] if upper is None: upper = 1 start = lower while True: if loss_fn(start, theta, origin, destination) > 0: start = (upper+start)/2 else: start = (lower+start)/2 # print("START", start) x_cur = start x_prev = -1 try: while np.abs(x_cur-x_prev) >= epsilon: # print(x) x_prev = x_cur x_cur = newton_single(x_cur, loss_fn, theta, origin, destination) # print(x, x-x_prev, np.abs(x-x_prev)>=epsilon) if np.isnan(x_cur): continue return x_cur except ZeroDivisionError: print(start, x_cur)
bbd04297639fbc964c55a8c964e5bd5fb24d6e22
12,544
import torch def eval_det_cls(pred, gt, iou_thr=None): """Generic functions to compute precision/recall for object detection for a single class. Args: pred (dict): Predictions mapping from image id to bounding boxes \ and scores. gt (dict): Ground truths mapping from image id to bounding boxes. iou_thr (list[float]): A list of iou thresholds. Return: tuple (np.ndarray, np.ndarray, float): Recalls, precisions and \ average precision. """ # {img_id: {'bbox': box structure, 'det': matched list}} class_recs = {} npos = 0 img_id_npos = {} for img_id in gt.keys(): cur_gt_num = len(gt[img_id]) if cur_gt_num != 0: gt_cur = torch.zeros([cur_gt_num, 7], dtype=torch.float32) for i in range(cur_gt_num): gt_cur[i] = gt[img_id][i].tensor bbox = gt[img_id][0].new_box(gt_cur) else: bbox = gt[img_id] det = [[False] * len(bbox) for i in iou_thr] npos += len(bbox) img_id_npos[img_id] = img_id_npos.get(img_id, 0) + len(bbox) class_recs[img_id] = {'bbox': bbox, 'det': det} # construct dets image_ids = [] confidence = [] ious = [] for img_id in pred.keys(): cur_num = len(pred[img_id]) if cur_num == 0: continue pred_cur = torch.zeros((cur_num, 7), dtype=torch.float32) box_idx = 0 for box, score in pred[img_id]: image_ids.append(img_id) confidence.append(score) pred_cur[box_idx] = box.tensor box_idx += 1 pred_cur = box.new_box(pred_cur) gt_cur = class_recs[img_id]['bbox'] if len(gt_cur) > 0: # calculate iou in each image iou_cur = pred_cur.overlaps(pred_cur, gt_cur) for i in range(cur_num): ious.append(iou_cur[i]) else: for i in range(cur_num): ious.append(np.zeros(1)) confidence = np.array(confidence) # sort by confidence sorted_ind = np.argsort(-confidence) image_ids = [image_ids[x] for x in sorted_ind] ious = [ious[x] for x in sorted_ind] # go down dets and mark TPs and FPs nd = len(image_ids) tp_thr = [np.zeros(nd) for i in iou_thr] fp_thr = [np.zeros(nd) for i in iou_thr] for d in range(nd): R = class_recs[image_ids[d]] iou_max = -np.inf BBGT = R['bbox'] cur_iou = ious[d] if len(BBGT) > 0: # compute overlaps for j in range(len(BBGT)): # iou = get_iou_main(get_iou_func, (bb, BBGT[j,...])) iou = cur_iou[j] if iou > iou_max: iou_max = iou jmax = j for iou_idx, thresh in enumerate(iou_thr): if iou_max > thresh: if not R['det'][iou_idx][jmax]: tp_thr[iou_idx][d] = 1. R['det'][iou_idx][jmax] = 1 else: fp_thr[iou_idx][d] = 1. else: fp_thr[iou_idx][d] = 1. ret = [] # Return additional information for custom metrics. new_ret = {} new_ret["image_ids"] = image_ids new_ret["iou_thr"] = iou_thr new_ret["ious"] = [max(x.tolist()) for x in ious] new_ret["fp_thr"] = [x.tolist() for x in fp_thr] new_ret["tp_thr"] = [x.tolist() for x in tp_thr] new_ret["img_id_npos"] = img_id_npos for iou_idx, thresh in enumerate(iou_thr): # compute precision recall fp = np.cumsum(fp_thr[iou_idx]) tp = np.cumsum(tp_thr[iou_idx]) recall = tp / float(npos) # avoid divide by zero in case the first detection matches a difficult # ground truth precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) ap = average_precision(recall, precision) ret.append((recall, precision, ap)) return ret, new_ret
762f70d95261509778a1b015af30eab68f951b15
12,545
import pathlib from typing import List from typing import Dict import tqdm def parse_g2o(path: pathlib.Path, pose_count_limit: int = 100000) -> G2OData: """Parse a G2O file. Creates a list of factors and dictionary of initial poses.""" with open(path) as file: lines = [line.strip() for line in file.readlines()] pose_variables: List[jaxfg.geometry.LieVariableBase] = [] initial_poses: Dict[jaxfg.geometry.LieVariableBase, jaxlie.MatrixLieGroup] = {} factors: List[jaxfg.core.FactorBase] = [] for line in tqdm(lines): parts = [part for part in line.split(" ") if part != ""] variable: jaxfg.geometry.LieVariableBase between: jaxlie.MatrixLieGroup if parts[0] == "VERTEX_SE2": if len(pose_variables) > pose_count_limit: continue # Create SE(2) variable _, index, x, y, theta = parts index = int(index) x, y, theta = map(float, [x, y, theta]) assert len(initial_poses) == index variable = jaxfg.geometry.SE2Variable() initial_poses[variable] = jaxlie.SE2.from_xy_theta(x, y, theta) pose_variables.append(variable) elif parts[0] == "EDGE_SE2": # Create relative offset between pair of SE(2) variables before_index = int(parts[1]) after_index = int(parts[2]) if before_index > pose_count_limit or after_index > pose_count_limit: continue between = jaxlie.SE2.from_xy_theta(*(float(p) for p in parts[3:6])) precision_matrix_components = onp.array(list(map(float, parts[6:]))) precision_matrix = onp.zeros((3, 3)) precision_matrix[onp.triu_indices(3)] = precision_matrix_components precision_matrix = precision_matrix.T precision_matrix[onp.triu_indices(3)] = precision_matrix_components sqrt_precision_matrix = onp.linalg.cholesky(precision_matrix).T factors.append( jaxfg.geometry.BetweenFactor.make( variable_T_world_a=pose_variables[before_index], variable_T_world_b=pose_variables[after_index], T_a_b=between, noise_model=jaxfg.noises.Gaussian( sqrt_precision_matrix=sqrt_precision_matrix ), ) ) elif parts[0] == "VERTEX_SE3:QUAT": # Create SE(3) variable _, index, x, y, z, qx, qy, qz, qw = parts index = int(index) assert len(initial_poses) == index variable = jaxfg.geometry.SE3Variable() initial_poses[variable] = jaxlie.SE3( wxyz_xyz=onp.array(list(map(float, [qw, qx, qy, qz, x, y, z]))) ) pose_variables.append(variable) elif parts[0] == "EDGE_SE3:QUAT": # Create relative offset between pair of SE(3) variables before_index = int(parts[1]) after_index = int(parts[2]) numerical_parts = list(map(float, parts[3:])) assert len(numerical_parts) == 7 + 21 # between = jaxlie.SE3.from_xy_theta(*(float(p) for p in parts[3:6])) xyz = numerical_parts[0:3] quaternion = numerical_parts[3:7] between = jaxlie.SE3.from_rotation_and_translation( rotation=jaxlie.SO3.from_quaternion_xyzw(onp.array(quaternion)), translation=onp.array(xyz), ) precision_matrix = onp.zeros((6, 6)) precision_matrix[onp.triu_indices(6)] = numerical_parts[7:] precision_matrix = precision_matrix.T precision_matrix[onp.triu_indices(6)] = numerical_parts[7:] sqrt_precision_matrix = onp.linalg.cholesky(precision_matrix).T factors.append( jaxfg.geometry.BetweenFactor.make( variable_T_world_a=pose_variables[before_index], variable_T_world_b=pose_variables[after_index], T_a_b=between, noise_model=jaxfg.noises.Gaussian( sqrt_precision_matrix=sqrt_precision_matrix ), ) ) else: assert False, f"Unexpected line type: {parts[0]}" # Anchor start pose factors.append( jaxfg.geometry.PriorFactor.make( variable=pose_variables[0], mu=initial_poses[pose_variables[0]], noise_model=jaxfg.noises.DiagonalGaussian( jnp.ones(pose_variables[0].get_local_parameter_dim()) * 100.0 ), ) ) return 
G2OData(factors=factors, initial_poses=initial_poses)
6c766401220849e337279e8b465f9d67477a1599
12,546
def _som_actor(env): """ Construct the actor part of the model and return it. """ nactions = np.product(env.action_shape) model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=(1,) + env.observation_space.shape)) model.add(keras.layers.Dense(400)) model.add(keras.layers.Activation('relu')) model.add(keras.layers.Dense(200)) model.add(keras.layers.Activation('relu')) model.add(keras.layers.Dense(nactions)) model.add(keras.layers.Activation('sigmoid')) return model
e3bc1f675b16b2d728b1c070324139f0d99071a7
12,547
def sendEmail(): """email sender""" send_email('Registration ATS', ['[email protected]'], 'Thanks for registering ATS!', '<h3>Thanks for registering with ATS!</h3>') return "email sent to [email protected]"
e9125c32adac8267aaa550e59e27db4a10746ace
12,548
import scipy def Pvalue(chi2, df): """Returns the p-value of getting chi2 from a chi-squared distribution. chi2: observed chi-squared statistic df: degrees of freedom """ return 1 - scipy.stats.chi2.cdf(chi2, df)
1a2198e5d47396fc785a627d96513ded1d6894e0
12,549
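# Quick check of Pvalue() above: a chi-squared statistic of 3.84 with one
# degree of freedom sits at roughly the 5% significance level.
print(round(Pvalue(3.84, 1), 3))  # ~0.05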
def template(template_lookup_key: str) -> str: """Return template as string.""" with open(template_path(template_lookup_key), "r") as filepath: template = filepath.read() return template
d03bbc2baa8cb18174a468579bdea1da906de09d
12,550
def filter_rows(df, condition, reason):
    """Filter a dataframe down to the rows matching a boolean condition.

    :param df: dataframe to filter
    :param condition: boolean Series, True for rows to keep
    :param reason: short text used in the log message explaining the exclusion
    :return: filtered country_city_codes df
    """
    n_dropped = (~condition).sum()
    print(
        f"\nexcluding {n_dropped} locations ({n_dropped / df.shape[0]:.1%}) due to {reason}"
    )
    return df[condition]
7e5e6925bfb7d90bc90b42fda202d80e8ef5e3f6
12,551
def parse_projected_dos(f): """Parse `projected_dos.dat` output file.""" data = np.loadtxt(f) projected_dos = {"frequency_points": data[:, 0], "projected_dos": data[:, 1:].T} pdos = orm.XyData() pdos_list = [pd for pd in projected_dos["projected_dos"]] pdos.set_x(projected_dos["frequency_points"], "Frequency", "THz") pdos.set_y( pdos_list, [ "Projected DOS", ] * len(pdos_list), [ "1/THz", ] * len(pdos_list), ) pdos.label = "Projected DOS" return pdos
89c280e92c7598e3947d8ccda20b921c601c9b10
12,552
def get_from_parameters(a, b, c, alpha, beta, gamma): """ Create a Lattice using unit cell lengths and angles (in degrees). This code is modified from the pymatgen source code [1]_. Parameters ---------- a : :class:`float`: *a* lattice parameter. b : :class:`float`: *b* lattice parameter. c : :class:`float`: *c* lattice parameter. alpha : :class:`float`: *alpha* angle in degrees. beta : :class:`float`: *beta* angle in degrees. gamma : :class:`float`: *gamma* angle in degrees. Returns ------- :class:`tuple` of three :class:`numpy.ndarray` Tuple of cell lattice vectors of shape (3, ) in Angstrom. """ angles_r = np.radians([alpha, beta, gamma]) cos_alpha, cos_beta, cos_gamma = np.cos(angles_r) sin_alpha, sin_beta, sin_gamma = np.sin(angles_r) val = (cos_alpha * cos_beta - cos_gamma) / (sin_alpha * sin_beta) # Sometimes rounding errors result in values slightly > 1. val = cap_absolute_value(val) gamma_star = np.arccos(val) vector_a = np.array([a * sin_beta, 0.0, a * cos_beta]) vector_b = np.array([ -b * sin_alpha * np.cos(gamma_star), b * sin_alpha * np.sin(gamma_star), b * cos_alpha, ]) vector_c = np.array([0.0, 0.0, float(c)]) return tuple([vector_a, vector_b, vector_c])
076763f30da86b12747ede930993d99fc3b742d8
12,553
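# Hedged sketch for get_from_parameters() above (it relies on the
# cap_absolute_value helper, which is assumed to be defined elsewhere in the
# module). A cubic cell should yield three orthogonal 5-Angstrom vectors.
import numpy as np

vec_a, vec_b, vec_c = get_from_parameters(5.0, 5.0, 5.0, 90.0, 90.0, 90.0)
print(np.round(vec_a, 6))  # [5. 0. 0.]
print(np.round(vec_b, 6))  # [0. 5. 0.]
print(np.round(vec_c, 6))  # [0. 0. 5.]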
import random


def random_chinese_name():
    """Generate a random Chinese name.

    Covered formats: two-character names, three-character names and
    four-character names.
    :return: the generated name
    """
    name_len = random.randrange(4)
    if name_len == 0:
        name = random_two_name()
    elif name_len == 1:
        name = random_three_name()
    elif name_len == 2:
        name = random_three_names()
    else:
        name = random_four_name()
    return name
c86232cb81c492e2301837f5e330e6140ee503f3
12,554
def power_list(lists: [list]) -> list: """ power set across the options of all lists """ if len(lists) == 1: return [[v] for v in lists[0]] grids = power_list(lists[:-1]) new_grids = [] for v in lists[-1]: for g in grids: new_grids.append(g + [v]) return new_grids
135e3cde20388d999456e2e8a2fed4d98fac581d
12,555
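# Quick illustration of power_list() above: every combination of one option
# from each input list.
print(power_list([[1, 2], ["a", "b"]]))
# [[1, 'a'], [2, 'a'], [1, 'b'], [2, 'b']]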
import time


def send_email(from_email, to, subject, message, html=True):
    """
    Send emails to the given recipients
    :param from_email: sender address
    :param to: list of recipient addresses
    :param subject: email subject line
    :param message: email body
    :param html: send the body as HTML when True
    :return: Boolean value
    """
    try:
        email = EmailMessage(subject, message, from_email, to)
        print("Sending email..")
        if html:
            email.content_subtype = 'html'
        email.send()
        return True
    except Exception as e:
        print("Error in sending email: {0}".format(str(e)))
        if 'rate exceeded' in str(e):
            time.sleep(2)
            # Retry once and report the retry's outcome (the original dropped
            # the html flag and discarded the retry result).
            return send_email(from_email, to, subject, message, html)
        return False
28751bc30f51148c0389d4127229e6352a18cacb
12,556
import random def attack(health, power, percent_to_hit): """Calculates health from percent to hit and power of hit Parameters: health - integer defining health of attackee power - integer defining damage of attacker percent to hit - float defining percent chance to hit of attacker Returns: new health """ random_number = random.random() # number between 0.0 and 1.0 # if our random number falls between 0 and percent to hit if random_number <= percent_to_hit: # then a hit occurred so we reduce health by power health = health - power # return the new health value return health
83a74908f76f389c798b28c5d3f9035d2d8aff6a
12,557
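# Illustrative run of attack() above; seeding random makes the outcome
# reproducible for the example.
import random

random.seed(42)
print(attack(health=100, power=15, percent_to_hit=0.8))  # 85 -- the roll lands under 0.8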
def signal_requests_mock_factory(requests_mock: Mocker) -> Mocker: """Create signal service mock from factory.""" def _signal_requests_mock_factory( success_send_result: bool = True, content_length_header: str = None ) -> Mocker: requests_mock.register_uri( "GET", "http://127.0.0.1:8080/v1/about", status_code=HTTPStatus.OK, json={"versions": ["v1", "v2"]}, ) if success_send_result: requests_mock.register_uri( "POST", "http://127.0.0.1:8080" + SIGNAL_SEND_PATH_SUFIX, status_code=HTTPStatus.CREATED, ) else: requests_mock.register_uri( "POST", "http://127.0.0.1:8080" + SIGNAL_SEND_PATH_SUFIX, status_code=HTTPStatus.BAD_REQUEST, ) if content_length_header is not None: requests_mock.register_uri( "GET", URL_ATTACHMENT, status_code=HTTPStatus.OK, content=CONTENT, headers={"Content-Length": content_length_header}, ) else: requests_mock.register_uri( "GET", URL_ATTACHMENT, status_code=HTTPStatus.OK, content=CONTENT, ) return requests_mock return _signal_requests_mock_factory
543f73ec004911c87e9986cbd940a733f03287bf
12,558
def test_dwt_denoise_trace(): """ Check that sample data fed into dwt_denoise_trace() can be processed and that the returned signal is reasonable (for just one trace)""" # Loma Prieta test station (nc216859) data_files, origin = read_data_dir('geonet', 'us1000778i', '*.V1A') trace = [] trace = read_data(data_files[0]) dataOut = dwt.denoise_trace(tr=trace) # Look at frequency content? Samples? return dataOut
4c526e7e76c8672322bec0323974ca2ee20e25dd
12,559
def get_networks(project_id=None, auth_token=None): """ Get a list of all routed networks """ url = CATALOG_HOST + "/routednetwork" try: response_body = _api_request(url=url, http_method="GET", project_id=project_id, auth_token=auth_token) except CommandExecutionError as e: log.exception(e) return None networks = [ network for network in response_body if network['internalDeploymentStatus']['phase'] in list(map(str, POSITIVE_PHASES)) ] return networks
c2c9bfe05cfa416c9e37d04aefcc640d5d2250f7
12,560
def feature_registration(source,target, MIN_MATCH_COUNT = 12): """ Obtain the rigid transformation from source to target first find correspondence of color images by performing fast registration using SIFT features on color images. The corresponding depth values of the matching keypoints is then used to obtain rigid transformation through a ransac process. Parameters ---------- source : ((n,m) uint8, (n,m) float) The source color image and the corresponding 3d pointcloud combined in a list target : ((n,m) uint8, (n,m) float) The target color image and the corresponding 3d pointcloud combined in a list MIN_MATCH_COUNT : int The minimum number of good corresponding feature points for the algorithm to trust the pairwise registration result with feature matching only Returns ---------- transform: (4,4) float or None The homogeneous rigid transformation that transforms source to the target's frame if None, registration result using feature matching only cannot be trusted either due to no enough good matching feature points are found, or the ransac process does not return a solution """ cad_src, depth_src = source cad_des, depth_des = target # Initiate SIFT detector sift = cv2.xfeatures2d.SIFT_create() # find the keypoints and descripto rs with SIFT kp1, des1 = sift.detectAndCompute(cad_src,None) kp2, des2 = sift.detectAndCompute(cad_des,None) # find good mathces bf = cv2.BFMatcher() matches = bf.knnMatch(des1,des2, k=2) good = [] for m,n in matches: if m.distance < 0.7*n.distance: good.append(m) # if number of good matching feature point is greater than the MIN_MATCH_COUNT if len(good)>MIN_MATCH_COUNT: src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2) dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2) M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) matchesMask = mask.ravel().tolist() bad_match_index = np.where(np.array(matchesMask) == 0) src_index=np.vstack(src_pts).squeeze() src_index = np.delete(src_index, tuple(bad_match_index[0]), axis=0) src_index[:,[0, 1]] = src_index[:,[1, 0]] src_index = tuple(src_index.T.astype(np.int32)) src_depths = depth_src[src_index] dst_index=np.vstack(dst_pts).squeeze() dst_index = np.delete(dst_index, tuple(bad_match_index[0]), axis=0) dst_index[:,[0, 1]] = dst_index[:,[1, 0]] dst_index = tuple(dst_index.T.astype(np.int32)) dst_depths = depth_des[dst_index] dst_good=[] src_good=[] dst_depths=dst_depths[matchesMask>0][0] src_depths=src_depths[matchesMask>0][0] for i in xrange(len(dst_depths)): if np.sum(dst_depths[i])!=0 and np.sum(src_depths[i])!=0: dst_good.append(dst_depths[i].tolist()) src_good.append(src_depths[i].tolist()) # get rigid transforms between 2 set of feature points through ransac transform = match_ransac(np.asarray(src_good),np.asarray(dst_good)) return transform else: return None
d5839ef3586acd84c57341f19700de38660f9a9f
12,561
def set_metadata(testbench_config, testbench): """ Perform the direct substitutions from the sonar testbench metadata into the the testbench Args: testbench_config (Testbench): Sonar testbench description testbench (str): The testbench template """ for key, value in testbench_config.metadata.items(): if value is None: replace_str = "" else: replace_str = str(value) search_str = "SONAR_" + key.upper() testbench = replace_in_testbenches(testbench, search_str, replace_str) return testbench
375712b92f7467ee4d49e5d9e91250464c81337d
12,562
from bisect import bisect_left


def index(a, x):
    """Locate the leftmost value exactly equal to x"""
    i = bisect_left(a, x)
    if i != len(a) and a[i] == x:
        return i
    raise ValueError
f77aed5c55750b848fdf51b66b38f3774c812e23
12,563
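# Small check of index() above on a sorted list; a value that is absent raises
# ValueError.
print(index([1, 3, 3, 7, 9], 7))  # 3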
def convert_secondary_type_list(obj): """ :type obj: :class:`[mbdata.models.ReleaseGroupSecondaryType]` """ type_list = models.secondary_type_list() [type_list.add_secondary_type(convert_secondary_type(t)) for t in obj] return type_list
d84d20f6d82b462bda5bf04f6784effea47a0265
12,564
import json def load_data(path): """Load JSON data.""" with open(path) as inf: return json.load(inf)
531fc2b27a6ab9588b1f047e25758f359dc21b6d
12,566
from os.path import exists
from pathlib import Path


def get_extension(file_path):
    """
    get_extension(file_path)

    Gets the extension of the given file.

    Parameters
    ----------
    file_path
        A path to a file

    Returns
    -------
    str
        Returns the extension of the file if it exists or None otherwise.
        The returned extension contains a dot. Ex: .csv
    """
    if exists(file_path):
        return Path(file_path).suffix
    else:
        return None
7b1c4ba4f20ac913bb38292d4a704869cab6937e
12,567
import pandas as pd


def rank_in_group(df, group_col, rank_col, rank_method="first"):
    """Ranks a column in each group which is grouped by another column

    Args:
        df (pandas.DataFrame): dataframe to rank-in-group its column
        group_col (str): column to be grouped by
        rank_col (str): column to be ranked for
        rank_method (str): rank method to be the "method" argument of pandas.rank() function

    Returns:
        pandas.DataFrame: dataframe after the rank-in-group operation
    """
    df = df.copy()
    df_slice = df[[group_col, rank_col]].drop_duplicates()
    # Rank within each group (the original referenced an undefined
    # `rank_column` name and ranked across all groups).
    df_slice["ranked_{}".format(rank_col)] = df_slice.groupby(group_col)[rank_col].rank(
        method=rank_method
    )
    # Merge on both columns so each row picks up exactly one rank.
    df = pd.merge(
        df,
        df_slice,
        how="left",
        on=[group_col, rank_col],
    )
    return df
f2ae45641339bf4bc71bc48a415a28602ccf8da3
12,568
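# Hedged usage sketch for rank_in_group() above (after the rank_col fix):
# scores are ranked separately inside each group.
import pandas as pd

frame = pd.DataFrame({"group": ["a", "a", "b", "b"], "score": [10, 30, 20, 40]})
print(rank_in_group(frame, group_col="group", rank_col="score"))
# adds a ranked_score column: 1, 2 within group "a" and 1, 2 within group "b"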
import six def get_layer_options(layer_options, local_options): """ Get parameters belonging to a certain type of layer. Parameters ---------- layer_options : list of String Specifies parameters of the layer. local_options : list of dictionary Specifies local parameters in a model function. """ layer_options_dict = {} for key, value in six.iteritems(local_options): if key in layer_options: layer_options_dict[key] = value return layer_options_dict
e40945395c4a96c0a0b9447eeb1d0b50cf661bd7
12,569
def expr(term:Vn,add:Vt,expr:Vn)->Vn: """ expr -> term + expr """ return {"add":[term,expr]}
f66475ecbd255ac4c4a04b0d705f1c052c4ee123
12,570
import json def gene_box(cohort, order='median', percentage=False): """Box plot with counts of filtered mutations by gene. percentage computes fitness as the increase with respect to the self-renewing replication rate lambda=1.3. Color allows you to use a dictionary of colors by gene. Returns a figure.""" # Load gene color dictionary with open('../Resources/gene_color_dict.json') as json_file: color_dict = json.load(json_file) # Create a dictionary with all filtered genes gene_list = [] for traj in cohort: gene_list.append(traj.gene) gene_dict = {element: [] for element in set(gene_list)} # update the counts for each gene if percentage is False: y_label = 'Fitness' for traj in cohort: fitness = traj.fitness gene_dict[traj.gene].append(fitness) if percentage is True: y_label = 'fitness_percentage' for traj in cohort: fitness = traj.fitness_percentage gene_dict[traj.gene].append(fitness) # sort dictionary in descending order if order == 'mean': gene_dict = dict(sorted(gene_dict.items(), key=lambda item: np.mean(item[1]), reverse=True)) if order == 'median': gene_dict = dict(sorted(gene_dict.items(), key=lambda item: np.median(item[1]), reverse=True)) if order == 'max': gene_dict = dict(sorted(gene_dict.items(), key=lambda item: np.max(item[1]), reverse=True)) # Bar plot fig = go.Figure() # color_dict = dict() # if isinstance(color, dict): # color_dict = color for i, key in enumerate(gene_dict): fig.add_trace( go.Box(y=gene_dict[key], marker_color=color_dict[key], name=key, boxpoints='all', showlegend=False)) fig.update_layout(title='Gene distribution of filtered mutations', yaxis_title=y_label, template="simple_white") fig.update_xaxes(linewidth=2) fig.update_yaxes(linewidth=2) if percentage is False: fig.update_yaxes(type='log', tickvals=[0.05, 0.1, 0.2, 0.4]) fig.update_layout(xaxis_tickangle=-45) return fig, gene_dict
851c166246144b14d51863b4c775baa88ab87205
12,571
from typing import Union from typing import List def _clip_and_count( adata: AnnData, target_col: str, *, groupby: Union[str, None, List[str]] = None, clip_at: int = 3, inplace: bool = True, key_added: Union[str, None] = None, fraction: bool = True, ) -> Union[None, np.ndarray]: """Counts the number of identical entries in `target_col` for each group in `group_by`. """ if target_col not in adata.obs.columns: raise ValueError("`target_col` not found in obs.") groupby = [groupby] if isinstance(groupby, str) else groupby groupby_cols = [target_col] if groupby is None else groupby + [target_col] clonotype_counts = ( adata.obs.groupby(groupby_cols, observed=True) .size() .reset_index(name="tmp_count") .assign( tmp_count=lambda X: [ ">= {}".format(min(n, clip_at)) if n >= clip_at else str(n) for n in X["tmp_count"].values ] ) ) clipped_count = adata.obs.merge(clonotype_counts, how="left", on=groupby_cols)[ "tmp_count" ].values if inplace: key_added = ( "{}_clipped_count".format(target_col) if key_added is None else key_added ) adata.obs[key_added] = clipped_count else: return clipped_count
20673965557afdcf75b3201cf743fff100981ec3
12,572