content — string, length 35 to 762k
sha1 — string, length 40
id — int64, 0 to 3.66M
def license(soup):
    """Find the license text."""
    license = None
    try:
        license_section = get_license_section(soup)
        license = extract_node_text(license_section[0], "license-p")
    except IndexError:
        return None
    return license
c7f677e369b7170623968cdfe0d0a85d75e4a339
23,992
def filter_nofix(df, NoFrames):
    """Filter for immobilized origami with DNA-PAINT based tracking handle (TH) as described in `spt`_.

    Positives are groups
        - with a trajectory within the first 5 frames after the start of the measurement
        - and with a number of localizations within the group greater than or equal to 20% of the total measurement duration (in frames)

    Args:
        df (pandas.DataFrame): Immobile properties as calculated by apply_props()
    Returns:
        pandas.DataFrame: Positives in ``df`` according to the TH filter described above.
    """
    istrue = df.min_frame <= 5
    istrue = istrue & (df.n_locs / NoFrames >= 0.2)  # Occupancy of at least 20%
    df_filter = df.loc[istrue, :]
    return df_filter
e115cc479c984037adbe3dd662bed1aa70acaafd
23,993
def read_array(cls, start=None, end=None, weight=None, use_datetime=False, convert_delta=False):
    """
    Read arrays of values for start, end and weight values that represent either the
    cumulative value of the data steps or the direct step values separately, indexed
    by the start and possibly end arrays.

    Parameters
    ==============
    start : array_like
        An array of step start location values.
    end : array_like, Optional
        An array of step end location values.
    weight : array_like, Optional
        An array of step weight values; if these are not provided, a value of 1 will be assigned for each row entry.
    use_datetime : bool, Optional
        Assume start and end fields are of datetime format (numpy.datetime64, datetime or pandas.Timestamp).
    convert_delta : bool, Optional
        Assume weight values are individual step weights (default), or convert values by performing a delta between adjacent values.

    The data is assumed to be sorted by the provided start values.

    Returns
    ==============
    Steps

    See Also
    ==============
    read_dataframe
    read_dict
    """
    if hasattr(start, '__iter__') or hasattr(end, '__iter__'):  # needs to be an array-like object
        if convert_delta:
            weight0 = 0
            if weight[0] != 0:
                weight0 = weight[0]
            if weight0 != 0 and not pd.isnull(start[0]):
                weight = np.diff(weight)
                new_steps = cls(use_datetime).add_direct(start, end, weight)
                new_steps.add_steps([[get_epoch_start(False), 1, weight0]])
            else:
                weight = np.diff(weight, prepend=0)
                new_steps = cls(use_datetime).add_direct(start, end, weight)
        else:
            new_steps = cls(use_datetime).add_direct(start, end, weight)
        return new_steps
    else:
        raise TypeError("input data must be array like, python array or ndarray.")
98b97a53883ed39d762849313cab64f29fadfd8d
23,994
def store_nugget_nodes(gold_nuggets, sys_nuggets, m_mapping): """ Store nuggets as nodes. :param gold_nuggets: :param sys_nuggets: :param m_mapping: :return: """ # Stores time ML nodes that actually exists in gold standard and system. gold_nodes = [] sys_nodes = [] # Store the mapping from nugget id to unified time ML node id. system_nugget_to_node = {} gold_nugget_to_node = {} mapped_system_mentions = set() tid = 0 for gold_index, (system_index, _) in enumerate(m_mapping): node_id = "te%d" % tid tid += 1 gold_script_instance_id = gold_nuggets[gold_index] gold_nugget_to_node[gold_script_instance_id] = node_id gold_nodes.append(node_id) if system_index != -1: system_nugget_id = sys_nuggets[system_index] system_nugget_to_node[system_nugget_id] = node_id sys_nodes.append(node_id) mapped_system_mentions.add(system_index) for system_index, system_nugget in enumerate(sys_nuggets): if system_index not in mapped_system_mentions: node_id = "te%d" % tid tid += 1 system_nugget_to_node[system_nugget] = node_id sys_nodes.append(node_id) return gold_nodes, sys_nodes, gold_nugget_to_node, system_nugget_to_node
659eeccc244d7dfe7fc1c4b9813844d70973b5dc
23,995
def return_union_item(item):
    """union of statements, next statement"""
    return " __result.update({0})".format(item)
60fff47ff948f5b62ff6c6793b9dd339c23ecfd7
23,996
def normalize_basename(s, force_lowercase=True, maxlen=255):
    """Replaces some characters from s with a translation table:

    trans_table = {" ": "_", "/": "_slash_", "\\": "_backslash_", "?": "_question_",
                   "%": "_percent_", "*": "_asterisk_", ":": "_colon_", "|": "_bar_",
                   '"': "_quote_", "<": "_lt_", ">": "_gt_", "&": "_amp_"}

    then if the generated name is longer than maxlen, the name is truncated to maxlen
    and the hash of the name modulo 0xffffffff is appended.
    """
    # replace all whitespace by _
    l = s.lower() if force_lowercase else s
    # table = mktrans(" ", "_")
    # return l.translate(table)
    trans_table = {" ": "_", "/": "_slash_", "\\": "_backslash_", "?": "_question_",
                   "%": "_percent_", "*": "_asterisk_", ":": "_colon_", "|": "_bar_",
                   '"': "_quote_", "<": "_lt_", ">": "_gt_", "&": "_amp_"}
    n = "".join([trans_table.get(x, x) for x in l])
    if len(n) > maxlen - 8:
        h = format(hash(n) & 0xffffffff, "08x")
        n = n[:maxlen - 8] + "_" + h
    return n
8b6c6fee3a55b3d704294d8bdaa7f72101ac477b
23,997
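A quick usage sketch for normalize_basename above, with hypothetical file names and assuming the function is in scope:

print(normalize_basename("My File?.txt"))                 # 'my_file_question_.txt'
print(normalize_basename("A/B", force_lowercase=False))   # 'A_slash_B'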
import re def _get_toc_string_from_log(file_handle): """ Returns a toc string or None for a given log file (EAC or XLD) Copyright (c) 2018 Konstantin Mochalov Released under the MIT License Original source: https://gist.github.com/kolen/765526 """ def _filter_toc_entries(file_handle): """ Take file handle, return iterator of toc entries """ while True: line = file_handle.readline() # TOC table header: if re.match(r""" \s* .+\s+ \| (?#track) \s+.+\s+ \| (?#start) \s+.+\s+ \| (?#length) \s+.+\s+ \| (?#start sec) \s+.+\s*$ (?#end sec) """, line, re.X): file_handle.readline() break while True: line = file_handle.readline() m = re.match(r""" ^\s* (?P<num>\d+) \s*\|\s* (?P<start_time>[0-9:.]+) \s*\|\s* (?P<length_time>[0-9:.]+) \s*\|\s* (?P<start_sector>\d+) \s*\|\s* (?P<end_sector>\d+) \s*$ """, line, re.X) if not m: break yield m.groupdict() PREGAP = 150 try: entries = list(_filter_toc_entries(file_handle)) num_entries = len(entries) tracknums = [int(e['num']) for e in entries] if [x for x in range(1, num_entries+1)] != tracknums: # Non-standard track number sequence return None leadout_offset = int(entries[-1]['end_sector']) + PREGAP + 1 offsets = [(int(x['start_sector']) + PREGAP) for x in entries] toc_numbers = [1, num_entries, leadout_offset] + offsets return " ".join(str(x) for x in toc_numbers) except Exception as e: # can fail if the log file is malformed print("Ignoring log file because of the following error:") print(e) pass return None
1b8152171dcc5a512ea92df96bdc63497f01499a
23,999
def _matrix_M_entry(row, col):
    """Returns one entry for the matrix that maps alpha to theta.

    See Eq. (3) in `Möttönen et al. (2004) <https://arxiv.org/pdf/quant-ph/0407010.pdf>`_.

    Args:
        row (int): one-based row number
        col (int): one-based column number

    Returns:
        (float): transformation matrix entry at given row and column
    """
    # (col >> 1) ^ col is the Gray code of col
    b_and_g = row & ((col >> 1) ^ col)
    sum_of_ones = 0
    while b_and_g > 0:
        if b_and_g & 0b1:
            sum_of_ones += 1
        b_and_g = b_and_g >> 1
    return (-1) ** sum_of_ones
12b2d7b458d8b940504c108cd5704795184400a9
24,000
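A small worked check of the Gray-code parity used by _matrix_M_entry above (a sketch; assumes the function is in scope):

row, col = 3, 2
gray = (col >> 1) ^ col                  # Gray code of 2 is 0b11
parity = bin(row & gray).count("1")      # row 0b11 overlaps the Gray code in two bit positions
print((-1) ** parity)                    # 1
print(_matrix_M_entry(3, 2))             # 1, same result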
def get_environment_variable_names():
    """Helper to return names of environment variables queried.

    Returns:
        tuple: name of environment variable to control log level, name of environment
        variable to control logging to file
    """
    __log_file_environment_variable_name = mwi_env.get_env_name_log_file()
    __log_level_environment_variable_name = mwi_env.get_env_name_logging_level()
    return __log_level_environment_variable_name, __log_file_environment_variable_name
58457a843eeda900261fcecdb41a938ea59ff0c4
24,001
import pynvml


def get_device_total_memory(index=0):
    """Return total memory of CUDA device with index."""
    pynvml.nvmlInit()
    return pynvml.nvmlDeviceGetMemoryInfo(
        pynvml.nvmlDeviceGetHandleByIndex(index)
    ).total
11fb76c85393531cf01d9193adddaeec59e5f5c5
24,002
def repeat_elements(x, rep, axis):
    """Repeats the elements of a tensor along an axis, like `np.repeat`.

    If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
    will have shape `(s1, s2 * rep, s3)`.

    # Arguments
        x: Tensor or variable.
        rep: Python integer, number of times to repeat.
        axis: Axis along which to repeat.

    # Returns
        A tensor.
    """
    return KerasSymbol(mx.sym.repeat(x.symbol, repeats=rep, axis=axis))
3a4e3617021afa59de1b980e9cebc7a40e4f32db
24,003
def deconv4x4_block(in_channels, out_channels, stride=1, padding=3, ext_padding=(2, 1, 2, 1), out_padding=0, dilation=1, groups=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): """ 4x4 version of the standard deconvolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default (2, 1, 2, 1) Padding value for deconvolution layer. ext_padding : tuple/list of 4 int, default None Extra padding value for deconvolution layer. out_padding : int or tuple/list of 2 int Output padding value for deconvolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return DeconvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=4, stride=stride, padding=padding, ext_padding=ext_padding, out_padding=out_padding, dilation=dilation, groups=groups, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation)
6f2274b8c23f2b649a274d8bec7d99174da785f4
24,004
def get_package_requirements():
    """
    Used to read requirements from requirements.txt file.

    :return: list of requirements
    :rtype: list
    """
    requirements = []
    for line in read_file_contents("requirements.txt").splitlines():
        line = line.strip()
        if line == "" or line.startswith("#"):
            continue
        requirements.append(line)
    return requirements
0fea0b1ce42afdf6e4c130c6a5d5f22dbead5222
24,005
def graph2tree(mat, root, closedset=None):
    """Convert a graph to a tree data structure."""
    if closedset is None:
        closedset = set()
    tree = Tree()

    def walk(name):
        node = TreeNode(name)
        node.dist = 0
        closedset.add(name)
        for child in mat[name]:
            if child not in closedset:
                child_node = walk(child)
                child_node.dist = mat[name][child]
                tree.add_child(node, child_node)
        return node

    tree.root = walk(root)
    tree.nextname = max(name for name in tree.nodes if isinstance(name, int))
    return tree
70981e1ff28e6ce1c6278ec35206a1953499855d
24,008
import re


def is_hex(hex_str):
    """Helper function to verify a string is a hex value."""
    return re.fullmatch('[0-9a-f]+', hex_str)
c5a53ccbcec36d77bee88d9c81aea46d2a0eec2d
24,009
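A usage sketch for is_hex above; note it returns a match object (or None), so wrap it in bool() for a strict boolean:

print(bool(is_hex("deadbeef")))  # True
print(bool(is_hex("0xFF")))      # False: the "0x" prefix and uppercase letters are not matched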
def jet(data, range=None, exp=1.0): """ Creates a JET colormap from data Parameters ---------- data : np.array [N,1] Data to be converted into a colormap range : tuple (min,max) Optional range value for the colormap (if None, use min and max from data) exp : float Exponential value to weight the color differently Returns ------- colormap : np.array [N,3] Colormap obtained from data """ # Return if data is not available if data is None or data.size == 0 or isinstance(data, tuple): return data else: # If data is a tensor, convert to numpy if is_tensor(data): data = data.detach().cpu().numpy() # If data is [N,1], remove second dimensions if len(data.shape) > 1: data = data.reshape(-1) # Determine range if not available if range is None: data = data.copy() - np.min(data) data = data / (np.max(data) + 1e-6) else: data = (data - range[0]) / (range[1] - range[0]) data = np.maximum(np.minimum(data, 1.0), 0.0) # Use exponential if requested if exp != 1.0: data = data ** exp # Initialize colormap jet = np.ones((data.shape[0], 3), dtype=np.float32) # First stage idx = (data <= 0.33) jet[idx, 1] = data[idx] / 0.33 jet[idx, 0] = 0.0 # Second stage idx = (data > 0.33) & (data <= 0.67) jet[idx, 0] = (data[idx] - 0.33) / 0.33 jet[idx, 2] = 1.0 - jet[idx, 0] # Third stage idx = data > 0.67 jet[idx, 1] = 1.0 - (data[idx] - 0.67) / 0.33 jet[idx, 2] = 0.0 # Return colormap return jet
6945558dd6fc53c458f0c5c3e3f98fd5a1486a10
24,011
def collatz_seq(n, collatz_dict={}):
    """Takes an integer n and returns the resulting Collatz sequence as a list."""
    seq = [n]
    while n > 1:
        n = next_collatz(n)
        if n in collatz_dict:
            seq.extend(collatz_dict[n])
            collatz_dict[seq[0]] = seq
            return seq
        else:
            seq.append(n)
    collatz_dict[seq[0]] = seq
    return seq
ae7ec3a77c39262c15cde9f18a24d60ca237284e
24,012
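A usage sketch for collatz_seq above; next_collatz is not part of the snippet, so a standard definition is assumed here:

def next_collatz(n):
    # standard Collatz step: halve even numbers, 3n + 1 for odd numbers
    return n // 2 if n % 2 == 0 else 3 * n + 1

print(collatz_seq(6))  # [6, 3, 10, 5, 16, 8, 4, 2, 1]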
def create_app():
    """
    Create a Flask application using the app factory pattern.

    :return: Flask app
    """
    app = Flask(__name__)
    app.config.from_object('config.settings')
    app.register_blueprint(contact)
    return app
3af4d596fe8fd32f88f06e860cae2e929aa799e7
24,013
def get_prebuilt_piccolo():
    """
    :return: pair of piccolo feature model filename and fm.json as a string
    """
    DEFAULT_PREBUILT_PICCOLO = '/home/besspinuser/tool-suite/tutorial/piccolo-simple-pregen.fm.json'
    with open(DEFAULT_PREBUILT_PICCOLO, 'r') as f:
        feature_model = f.read()
    return 'piccolo-simple-pregen.fm.json', feature_model
90a1ecf20c6d6614b813250ff464b6f308c588dc
24,014
from datetime import datetime


def conflict_algorithm_session(date, start_time, end_time, venue):
    """
    conflict_algorithm_session: this algorithm is used to find whether there is an
    available slot for the given date, start_time, end_time and venue in the
    session_info so the slot can be booked; otherwise it returns an error.

    @param:
        date - date of the slot
        start_time - starting time for the slot
        end_time - ending time for the slot
        venue - venue for the slot

    @variables:
        booked_Sessions - contains the session_info of all previously allotted slots
    """
    # converting string to datetime type variable
    start_time = datetime.strptime(start_time, '%H:%M').time()
    end_time = datetime.strptime(end_time, '%H:%M').time()
    booked_Sessions = Session_info.objects.select_related(
        'club', 'club__co_ordinator', 'club__co_ordinator__id', 'club__co_ordinator__id__user',
        'club__co_ordinator__id__department', 'club__co_coordinator', 'club__co_coordinator__id',
        'club__co_coordinator__id__user', 'club__co_coordinator__id__department',
        'club__faculty_incharge', 'club__faculty_incharge__id', 'club__faculty_incharge__id__user',
        'club__faculty_incharge__id__department').filter(date=date, venue=venue)
    # placing start time and end time in tuple fashion inside this list
    slots = [(start_time, end_time)]
    for value in booked_Sessions:
        slots.append((value.start_time, value.end_time))
    slots.sort()
    # if there isn't any slot present for the selected day, just book the session
    if len(slots) == 1:
        return "success"
    else:
        # this whole logic checks if the end time of any slot is less than the start time of the next slot
        counter = slots[0][1]
        flag = 0
        i = 1
        while i < len(slots):
            if slots[i][0] < counter:
                flag = 1
                break
            counter = slots[i][1]
            i = i + 1
        if flag == 0:
            return "success"
        else:
            return "error"
06fffa780ed4a9afa7cc2cc078bf64dada9b4f21
24,015
def get_income_share_summary(df_centile, k): """ :param df_centile: pd.DataFrame preprocessed {region}_{unit}_centile.csv !! (rank is 1~100) :param k: str key """ centile_range = { 'ํ•˜์œ„ 20%': (0, 20), '๋‹ค์Œ 30%': (20, 50), 'ํ•˜์œ„ 50%': (0, 50), '์ค‘์œ„ 30%': (50, 80), '์ƒ์œ„ 20%': (80, 100), '์ƒ์œ„ 10%': (90, 100), '์ƒ์œ„ 1%': (99, 100), } results = list() groupcols = ['std_yyyy', 'var'] yearly_count = df_centile.groupby(['var', 'std_yyyy']).max()['year_count'].rename('max_freq') cpi = load_cpi(translate(k.split('_')[0])) freq_adjustments = {} # how many centiles(?) are just 0? zero_thresh_mask = df_centile['rank'].diff() > 1.0 zero_thresh = df_centile.loc[zero_thresh_mask] # add 0 ranks expanded_centiles = [] for (year, var), gdf in df_centile.groupby(groupcols): zero_fillers = {'std_yyyy': [], 'var': [], 'rank': [], 'freq': [], 'rank_sum': [], 'share': []} mask = (zero_thresh['std_yyyy'] == year) & (zero_thresh['var'] == var) if mask.sum() == 0: expanded_centiles.append(gdf) continue t = int(zero_thresh[mask].iloc[0]['rank']) year_total = yearly_count[(var, year)] for i in range(2, t): zero_fillers['std_yyyy'].append(year) zero_fillers['var'].append(var) zero_fillers['rank'].append(i) zero_fillers['freq'].append(int(np.around(year_total / 100))) zero_fillers['rank_sum'].append(0) zero_fillers['share'].append(0) gdf.loc[gdf['rank'] == 1, 'freq'] = year_total * ((t-1) / 100) - np.sum(zero_fillers['freq']) gdf.loc[gdf['rank'] == t, 'freq'] = year_total - gdf.loc[(gdf['rank'] < t) | (gdf['rank'] > t), 'freq'].sum() - np.sum(zero_fillers['freq']) expanded = pd.concat([gdf, pd.DataFrame(zero_fillers)]).sort_values(by=groupcols + ['rank']) expanded_centiles.append(expanded) expanded_centiles = pd.concat(expanded_centiles) for name, r in centile_range.items(): mask = (expanded_centiles['rank'] > r[0]) & (expanded_centiles['rank'] <= r[1]) if mask.sum() == 0: # Find max_freq: expected number of people in this income group # (number of ppl can be very different because during quantile ranking, # in case of a tie the individual was assigned the lower rank) max_freq = ((r[1] - r[0]) * yearly_count / 100).apply(lambda x: int(np.around(x))) _df = yearly_count.reset_index().drop(columns=['year_count']) _df = _df.merge(max_freq.rename('freq').reset_index()) _df['share'] = 0 _df['group_mean'] = 0 _df['group_mean_real'] = 0 else: _df = expanded_centiles[mask].copy() _df = _df.groupby(groupcols).agg({'rank_sum': 'sum', 'freq': 'sum', 'share': 'sum'}).reset_index() _df = _df.merge(cpi, on='std_yyyy', how='left') _df['group_mean'] = _df['rank_sum'] / _df['freq'] _df['group_mean_real'] = _df['group_mean'] / _df.cpi _df = _df.drop(columns=['rank_sum']) _df.loc[:, 'income_group'] = name results.append(_df) df = pd.concat(results, axis=0).sort_values(by=['std_yyyy', 'var']) df = df.pivot(index=['var', 'std_yyyy'], columns=['income_group'], values=['freq', 'group_mean', 'group_mean_real', 'share']).reset_index() sorted_groups = ['ํ•˜์œ„ 20%', '๋‹ค์Œ 30%', 'ํ•˜์œ„ 50%', '์ค‘์œ„ 30%', '์ƒ์œ„ 20%', '์ƒ์œ„ 10%', '์ƒ์œ„ 1%'] df = df[[('var', ''), ('std_yyyy', '')] + [('freq', k) for k in sorted_groups] + [('group_mean', k) for k in sorted_groups] + [('group_mean_real', k) for k in sorted_groups] + [('share', k) for k in sorted_groups]] return df
63f9a1e3f813a21681ad6d0061cd97e49485b885
24,016
def _get_cmdline_descriptors_for_hashtree_descriptor(ht): """Generate kernel cmdline descriptors for dm-verity. Arguments: ht: A AvbHashtreeDescriptor Returns: A list with two AvbKernelCmdlineDescriptor with dm-verity kernel cmdline instructions. There is one for when hashtree is not disabled and one for when it is. """ c = 'dm="1 vroot none ro 1,' c += '0' # start c += ' {}'.format((ht.image_size / 512)) # size (# sectors) c += ' verity {}'.format(ht.dm_verity_version) # type and version c += ' PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' # data_dev c += ' PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' # hash_dev c += ' {}'.format(ht.data_block_size) # data_block c += ' {}'.format(ht.hash_block_size) # hash_block c += ' {}'.format(ht.image_size / ht.data_block_size) # #blocks c += ' {}'.format(ht.image_size / ht.data_block_size) # hash_offset c += ' {}'.format(ht.hash_algorithm) # hash_alg c += ' {}'.format(str(ht.root_digest).encode('hex')) # root_digest c += ' {}'.format(str(ht.salt).encode('hex')) # salt if ht.fec_num_roots > 0: c += ' 10' # number of optional args c += ' $(ANDROID_VERITY_MODE)' c += ' ignore_zero_blocks' c += ' use_fec_from_device PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' c += ' fec_roots {}'.format(ht.fec_num_roots) # Note that fec_blocks is the size that FEC covers, *not* the # size of the FEC data. Since we use FEC for everything up until # the FEC data, it's the same as the offset. c += ' fec_blocks {}'.format(ht.fec_offset / ht.data_block_size) c += ' fec_start {}'.format(ht.fec_offset / ht.data_block_size) else: c += ' 2' # number of optional args c += ' $(ANDROID_VERITY_MODE)' c += ' ignore_zero_blocks' c += '" root=/dev/dm-0' # Now that we have the command-line, generate the descriptor. desc = AvbKernelCmdlineDescriptor() desc.kernel_cmdline = c desc.flags = ( AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_NOT_DISABLED) # The descriptor for when hashtree verification is disabled is a lot # simpler - we just set the root to the partition. desc_no_ht = AvbKernelCmdlineDescriptor() desc_no_ht.kernel_cmdline = 'root=PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' desc_no_ht.flags = ( AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_DISABLED) return [desc, desc_no_ht]
a57a33e50a8888781ed9a0fd1cddada12acea69e
24,017
from datetime import datetime


def assign_time():
    """Get latest time stamp value."""
    return datetime.now().strftime('%Y-%m-%d %H:%M:%S')
2096222b23f5eb0d0aa11a6db4f3751b0a207463
24,018
def read_pb2(filename, binary=True): """Convert a Protobuf Message file into mb.Compound. Parameters ---------- filename : str binary: bool, default True If True, will print a binary file If False, will print to a text file Returns ------- root_compound : mb.Compound """ root_proto = compound_pb2.Compound() if binary: with open(filename, "rb") as f: root_proto.ParseFromString(f.read()) else: with open(filename, "r") as f: Merge(f.read(), root_proto) proto_to_cmpd = {} root_compound = _proto_to_mb(root_proto) proto_to_cmpd[root_proto.id] = root_compound for sub_proto, parent_proto in _proto_successors(root_proto): if parent_proto.id not in proto_to_cmpd: parent_cmpd = _proto_to_mb(parent_proto) proto_to_cmpd[parent_proto.id] = parent_cmpd parent_cmpd = proto_to_cmpd[parent_proto.id] if sub_proto.id not in proto_to_cmpd: sub_cmpd = _proto_to_mb(sub_proto) proto_to_cmpd[sub_proto.id] = sub_cmpd sub_cmpd = proto_to_cmpd[sub_proto.id] parent_cmpd.add(sub_cmpd) _add_mb_bonds(root_proto, root_compound, proto_to_cmpd) return root_compound
29c128020eab6b6a33fd1e4f36be395c0672c3b4
24,020
import logging


def _OneSubChunk(wav_file):
    """Reads one subchunk and logs it.

    Returns:
        Returns a chunk if a chunk is found. None otherwise.
    """
    chunk_id = _ReadChunkId(wav_file)
    if not chunk_id:
        return None
    size = _ReadSize(wav_file)
    data = wav_file.read(size)
    logging.info('Subchunk: {} {}'.format(chunk_id, size))
    return Chunk(chunk_id, data)
c43cb5fb9367d4d255475ec8f1fc41df3ea422cc
24,021
def _convert_tensorshape_to_tensor(value, dtype=None):
    """Copied from TF's TensorShape conversion."""
    if not value.is_fully_defined():
        raise ValueError(
            'Cannot convert a partially known TensorShape to a Tensor: {}'.format(
                value))
    value_list = value.as_list()
    int64_value = 0
    for dim in value_list:
        if dim >= 2**31:
            int64_value = dim
            break
    if dtype is not None:
        if dtype not in (np.int32, np.int64):
            raise TypeConversionError(value, dtype)
        if dtype == np.int32 and int64_value:
            raise ValueError('Cannot convert a TensorShape to dtype int32; '
                             'a dimension is too large ({})'.format(int64_value))
    else:
        dtype = np.int64 if int64_value else np.int32
    return convert_to_tensor(value_list, dtype=dtype)
bc5cc28b6694faf1676aaf4f523a5c135a9bb0d0
24,022
def get_upside_capture( nav_data, benchmark_nav_data, risk_free_rate=None, window=250 * 3, annualiser=250, tail=True, ): """ The up-market capture ratio is the statistical measure of an investment manager's overall performance in up-markets. It is used to evaluate how well an investment manager performed relative to an index during periods when that index has risen. The ratio is calculated by dividing the manager's returns by the returns of the index during the up-market and multiplying that factor by 100. (Investopedia) :param nav_data: :param benchmark_nav_data: :param risk_free_rate: float :param window: int :param annualiser: int :param tail: bool :return: """ nav_dataframe = _transform_df(nav_data) benchmark_nav_dataframe = _transform_df(benchmark_nav_data) df = RatioCalculator( nav_dataframe, benchmark_nav_dataframe=benchmark_nav_dataframe, risk_free_rate=risk_free_rate, annualiser=annualiser, ).get_upside_capture(window) return float(df["upside_capture_ratio"][-1]) if tail else df
7828b6c6e222fe4e6886d4ebf4c3bec2cbf3e795
24,024
import ssl import urllib import json def generateToken(username, password, portalUrl): """Retrieves a token to be used with API requests.""" context = ssl._create_unverified_context() if NOSSL else None params = urllib.urlencode({'username' : username, 'password' : password, 'client' : 'referer', 'referer': portalUrl, 'expiration': 60, 'f' : 'json'}) resp = urlopen(portalUrl + '/sharing/rest/generateToken?', params, context=context) jsonResponse = json.load(resp) if 'token' in jsonResponse: return jsonResponse['token'] elif 'error' in jsonResponse: errMsg = jsonResponse['error']['message'] for detail in jsonResponse['error']['details']: errMsg += "\n"+ detail raise Exception( errMsg )
ba4cc48f88b088c899de9d278795706dee014c94
24,025
def get_logreg(prof, tm, j, prods): """ Train logistic regression (Markov-chain approach). prof: task-mode data generated using lhs.py tm: task-mode j: name of unit prods: list of products """ # Filter relevant data dfj = prof.loc[prof["unit"] == j, ].copy() dfj["tm"] = [row["task"] + "-" + row["mode"] for i, row in dfj.iterrows()] dfj["tm-1"] = dfj["tm"].shift(-1) dfj.loc[pd.isna(dfj["tm-1"]), "tm-1"] = "None-None" dfj = dfj[dfj["tm"] == tm] # Train logistic regression if dfj.shape[0] > 0 and len(np.unique(dfj["tm-1"])) > 1: X = np.array(dfj[prods]) Y = np.array(dfj["tm-1"]) if(len(np.unique(Y)) > 2): # Multinomial if more than 2 classes logreg = linear_model.LogisticRegression(multi_class="multinomial", solver="lbfgs", # solver="sag", max_iter=10000, verbose=2) else: # Binomial if only two classes logreg = linear_model.LogisticRegression(max_iter=10000, verbose=2) logreg.fit(X, Y) return logreg elif dfj.shape[0] > 0: return np.array(dfj["tm-1"])[0] else: return "None-None"
ff5279d3cb82e769abcff64fa7573b2601da31bb
24,026
def slowness2speed(value):
    """inverse function of speed2slowness"""
    speed = (31 - value) / 30
    return speed
54d192b2db667ee05b9c5bdd636af23313b72246
24,027
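The docstring of slowness2speed above refers to a speed2slowness counterpart that is not shown; under the assumption that it computes value = 31 - 30 * speed, a quick round-trip check:

def speed2slowness(speed):            # assumed counterpart, not shown above
    return 31 - 30 * speed

print(slowness2speed(speed2slowness(0.5)))  # 0.5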
def complex_covariance_from_real(Krr, Kii, Kri):
    """Build the complex covariance and pseudo-covariance from real-valued blocks.

    Parameters
    ----------
    Krr : array_like
        Covariance of the real part.
    Kii : array_like
        Covariance of the imaginary part.
    Kri : array_like
        Cross-covariance between the real and imaginary parts.

    Returns
    -------
    K, Kp : tuple of array_like
        Complex covariance and pseudo-covariance.
    """
    K = Krr + Kii + 1j * (Kri.T - Kri)
    Kp = Krr - Kii + 1j * (Kri.T + Kri)
    return K, Kp
0c3e7b01bb06ba6b5bbae50f1fab98cb8bd63f45
24,028
def is_in_cap(objs, radecrad):
    """Determine which of an array of objects lie inside an RA, Dec, radius cap.

    Parameters
    ----------
    objs : :class:`~numpy.ndarray`
        An array of objects. Must include at least the columns "RA" and "DEC".
    radecrad : :class:`list`, defaults to `None`
        3-entry list of coordinates [ra, dec, radius] forming a cap or
        "circle" on the sky. ra, dec and radius are all in degrees.

    Returns
    -------
    :class:`~numpy.ndarray`
        ``True`` for objects in the cap, ``False`` for objects outside of the cap.

    Notes
    -----
        - Tests the separation with <=, so include objects on the cap boundary.
        - See also is_in_circle() which handles multiple caps.
    """
    ra, dec, radius = radecrad
    cobjs = SkyCoord(objs["RA"]*u.degree, objs["DEC"]*u.degree)
    center = SkyCoord(ra*u.degree, dec*u.degree)
    ii = center.separation(cobjs) <= radius*u.degree
    return ii
e85abee591dd2956ed8d9be5c47eafaa9c249cb2
24,029
from typing import Any
from typing import List
import numbers


def _ensure_list(value: Any) -> List[Any]:
    """If value is a scalar, converts it to a list of size 1."""
    if isinstance(value, list):
        return value
    if isinstance(value, str) or isinstance(value, numbers.Number):
        return [value]
    raise TypeError(
        f'Value must be a list, number or a string. Got {type(value)}')
e9cb9814060d9f2f2ad15fe42d0f6bbe192cc60e
24,031
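Usage sketch for _ensure_list above:

print(_ensure_list(3))        # [3]
print(_ensure_list("abc"))    # ['abc']
print(_ensure_list([1, 2]))   # [1, 2]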
import types
from typing import Callable
import copy


def _ExtractMetaFeature(  # pylint: disable=invalid-name
    extracts: types.Extracts,
    new_features_fn: Callable[[types.FeaturesPredictionsLabels],
                              types.DictOfFetchedTensorValues]
) -> types.Extracts:
    """Augments FPL dict with new feature(s)."""
    # Create a new feature from existing ones.
    fpl_copy = get_fpl_copy(extracts)
    new_features = new_features_fn(fpl_copy)
    # Add the new features to the existing ones.
    update_fpl_features(fpl_copy, new_features)
    result = copy.copy(extracts)
    result[constants.FEATURES_PREDICTIONS_LABELS_KEY] = fpl_copy
    return result
14ad08a2e2158989ec48d605dd954311cac30cfe
24,032
import re


def parse_notebook_index(ntbkpth):
    """
    Parse the top-level notebook index file at `ntbkpth`. Returns a list of
    subdirectories in order of appearance in the index file, and a dict mapping
    subdirectory name to a description.
    """
    # Convert notebook to RST text in string
    rex = RSTExporter()
    rsttxt = rex.from_filename(ntbkpth)[0]
    # Clean up trailing whitespace
    rsttxt = re.sub(r'\n ', r'', rsttxt, re.M | re.S)
    pthidx = {}
    pthlst = []
    lines = rsttxt.split('\n')
    for l in lines:
        m = re.match(r'^-\s+`([^<]+)\s+<([^>]+).ipynb>`__', l)
        if m:
            # List of subdirectories in order of appearance in index.rst
            pthlst.append(m.group(2))
            # Dict mapping subdirectory name to description
            pthidx[m.group(2)] = m.group(1)
    return pthlst, pthidx
74ce532c577df8b5ecf02cbe4163422a80360bf6
24,033
from pathlib import Path


def import_requirements():
    """Import ``requirements.txt`` file located at the root of the repository."""
    with open(Path(__file__).parent / 'requirements.txt') as file:
        return [line.rstrip() for line in file.readlines()]
ee22aaa76e13c150a2a7981d171ba227887fbceb
24,034
from typing import Literal def _get_timestamp_range_edges( first: Timestamp, last: Timestamp, freq: BaseOffset, closed: Literal["right", "left"] = "left", origin="start_day", offset: Timedelta | None = None, ) -> tuple[Timestamp, Timestamp]: """ Adjust the `first` Timestamp to the preceding Timestamp that resides on the provided offset. Adjust the `last` Timestamp to the following Timestamp that resides on the provided offset. Input Timestamps that already reside on the offset will be adjusted depending on the type of offset and the `closed` parameter. Parameters ---------- first : pd.Timestamp The beginning Timestamp of the range to be adjusted. last : pd.Timestamp The ending Timestamp of the range to be adjusted. freq : pd.DateOffset The dateoffset to which the Timestamps will be adjusted. closed : {'right', 'left'}, default "left" Which side of bin interval is closed. origin : {'epoch', 'start', 'start_day'} or Timestamp, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a timestamp is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries offset : pd.Timedelta, default is None An offset timedelta added to the origin. Returns ------- A tuple of length 2, containing the adjusted pd.Timestamp objects. """ if isinstance(freq, Tick): index_tz = first.tz if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None): raise ValueError("The origin must have the same timezone as the index.") elif origin == "epoch": # set the epoch based on the timezone to have similar bins results when # resampling on the same kind of indexes on different timezones origin = Timestamp("1970-01-01", tz=index_tz) if isinstance(freq, Day): # _adjust_dates_anchored assumes 'D' means 24H, but first/last # might contain a DST transition (23H, 24H, or 25H). # So "pretend" the dates are naive when adjusting the endpoints first = first.tz_localize(None) last = last.tz_localize(None) if isinstance(origin, Timestamp): origin = origin.tz_localize(None) first, last = _adjust_dates_anchored( first, last, freq, closed=closed, origin=origin, offset=offset ) if isinstance(freq, Day): first = first.tz_localize(index_tz) last = last.tz_localize(index_tz) else: first = first.normalize() last = last.normalize() if closed == "left": first = Timestamp(freq.rollback(first)) else: first = Timestamp(first - freq) last = Timestamp(last + freq) return first, last
a1d5a9998fb8e88badd354779c50be0e26be37f8
24,035
def create_export(request, username, id_string, export_type): """ Create async export tasks view. """ owner = get_object_or_404(User, username__iexact=username) xform = get_form({'user': owner, 'id_string__iexact': id_string}) if not has_permission(xform, owner, request): return HttpResponseForbidden(_(u'Not shared.')) if export_type == Export.EXTERNAL_EXPORT: # check for template before trying to generate a report if not MetaData.external_export(xform): return HttpResponseForbidden(_(u'No XLS Template set.')) credential = None if export_type == Export.GOOGLE_SHEETS_EXPORT: credential = _get_google_credential(request) if isinstance(credential, HttpResponseRedirect): return credential query = request.POST.get("query") force_xlsx = request.POST.get('xls') != 'true' # export options group_delimiter = request.POST.get("options[group_delimiter]", '/') if group_delimiter not in ['.', '/']: return HttpResponseBadRequest( _("%s is not a valid delimiter" % group_delimiter)) # default is True, so when dont_.. is yes # split_select_multiples becomes False split_select_multiples = request.POST.get( "options[dont_split_select_multiples]", "no") == "no" binary_select_multiples = getattr(settings, 'BINARY_SELECT_MULTIPLES', False) remove_group_name = request.POST.get("options[remove_group_name]", "false") value_select_multiples = request.POST.get( "options[value_select_multiples]", "false") # external export option meta = request.POST.get("meta") options = { 'group_delimiter': group_delimiter, 'split_select_multiples': split_select_multiples, 'binary_select_multiples': binary_select_multiples, 'value_select_multiples': str_to_bool(value_select_multiples), 'remove_group_name': str_to_bool(remove_group_name), 'meta': meta.replace(",", "") if meta else None, 'google_credentials': credential } try: create_async_export(xform, export_type, query, force_xlsx, options) except ExportTypeError: return HttpResponseBadRequest( _("%s is not a valid export type" % export_type)) else: audit = {"xform": xform.id_string, "export_type": export_type} audit_log(Actions.EXPORT_CREATED, request.user, owner, _("Created %(export_type)s export on '%(id_string)s'.") % { 'export_type': export_type.upper(), 'id_string': xform.id_string, }, audit, request) return HttpResponseRedirect( reverse( export_list, kwargs={ "username": username, "id_string": id_string, "export_type": export_type }))
d94b674409b094693470132cc84b63c33bd657a1
24,036
def draft_bp(app):
    """Callable draft blueprint (we need an application context)."""
    with app.app_context():
        return BibliographicDraftResource(
            service=BibliographicRecordService()
        ).as_blueprint("bibliographic_draft_resource")
e0c1846e74399d9a946b11cd68a586c55e93cb3a
24,037
import tarfile
import io


def _unpack(stream: bytes, path: str) -> str:
    """Unpack archive in bytes string into directory in ``path``."""
    with tarfile.open(fileobj=io.BytesIO(stream)) as tar:
        tar.extractall(path)
    return path
81a05c0a60fb06d43592a0a4f4d30cf62d406e01
24,038
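A self-contained usage sketch for _unpack above, building a tiny in-memory tar archive with the standard library:

import io
import tarfile
import tempfile

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w") as tar:
    data = b"hello"
    info = tarfile.TarInfo(name="hello.txt")
    info.size = len(data)
    tar.addfile(info, io.BytesIO(data))

with tempfile.TemporaryDirectory() as tmp:
    print(_unpack(buf.getvalue(), tmp))   # prints the temporary directory path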
def create_client(client):
    """Creates a new client."""
    rv = client.post('/v1/oauth/', follow_redirects=True, data={
        'submit': 'Add Client',
    })
    db.app = jobaddservice.app
    oauth_clients = Client.query.all()
    client_id = oauth_clients[0].client_id
    return client_id
4de5a7e9feae199c8424871962d1a11a5a9f34c8
24,039
def FPS(name, sort, explicit_name=None):
    """Creates a floating-point symbol.

    :param name:            The name of the symbol
    :param sort:            The sort of the floating point
    :param explicit_name:   If False, an identifier is appended to the name to ensure uniqueness.
    :return:                An FP AST.
    """
    n = _make_name(name, sort.length, False if explicit_name is None else explicit_name, prefix='FP_')
    return FP('FPS', (n, sort), variables={n}, symbolic=True, length=sort.length)
137595ce164b72438c16fa8f74838270b43fb3db
24,041
def _calc_cat_outlier(df: dd.DataFrame, col_x: str, threshold: int = 1) -> Intermediate:
    """
    Calculate outliers based on the threshold for categorical values.

    :param df: the input dataframe
    :param col_x: the column of df (univariate outlier detection)
    :return: dict(index: value) of outliers
    """
    groups = df.groupby([col_x]).size()
    result = {"outlier_index": list(groups[groups <= threshold].index.compute())}
    raw_data = {"df": df, "col_x": col_x, "threshold": threshold}
    return Intermediate(result, raw_data)
3b2c7ea6a7491fd81b2467d1f2b5faad36523177
24,045
def add_precursor_mz(spectrum_in: SpectrumType) -> SpectrumType:
    """Add precursor_mz to correct field and make it a float.

    For missing precursor_mz field: check if there is a "pepmass" entry instead.
    For string parsed as precursor_mz: convert to float.
    """
    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()

    if isinstance(spectrum.get("precursor_mz", None), str):
        spectrum.set("precursor_mz", float(spectrum.get("precursor_mz").strip()))
    elif spectrum.get("precursor_mz", None) is None:
        pepmass = spectrum.get("pepmass")
        if isinstance(pepmass[0], float):
            spectrum.set("precursor_mz", pepmass[0])
        else:
            print("No precursor_mz found in metadata.")
    return spectrum
297fbb280aa00992d7658dc47306c4c98018d19e
24,046
import functools
import click


def echo_result(function):
    """Decorator that prints subcommand results correctly formatted.

    :param function: Subcommand that returns a result from the API.
    :type function: callable
    :returns: Wrapped function that prints subcommand results
    :rtype: callable
    """
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        result = function(*args, **kwargs)
        context = click.get_current_context()
        params = context.params
        output_format = params["output_format"]
        formatter = FORMATTERS[output_format]
        if isinstance(formatter, dict):
            # For the text formatter, there's a separate formatter for each subcommand
            formatter = formatter[context.command.name]
        output = formatter(result, params.get("verbose", False)).strip("\n")
        click.echo(
            output, file=params.get("output_file", click.open_file("-", mode="w"))
        )

    return wrapper
7a4956eea317f77ec816d7c68ff61749a971b025
24,047
import numpy


def load_cifar10():
    """Load the CIFAR-10 dataset."""
    def post(inputs, labels):
        return inputs.astype(numpy.float32) / 255, labels.flatten().astype(numpy.int32)

    return NumpySet.from_keras(tf.keras.datasets.cifar10.load_data, post=post)
436411f1258803a45d0ba4f570dc80c5ec0cd4b8
24,048
def parse_names(filename):
    """Parse an NCBI names.dmp file."""
    taxid_to_names = dict()
    with xopen(filename, 'rt') as fp:
        for n, line in enumerate(fp):
            line = line.rstrip('\t|\n')
            x = line.split('\t|\t')
            taxid, name, uniqname, name_class = x
            taxid = int(taxid)
            if name_class == 'scientific name':
                taxid_to_names[taxid] = (name, uniqname, name_class)
    return taxid_to_names
cca9249333c373a56c1829557caff876df210859
24,049
def parse(pm, doc):
    """Parse one document using the given parsing model.

    :type pm: ParsingModel
    :param pm: a well-trained parsing model
    :param doc: a document (with segmented EDUs)
    """
    pred_rst = pm.sr_parse(doc)
    return pred_rst
4389ac7993d370f2a7d404e5668eb7522ee4db70
24,051
def two_sum(nums, target):
    """
    Given an array of integers, return indices of the two numbers such that they add
    up to a specific target.

    You may assume that each input would have exactly one solution, and you may not
    use the same element twice.

    :type nums: List[int]
    :type target: int
    :rtype: List[int]
    """
    diffs_idx = {}
    for i in range(len(nums)):
        if nums[i] in diffs_idx:
            return [diffs_idx[nums[i]], i]
        diffs_idx[target - nums[i]] = i
ac72eb7137eb0f7161c26b172cd07553c984b5a8
24,052
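A minimal usage example for two_sum above:

print(two_sum([2, 7, 11, 15], 9))   # [0, 1]
print(two_sum([3, 2, 4], 6))        # [1, 2]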
def remove_columns(tx, header, columns_to_remove):
    """
    Removes the given features from the given set of features.

    Args:
        tx: the numpy array representing the given set of features
        header: the header line of the .csv representing the data set
        columns_to_remove: the indices of the features that will be removed from the
            numpy array of features
    """
    print("\nRemoving columns...")
    num_removed = 0
    for col in columns_to_remove:
        tx = np.delete(tx, col - num_removed, 1)
        header = np.delete(header, col - num_removed + 2)
        num_removed += 1
    print("\n... finished.")
    return tx, header
d7f2ed3092fc094ccc48f27813ad8a77358dfc19
24,053
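A usage sketch for remove_columns above, assuming numpy is imported as np (as the function body implies); the col - num_removed + 2 offset suggests the header carries two leading non-feature columns, which the hypothetical header below assumes:

import numpy as np

tx = np.arange(12).reshape(3, 4)
header = np.array(["Id", "Prediction", "f0", "f1", "f2", "f3"])
tx2, header2 = remove_columns(tx, header, [1, 3])
print(tx2.shape)   # (3, 2): feature columns 1 and 3 removed
print(header2)     # ['Id' 'Prediction' 'f0' 'f2']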
def pangenome(groups_file, fasta_list): #creating the len_dict """ The len_dict is used to remember all of the common names and lengths of EVERY gene in each species, which are matched to the arbitrary name. The len_dict is in form len_dict = {speciesA:{arb_name1:[com_name, length], arb_name2:[com_name,length]}, speciesB: {arb_name1:[com_name, length], arb_name2:[com_name,length]}} """ len_dict={} for the_file in fasta_list: each_file=open(str(the_file), 'r') the_file = the_file.split('.') if the_file[0] == 'E': if the_file[1][0] == 'v': species = 'verr' elif the_file[2] == 'typeA': species = 'pistA' elif the_file[2] == 'typeB': species = 'pistB' else: species = the_file[1][:5] else: species = the_file[0] len_dict[species] = {} for each_line in each_file: if each_line[0] == '>': information = get_info(each_line) length = 0 else: #sequence line for amino_acid in each_line: length +=3 arb_name = information[0] com_name= information[1] len_dict[species][arb_name] = [com_name, length] each_file.close() # creating the gene_dict groups_file = open(str(groups_file), 'r') """ The gene_dict is used to match and remember the genes within each cluster (similar genes), lengths of the cluster as a whole, and which species they correspond to. The gene_dict only accounts for the genes that are in MULTIPLE species, NOT THE GENES ONLY IN A SINGLE SPECIES. The gene_dict is in the form gene_dict = {speciesA: {cluster1: [[arb_name1, arb_name2], [len1, len2]]}, speciesB: {cluster1:[[arb_name1, arbname2], [len1, len2]]}} """ gene_dict = {} arb_dict = {} for each_line in groups_file: # each_line consists of a cluster of genes, each with a different arbitrary name and species it belongs to each_line = each_line.split() cluster = each_line[0].rstrip(':') n=0 for each_segment in each_line: # each segment looks like 'species|arbitrary_name' if n == 0: # cluster name n = 1 else: n+=1 each_segment = each_segment.split('|') species = each_segment[0] arb_name = each_segment[1] # The arb_dict is simply a quick reference of ONLY the genes that are in MULTIPLE SPECIES. Genes that only have a single copy in one species will not be included. arb_dict[arb_name] = 0 if species not in gene_dict: gene_dict[species] = {} if cluster not in gene_dict[species]: gene_dict[species][cluster] = [[],[]] gene_dict[species][cluster][0].append(arb_name) length = len_dict[species][arb_name][1] gene_dict[species][cluster][1].append(length) """' The cluster_dict is used to remember the lengths of entire clusters (similar genes; essentially genes with very similar function). If speciesA has 3 copies of the gene and speciesB only has 1 copy, speciesA will take up more space. The purpose of this dict is to have all of the same genes line up visually. We want the maximum length for all the species. In the example, speciesB will have three slots for the gene and 2 will be empty. The cluster dict is in the format cluster_dict= {cluster1: integer, cluster2: integer} """ cluster_dict = {} for species in gene_dict: for cluster in gene_dict[species]: if cluster not in cluster_dict or cluster_dict[cluster] < sum(gene_dict[species][cluster][1]): cluster_dict[cluster] = sum(gene_dict[species][cluster][1]) return len_dict, gene_dict, cluster_dict, arb_dict
9b9fd884ab6e464b5e40ae84d01801743da222be
24,054
import torch


def argmin(x):
    """Deterministic argmin.

    Different from torch.argmin, which may have an undetermined result if there are
    multiple elements equal to the min, this argmin is guaranteed to return the index
    of the first element equal to the min in each row.

    Args:
        x (Tensor): only support rank-2 tensor
    Returns:
        rank-1 int64 Tensor representing the column of the first element in each row
        equal to the minimum of the row.
    """
    assert x.ndim == 2
    m, _ = x.min(dim=1, keepdims=True)
    r, c = torch.nonzero(x == m, as_tuple=True)
    r, num_mins = torch.unique(r, return_counts=True)
    i = torch.cumsum(num_mins, 0)
    i = torch.cat([torch.tensor([0]), i[:-1]])
    return c[i]
93efe99e616f2266c14c94fb81d9251d8bcfba18
24,055
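Usage sketch for the deterministic argmin above, assuming PyTorch is installed:

import torch

x = torch.tensor([[3., 1., 1.],
                  [2., 2., 0.]])
print(argmin(x))   # tensor([1, 2]): index of the first minimum in each row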
import json


def check_geometry_size(footprint):
    """
    Excessively large geometries are problematic for AWS SQS (max size 256kb) and
    cause performance issues because they are stored in plain text in the JSON blob.

    This func reads the geojson and applies a simple heuristic to reduce the footprint
    size through simplification. With each iteration, the geometry is simplified by
    0.01 degrees.

    Parameters
    ----------
    footprint : obj
        A shapely Polygon or MultiPolygon

    Returns
    -------
    geojson : dict
        A geojson representation of the geometry
    """
    geojson = footprint.__geo_interface__
    as_str = json.dumps(geojson)
    geomsize = len(as_str.encode('utf-8'))
    n_iterations = 0
    while geomsize > 125000:
        footprint = footprint.simplify(0.01)
        geojson = footprint.__geo_interface__
        as_str = json.dumps(geojson)
        geomsize = len(as_str.encode('utf-8'))
        n_iterations += 1
    return geojson
b2525958a1440fc1ce0d2560150b7fe28b3ec450
24,056
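A usage sketch for check_geometry_size above, assuming shapely is available:

import json
from shapely.geometry import Point

footprint = Point(0, 0).buffer(1.0)               # a small circular polygon
geojson = check_geometry_size(footprint)
print(len(json.dumps(geojson).encode("utf-8")))   # well under the 125000-byte threshold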
def get_global_comments():
    """Returns all global comments."""
    return GlobalComment.query.all()
f7dcf7be712187aac784787e4745c366e0759cf5
24,057
def is_descending(path):
    """Return ``True`` if this profile is a descending profile."""
    return _re_search(path, _re_descending)
6afb24abc76fa1fa1b33793aca14b1d425664e7e
24,058
def mises_promo_gain_cote(cotes, mise_minimale, rang, output=False):
    """Computes the stake allocation for the "winning odds credited as a freebet" promotion."""
    mis = []
    gains = cotes[rang] * 0.77 + mise_minimale * cotes[rang]
    for cote in cotes:
        mis.append(gains / cote)
    mis[rang] = mise_minimale
    if output:
        print("somme mises=", sum(mis))
        print("gain=", gains)
    return mis
6dfbb9305e769982257a05cb41800a6d2656767b
24,059
def recalculateResult(request, question_id):
    """Called when poll owner wants to recalculate result manually."""
    question = get_object_or_404(Question, pk=question_id)
    getPollWinner(question)
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
32e21808f8fd63b1f02981c966c58a6fdeecf6a5
24,060
def covden_win(cov_resampled, lut):
    """
    Method to associate resampled vegetation coverage to PRMS covden_win.

    Parameters
    ----------
    cov_resampled : np.ndarray
    lut : dict

    Returns
    -------
    gsflow.prms.ParameterRecord object
    """
    covden = covden_sum(cov_resampled, lut)
    covden.name = "covden_win"
    return covden
5289954b2be41539981ad8af73bc683835c13bde
24,061
def login_webauthn_route(): """login webauthn route""" user = User.query.filter(User.id == session.get('webauthn_login_user_id')).one_or_none() if not user: return login_manager.unauthorized() form = WebauthnLoginForm() if form.validate_on_submit(): try: assertion = cbor.decode(b64decode(form.assertion.data)) webauthn.authenticate_complete( session.pop('webauthn_login_state'), webauthn_credentials(user), assertion['credentialRawId'], ClientData(assertion['clientDataJSON']), AuthenticatorData(assertion['authenticatorData']), assertion['signature']) regenerate_session() login_user(user) return redirect_after_login() except (KeyError, ValueError) as e: current_app.logger.exception(e) flash('Login error during Webauthn authentication.', 'error') return render_template('auth/login_webauthn.html', form=form)
d1f31035e696f539f58019c8a37c650089e92d23
24,062
def generate_feature_matrix(genotypes, phenotypes, reg_type,phewas_cov=''): # diff - done """ Generates the feature matrix that will be used to run the regressions. :param genotypes: :param phenotypes: :type genotypes: :type phenotypes: :returns: :rtype: """ feature_matrix = np.zeros((3, genotypes.shape[0], phewas_codes.shape[0]), dtype=float) count = 0 for i in genotypes['id']: if reg_type == 0: temp = pd.DataFrame(phenotypes[phenotypes['id'] == i][['phewas_code', 'MaxAgeAtICD','count']]).drop_duplicates() match = phewas_codes['phewas_code'].isin(list(phenotypes[phenotypes['id'] == i]['phewas_code'])) cts = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['count'] cts[np.isnan(cts)] = 0 match = (match)&(cts>0) feature_matrix[0][count, match[match == True].index] = 1 age = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['MaxAgeAtICD'] #assert np.all(np.isfinite(age)), "make sure MaxAgeAtVisit is filled" age[np.isnan(age)] = genotypes[genotypes['id'] == i].iloc[0]['MaxAgeBeforeDx'] feature_matrix[1][count, :] = age if phewas_cov: feature_matrix[2][count, :] = int(phewas_cov in list(phenotypes[phenotypes['id'] == i]['phewas_code'])) else: if reg_type == 1: temp = pd.DataFrame( phenotypes[phenotypes['id'] == i][['phewas_code', 'MaxAgeAtICD', 'count','lor']]).drop_duplicates() cts = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['count'] cts[np.isnan(cts)] = 0 if temp.empty!=1: cts=cts/temp['lor'].iloc[0] feature_matrix[0][count, :] = cts age = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['MaxAgeAtICD'] #assert np.all(np.isfinite(age)), "make sure MaxAgeAtVisit is filled" age[np.isnan(age)] = genotypes[genotypes['id'] == i].iloc[0]['MaxAgeBeforeDx'] feature_matrix[1][count, :] = age if phewas_cov: feature_matrix[2][count, :] = int( phewas_cov in list(phenotypes[phenotypes['id'] == i]['phewas_code'])) elif reg_type == 2: temp = pd.DataFrame( phenotypes[phenotypes['id'] == i][['phewas_code', 'MaxAgeAtICD', 'duration','lor']]).drop_duplicates() dura = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['duration'] dura[np.isnan(dura)] = 0 if temp.empty!=1: dura=dura/temp['lor'].iloc[0] feature_matrix[0][count, :] = dura age = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['MaxAgeAtICD'] #assert np.all(np.isfinite(age)), "make sure MaxAgeAtVisit is filled" age[np.isnan(age)] = genotypes[genotypes['id'] == i].iloc[0]['MaxAgeBeforeDx'] feature_matrix[1][count, :] = age if phewas_cov: feature_matrix[2][count, :] = int( phewas_cov in list(phenotypes[phenotypes['id'] == i]['phewas_code'])) count += 1 return feature_matrix
95abe0e89060145b90abe4debe7dfd23ba6a4969
24,063
def get_entity(db: SQLAlchemy, id: str) -> EntityOut:
    """Get an entity by id."""
    db_session = db.session_class()
    entry = repository.get_entity(db_session, id)
    if not entry:
        raise exc.NotFound()
    return EntityOut(entry)
16b64a90251eee3dfa79ede4d5524ecfb28a1a82
24,064
def cov(sources):
    """
    Given the array of sources for all image patches, calculate the covariance array
    between all modes.

    Parameters
    ----------
    sources : numpy array (floats)
        The {NUM_MODES x NUM_PATCHES} array of sources.

    Returns
    -------
    numpy array (floats)
        The {NUM_MODES x NUM_MODES} covariance array between all modes.
    """
    return (sources @ sources.T) / sources.shape[1]
268dfbc98a5b443e92aadd27ba577f7911ca398f
24,065
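A quick shape check for cov above, assuming numpy is available:

import numpy as np

sources = np.random.randn(3, 1000)   # 3 modes, 1000 patches
print(cov(sources).shape)            # (3, 3)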
import json def VerifierMiddleware(verifier): """Common wrapper for the authentication modules. * Parses the request before passing it on to the authentication module. * Sets 'pyoidc' cookie if authentication succeeds. * Redirects the user to complete the authentication. * Allows the user to retry authentication if it fails. :param verifier: authentication module """ @wraps(verifier.verify) def wrapper(environ, start_response): data = get_post(environ) kwargs = dict(urlparse.parse_qsl(data)) kwargs["state"] = json.loads(urlparse.unquote(kwargs["state"])) val, completed = verifier.verify(**kwargs) if not completed: return val(environ, start_response) if val: set_cookie, cookie_value = verifier.create_cookie(val, "auth") cookie_value += "; path=/" url = "{base_url}?{query_string}".format( base_url="/authorization", query_string=kwargs["state"]["query"]) response = SeeOther(url, headers=[(set_cookie, cookie_value)]) return response(environ, start_response) else: # Unsuccessful authentication url = "{base_url}?{query_string}".format( base_url="/authorization", query_string=kwargs["state"]["query"]) response = SeeOther(url) return response(environ, start_response) return wrapper
1b8d62830ba57bf7761b77ac271a19c5a486bb20
24,066
def hls_stream(hass, hass_client):
    """Create test fixture for creating an HLS client for a stream."""

    async def create_client_for_stream(stream):
        stream.ll_hls = True
        http_client = await hass_client()
        parsed_url = urlparse(stream.endpoint_url(HLS_PROVIDER))
        return HlsClient(http_client, parsed_url)

    return create_client_for_stream
ca759edc6bf819ba8788e20d8d44da96bfaa73fe
24,067
def AsinhNorm(a=0.1):
    """Custom Arcsinh Norm.

    Parameters
    ----------
    a : float, optional

    Returns
    -------
    ImageNormalize
    """
    return ImageNormalize(stretch=AsinhStretch(a=a))
fecc3bfec6e14033f5d29fb044fb099bddb8b4b3
24,068
def sub(x, y):
    """sub two numbers"""
    return y - x
345279da515a877c1f08a8b54ff8f2e7d6a95fec
24,069
def addSplashScreen(splashSDKName, decompileDir): """ add splash screen channel hasn't Splash if channel["bHasSplash"] = 0 otherwise channel["bHasSplash"] express orientation and color """ channelHasSplash ="0"; try: #read has splash to funcellconfig.xml config = ET.parse(file_operate.getConfigXmlPath()) root = config.getroot() splash = root.find("splash") channelHasSplash = splash.get('hasSplash'); except Exception,e: print e print "Error: cannot parse file: funcellconfig.xml." print 'channelHasSplash = '+channelHasSplash if channelHasSplash == "0": return (0, False) SplashPath = decompileDir + '/ForSplash/' + channelHasSplash + '/' SplashPath = file_operate.getFullPath(SplashPath) print "SplashPath : "+SplashPath SplashCodePath = 'channel/SplashActivity.smali' SplashCodePath = file_operate.getFullPath(SplashCodePath) print "SplashCodePath : "+SplashCodePath SplashCode2Path = 'channel/SplashActivity$1.smali' SplashCode2Path = file_operate.getFullPath(SplashCode2Path) print "SplashCode2Path : "+SplashCode2Path xmlSplashSrc = 'channel/funcell_plugin_splash.xml' xmlSplashSrc = file_operate.getFullPath(xmlSplashSrc) print "xmlSplashSrc : "+xmlSplashSrc if not os.path.exists(SplashPath) or not os.path.exists(SplashCodePath) or not os.path.exists(SplashCode2Path) or not os.path.exists(xmlSplashSrc): error_operate.error(111) return (1, False) codeDir = decompileDir+'/oldApkDir/' + '/smali/com/haowan/funcell/sdk/api/splash' newSplashCodePath = codeDir + '/SplashActivity.smali' print "newSplashCodePath : "+newSplashCodePath file_operate.copyFile(SplashCodePath, newSplashCodePath) newSplashCode2Path = codeDir + '/SplashActivity$1.smali' file_operate.copyFile(SplashCode2Path, newSplashCode2Path) activityName = removeStartActivity(channelHasSplash, decompileDir+'/oldApkDir/') modifyManifestForSplash(channelHasSplash, decompileDir+'/oldApkDir/') xmlSplashTarget = decompileDir+'/oldApkDir/' + '/res/layout' if not os.path.exists(xmlSplashTarget): os.mkdir(xmlSplashTarget) xmlSplashTarget = xmlSplashTarget + '/funcell_plugin_splash.xml' file_operate.copyFile(xmlSplashSrc, xmlSplashTarget) resDir = decompileDir +'/oldApkDir/'+ '/res' file_operate.copyFiles(SplashPath, resDir) # assetsDir = decompileDir + '/assets' # developerFile = assetsDir + '/developerInfo.xml' # if not os.path.exists(assetsDir): # os.makedirs(assetsDir) # targetTree = None # targetRoot = None # if not os.path.exists(developerFile): # targetTree = ElementTree() # targetRoot = Element('developer') # targetTree._setroot(targetRoot) # else: # targetTree = ET.parse(developerFile) # targetRoot = targetTree.getroot() # infoNode = targetRoot.find('channel') # if infoNode is None: # infoNode = SubElement(targetRoot, 'channel') # infoNode.set('GameMainActivity', activityName) # targetTree.write(developerFile, 'UTF-8') print "add splash activity name : "+activityName file_operate.modifyFileContent(newSplashCodePath, '.smali', '###FuncellSdk_Start_Activity###', activityName) return (0, True)
52964383a6ee440a9b18a2e1ab35a25d5f376753
24,070
def get_k8s_helper(namespace=None, silent=False):
    """
    :param silent: set to true if you're calling this function from code that might
                   run remotely (outside of a k8s cluster)
    """
    global _k8s
    if not _k8s:
        _k8s = K8sHelper(namespace, silent=silent)
    return _k8s
81104cf72709a15fc56edca6d4e6fc005412ae75
24,072
def authenticate():
    """Authorize."""
    return redirect(Vend().authenticate())
f34d8df5e69b9552592349db71fac1b193effce1
24,073
import re


def non_device_name_convention(host):
    """
    Helper filter function to target host names which do NOT match a specified naming
    convention.

    Examples:
        - lab-junos-08.tstt.dfjt.local
        - dfjt-arista-22.prd.dfjt.local
        - lab-nxos-001.lab.dfjt.local

    :param host: The host you want to filter on
    :return bool: True if it does not match, False if it matches the convention
    """
    # Perform regex match on host name and return boolean
    if re.match(r"\w{3}\-\w+\-\d{2}.\w{3}.dfjt.local", host.name):
        return False
    else:
        return True
ecb455eb61e3917ee41ebc5a6cacfd8829d6e3f6
24,074
def recursive_swagger_spec(minimal_swagger_dict, node_spec):
    """Return a swagger_spec with a #/definitions/Node that is recursive."""
    minimal_swagger_dict['definitions']['Node'] = node_spec
    return Spec(minimal_swagger_dict)
be13e07689378b46964c45830b1ac5eb2f30be14
24,075
import json


def _get_base_parts(config):
    """
    Builds the base ip array for the first N octets based on
    supplied base or on the /N subnet mask in the cidr
    """
    if 'base' in config:
        parts = config.get('base').split('.')
    else:
        parts = []
    if 'cidr' in config:
        cidr = config['cidr']
        if '/' in cidr:
            mask = cidr[cidr.index('/') + 1:]
            if not mask.isdigit():
                raise datagen.SpecException('Invalid Mask in cidr for config: ' + json.dumps(config))
            if int(mask) not in [8, 16, 24]:
                raise datagen.SpecException('Invalid Subnet Mask in cidr for config: ' + json.dumps(config)
                                            + ' only one of /8 /16 or /24 supported')
            ip_parts = cidr[0:cidr.index('/')].split('.')
            if len(ip_parts) < 4 or not all(part.isdigit() for part in ip_parts):
                raise datagen.SpecException('Invalid IP in cidr for config: ' + json.dumps(config))
            if mask == '8':
                parts = ip_parts[0:1]
            if mask == '16':
                parts = ip_parts[0:2]
            if mask == '24':
                parts = ip_parts[0:3]
        else:
            raise datagen.SpecException('Invalid Subnet Mask in cidr for config: ' + json.dumps(config)
                                        + ' only one of /8 /16 or /24 supported')
    return parts
9257db10a07a9468ea81912e0818e220be240beb
24,076
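For illustration, the mask-to-octet mapping the function implements, shown as a standalone standard-library snippet (it does not call the function above, which depends on the datagen package):

# illustrating the mask -> octet-prefix mapping used above
cidr = '192.168.1.0/16'
ip, mask = cidr.split('/')
octets = ip.split('.')
octets_kept = {'8': 1, '16': 2, '24': 3}[mask]
print(octets[:octets_kept])   # ['192', '168'] - the "base" for a /16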
def diff_exp(counts, group1, group2):
    """Computes differential expression between group 1 and group 2
    for each column in the dataframe counts.

    Returns a dataframe of Z-scores and p-values."""
    mean_diff = counts.loc[group1].mean() - counts.loc[group2].mean()
    pooled_sd = np.sqrt(counts.loc[group1].var() / len(group1)
                        + counts.loc[group2].var() / len(group2))
    z_scores = mean_diff / pooled_sd
    z_scores = z_scores.fillna(0)

    # two-sided p-value from the standard normal distribution (z-test)
    p_vals = (1 - stats.norm.cdf(np.abs(z_scores))) * 2

    df = pd.DataFrame({'z': z_scores})
    df['p'] = p_vals
    return df
be70eb9c843d9cfd72efb9c85f023b91c3a6730e
24,077
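A small worked example of the same z-score computation on toy data; this sketch does its own imports because the snippet above assumes np, pd and stats are already in scope:

import numpy as np
import pandas as pd
from scipy import stats

counts = pd.DataFrame({'geneA': [10.0, 12.0, 11.0, 2.0, 3.0, 2.5]},
                      index=['a1', 'a2', 'a3', 'b1', 'b2', 'b3'])
group1, group2 = ['a1', 'a2', 'a3'], ['b1', 'b2', 'b3']

mean_diff = counts.loc[group1].mean() - counts.loc[group2].mean()
pooled_sd = np.sqrt(counts.loc[group1].var() / len(group1)
                    + counts.loc[group2].var() / len(group2))
z = (mean_diff / pooled_sd).fillna(0)
p = pd.Series((1 - stats.norm.cdf(np.abs(z))) * 2, index=z.index)
print(z['geneA'], p['geneA'])   # large positive z, p close to 0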
def load(source):
    """Read an HTS full-context label (for Sinsy).

    source: path or lines
    """
    song = HTSFullLabel()
    return song.load(source)
5cc2c7cfdf4e04071fc6d6525803c09592c7889f
24,078
def _binary_clf_curve(y_true, y_score, pos_label=None): """Calculate true and false positives per binary classification threshold. Parameters ---------- y_true : array, shape = [n_samples] True targets of binary classification y_score : array, shape = [n_samples] Estimated probabilities or decision function pos_label : int, optional (default=1) The label of the positive class Returns ------- fps : array, shape = [n_thresholds] A count of false positives, at index i being the number of negative samples assigned a score >= thresholds[i]. The total number of negative samples is equal to fps[-1] (thus true negatives are given by fps[-1] - fps). tps : array, shape = [n_thresholds := len(np.unique(y_score))] An increasing count of true positives, at index i being the number of positive samples assigned a score >= thresholds[i]. The total number of positive samples is equal to tps[-1] (thus false negatives are given by tps[-1] - tps). thresholds : array, shape = [n_thresholds] Decreasing score values. """ y_true, y_score = check_arrays(y_true, y_score) y_true = column_or_1d(y_true) y_score = column_or_1d(y_score) # ensure binary classification if pos_label is not specified classes = np.unique(y_true) if (pos_label is None and not (np.all(classes == [0, 1]) or np.all(classes == [-1, 1]) or np.all(classes == [0]) or np.all(classes == [-1]) or np.all(classes == [1]))): raise ValueError("Data is not binary and pos_label is not specified") elif pos_label is None: pos_label = 1. # make y_true a boolean vector y_true = (y_true == pos_label) # Sort scores and corresponding truth values desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1] y_score = y_score[desc_score_indices] y_true = y_true[desc_score_indices] # y_score typically has many tied values. Here we extract # the indices associated with the distinct values. We also # concatenate a value for the end of the curve. distinct_value_indices = np.where(np.diff(y_score))[0] threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1] # accumulate the true positives with decreasing threshold tps = y_true.cumsum()[threshold_idxs] fps = 1 + threshold_idxs - tps return fps, tps, y_score[threshold_idxs]
0dd0f1bb711b0df052c115534b886b93ae8e59f3
24,079
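The heart of the function above is the sort-then-cumulative-sum step. A self-contained NumPy illustration of that step on a tiny example (it does not call the function itself, which depends on scikit-learn helpers such as check_arrays):

import numpy as np

y_true = np.array([1, 0, 1, 1, 0])
y_score = np.array([0.9, 0.8, 0.8, 0.3, 0.1])

order = np.argsort(y_score, kind="mergesort")[::-1]
y_true, y_score = y_true[order], y_score[order]

# indices where the (descending) score changes, plus the last position
distinct = np.where(np.diff(y_score))[0]
thresholds_idx = np.r_[distinct, y_true.size - 1]

tps = y_true.cumsum()[thresholds_idx]   # true positives at each threshold
fps = 1 + thresholds_idx - tps          # false positives at each threshold
print(y_score[thresholds_idx], tps, fps)  # [0.9 0.8 0.3 0.1] [1 2 3 3] [0 1 1 2]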
def term_open_failed(data=None): """ Construct a template for a term event """ tpl = term() tpl.addKey(name='event', data="open-failed") if data is not None: tpl.addKey(name='data', data=data) return tpl
7eae9e897294a545c4aaf27fe4204a6fd8b106a1
24,080
def create_error_payload(exception, message, endpoint_id):
    """
    Creates an error payload to be sent as a response in case of failure
    """
    print(f'{exception}: {message}')
    error_payload = {
        'status': 'MESSAGE_NOT_SENT',
        'endpointId': endpoint_id if endpoint_id else 'NO_ENDPOINT_ID',
        'message': f'{exception}: {message}'
    }
    return error_payload
90f266d22429d385e828dcdd92fca3d7b2e6df48
24,081
def has_columns(df, columns):
    """Check if DataFrame has necessary columns.

    Args:
        df (pd.DataFrame): DataFrame.
        columns (list of str): Columns to check for.

    Returns:
        bool: True if DataFrame has specified columns.
    """
    result = True
    for column in columns:
        if column not in df.columns:
            print("Missing column: {} in DataFrame".format(column))
            result = False

    return result
d2752099fb13cf3fb220cb0c8402917488c32ef1
24,082
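Quick usage check, assuming has_columns from the snippet above is in scope:

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
print(has_columns(df, ["a", "b"]))        # True
print(has_columns(df, ["a", "missing"]))  # prints a warning, returns False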
def render(template, **kwargs): """Render template with default values set""" return JINJA_ENV.get_template(template).render( autograde=autograde, css=CSS, favicon=FAVICON, timestamp=timestamp_utc_iso(), **kwargs )
a523822c6c3bb9ff2b41a63091d2c89f612b18b4
24,083
def _cytof_analysis_derivation(context: DeriveFilesContext) -> DeriveFilesResult: """Generate a combined CSV for CyTOF analysis data""" cell_counts_analysis_csvs = pd.json_normalize( data=context.trial_metadata, record_path=["assays", "cytof", "records"], meta=[prism.PROTOCOL_ID_FIELD_NAME], ) artifacts = [] for combined_f_kind in [ "cell_counts_assignment", "cell_counts_compartment", "cell_counts_profiling", ]: res_df = pd.DataFrame() for index, row in cell_counts_analysis_csvs.iterrows(): obj_url = row[f"output_files.{combined_f_kind}.object_url"] cell_counts_csv = context.fetch_artifact(obj_url, True) if not cell_counts_csv: raise Exception( f"Failed to read {obj_url} building Cytof analysis derivation" ) df = pd.read_csv(cell_counts_csv) # Each cell_counts_... file consist of just records for one sample. # The first column of each cell_counts_csv (CellSubset) contains cell group types # and the second contains counts for those types. # Create a new, transposed dataframe with cell group types as column headers # and a single row of cell count data. df = df.set_index("CellSubset") df = df.drop( columns="Unnamed: 0", axis=1 ) # Cell counts files contain an unnamed index column df = df.transpose() # and adding metadata, so we can distinguish different samples df = df.rename(index={"N": row["cimac_id"]}) df["cimac_id"] = row["cimac_id"] df["cimac_participant_id"] = participant_id_from_cimac(row["cimac_id"]) df[prism.PROTOCOL_ID_FIELD_NAME] = row[prism.PROTOCOL_ID_FIELD_NAME] # finally combine them res_df = pd.concat([res_df, df]) # and add as artifact artifacts.append( _build_artifact( context=context, file_name=f"combined_{combined_f_kind}.csv", data=res_df.to_csv(index=False), data_format="csv", # confusing, but right file_type=combined_f_kind.replace("_", " "), include_upload_type=True, ) ) return DeriveFilesResult( artifacts, context.trial_metadata # return metadata without updates )
21ca42e0d53f602c09a3901b6287d0d970bc7d7b
24,084
def get_field(name, data): """ Return a valid Field by given data """ if isinstance(data, AbstractField): return data data = keys_to_string(data) type = data.get('type', 'object') if type == "string": return StringField(name=name, **data) elif type == "boolean": return BooleanField(name=name, **data) elif type == "short": return ShortField(name=name, **data) elif type == "integer": return IntegerField(name=name, **data) elif type == "long": return LongField(name=name, **data) elif type == "float": return FloatField(name=name, **data) elif type == "double": return DoubleField(name=name, **data) elif type == "ip": return IpField(name=name, **data) elif type == "date": return DateField(name=name, **data) elif type == "multi_field": return MultiField(name=name, **data) elif type == "geo_point": return GeoPointField(name=name, **data) elif type == "attachment": return AttachmentField(name=name, **data) elif type == "object": if '_all' in data: return DocumentObjectField(name=name, **data) return ObjectField(name=name, **data) raise RuntimeError("Invalid type: %s" % type)
61abdb216ff76f54c752878023bc3c60128e18f4
24,085
def data_context_connectivity_context_connectivity_serviceuuid_namevalue_name_get(uuid, value_name): # noqa: E501 """data_context_connectivity_context_connectivity_serviceuuid_namevalue_name_get returns tapi.common.NameAndValue # noqa: E501 :param uuid: Id of connectivity-service :type uuid: str :param value_name: Id of name :type value_name: str :rtype: TapiCommonNameAndValue """ return 'do some magic!'
f703f90bfbd4f476b58f7aacceecca9a444835e4
24,086
def open_alleles_file(N0, n, U, Es, mmax, mwt, mutmax, rep):
    """
    This function opens the alleles output file and returns a file handle to it.
    """
    sim_id = 'N%d_n%d_U%.6f_Es%.5f_mmax%.2f_mwt%.2f_mutmax%d_rep%d' %(N0, n, U, Es, mmax, mwt, mutmax, rep)
    data_dir = '../SIM_DATA'
    outfile = open("%s/alleles_%s.csv" %(data_dir,sim_id),"w")
    return outfile
62455b1ba4dd65b6ef78f3b4cfdb31efd8433db6
24,087
def approximateWcs(wcs, camera_wrapper=None, detector_name=None, obs_metadata=None, order=3, nx=20, ny=20, iterations=3, skyTolerance=0.001*LsstGeom.arcseconds, pixelTolerance=0.02): """ Approximate an existing WCS as a TAN-SIP WCS The fit is performed by evaluating the WCS at a uniform grid of points within a bounding box. @param[in] wcs wcs to approximate @param[in] camera_wrapper is an instantiation of GalSimCameraWrapper @param[in] detector_name is the name of the detector @param[in] obs_metadata is an ObservationMetaData characterizing the telescope pointing @param[in] order order of SIP fit @param[in] nx number of grid points along x @param[in] ny number of grid points along y @param[in] iterations number of times to iterate over fitting @param[in] skyTolerance maximum allowed difference in world coordinates between input wcs and approximate wcs (default is 0.001 arcsec) @param[in] pixelTolerance maximum allowed difference in pixel coordinates between input wcs and approximate wcs (default is 0.02 pixels) @return the fit TAN-SIP WCS """ tanWcs = wcs # create a matchList consisting of a grid of points covering the bbox refSchema = afwTable.SimpleTable.makeMinimalSchema() refCoordKey = afwTable.CoordKey(refSchema["coord"]) refCat = afwTable.SimpleCatalog(refSchema) sourceSchema = afwTable.SourceTable.makeMinimalSchema() SingleFrameMeasurementTask(schema=sourceSchema) # expand the schema sourceCentroidKey = afwTable.Point2DKey(sourceSchema["slot_Centroid"]) sourceCat = afwTable.SourceCatalog(sourceSchema) # 20 March 2017 # the 'try' block is how it works in swig; # the 'except' block is how it works in pybind11 try: matchList = afwTable.ReferenceMatchVector() except AttributeError: matchList = [] bbox = camera_wrapper.getBBox(detector_name) bboxd = LsstGeom.Box2D(bbox) for x in np.linspace(bboxd.getMinX(), bboxd.getMaxX(), nx): for y in np.linspace(bboxd.getMinY(), bboxd.getMaxY(), ny): pixelPos = LsstGeom.Point2D(x, y) ra, dec = camera_wrapper.raDecFromPixelCoords( np.array([x]), np.array([y]), detector_name, obs_metadata=obs_metadata, epoch=2000.0, includeDistortion=True) skyCoord = LsstGeom.SpherePoint(ra[0], dec[0], LsstGeom.degrees) refObj = refCat.addNew() refObj.set(refCoordKey, skyCoord) source = sourceCat.addNew() source.set(sourceCentroidKey, pixelPos) matchList.append(afwTable.ReferenceMatch(refObj, source, 0.0)) # The TAN-SIP fitter is fitting x and y separately, so we have to # iterate to make it converge for _ in range(iterations): sipObject = makeCreateWcsWithSip(matchList, tanWcs, order, bbox) tanWcs = sipObject.getNewWcs() fitWcs = sipObject.getNewWcs() return fitWcs
a0dbf47c197033291541d3afc19406e161173b81
24,088
def parse_table_column_names(table_definition_text): """ Parse the table and column names from the given SQL table definition. Return (table-name, (col1-name, col2-name, ...)). Naรฏvely assumes that ","s separate column definitions regardless of quoting, escaping, and context. """ match = _table_def_pattern.match(table_definition_text) if match is None: raise ValueError('Cannot parse table definition from: {!r}' .format(table_definition_text)) tbl_nm = match[1] col_defs = match[2].split(',') col_nms = (col_def.split(maxsplit=1)[0] for col_def in col_defs) return (tbl_nm, tuple(col_nms))
3ec88640a0411799e35b25987a809b1489f4a03b
24,089
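A hedged demo of what the parser returns. _table_def_pattern is defined elsewhere in the original module, so the pattern below is only an assumption chosen to make the example run (group 1 = table name, group 2 = column definitions):

import re

# assumed stand-in for the module-level _table_def_pattern
_table_def_pattern = re.compile(
    r'\s*create\s+table\s+(\w+)\s*\((.*)\)\s*;?\s*$',
    re.IGNORECASE | re.DOTALL)

text = "CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, age INTEGER)"
match = _table_def_pattern.match(text)
tbl_nm, col_defs = match[1], match[2].split(',')
col_nms = tuple(cd.split(maxsplit=1)[0] for cd in col_defs)
print(tbl_nm, col_nms)   # users ('id', 'name', 'age')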
def show_all_positions():
    """
    Render the positions page (reached from the positions button on the top right),
    showing all positions stored in the database.
    """
    db = main()
    conn = create_connection(db)
    mycur = conn.cursor()
    # fetch all rows so the template receives a concrete list rather than a live cursor
    positions = mycur.execute("SELECT * FROM positions").fetchall()
    return render_template('position-all.html', positions=positions)
df59bf0f97e46cf79d23d93037972513759942d3
24,091
import copy


def get_subtree_tips(terms: list, name: str, tree):
    """
    For each duplicate sequence whose name starts with `name`,
    collect the tip names of its parent subtree.
    Returns the list of tip-name lists and the list of duplicate names.
    """
    # get the duplicate sequences
    dups = [e for e in terms if e.startswith(name)]
    subtree_tips = []
    # for individual sequence among duplicate sequences
    for dup in dups:
        # create a copy of the tree
        temptree = copy.deepcopy(tree)
        # get the node path for the duplicate sequence
        node_path = temptree.get_path(dup)
        # for the terminals of the parent of the duplicate sequence
        # get the terminal names and append them to temp
        temp = []
        for term in node_path[-2].get_terminals():
            temp.append(term.name)
        subtree_tips.append(temp)

    return subtree_tips, dups
7bebf86ba95ede46f4e4c3ad0926784d4755124b
24,092
def pressure_to_cm_h2o(press_in): """Convert pressure in [pa] to [cm H2O] Returns a rounded integer""" conversion_factor = 98.0665 return int(round(press_in / conversion_factor))
cabf905a71747c0e053356d89beebad7f03e85e7
24,093
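A quick sanity check of the conversion factor (1 cm H2O = 98.0665 Pa, so one standard atmosphere of 101325 Pa is roughly 1033 cm H2O), assuming the function above is in scope:

print(pressure_to_cm_h2o(98.0665))   # 1
print(pressure_to_cm_h2o(101325))    # 1033  (~1 atm)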
def parse_deceased_field(deceased_field): """ Parse the deceased field. At this point the deceased field, if it exists, is garbage as it contains First Name, Last Name, Ethnicity, Gender, D.O.B. and Notes. We need to explode this data into the appropriate fields. :param list deceased_field: a list where each item is a word from the deceased field :return: a dictionary representing a deceased field. :rtype: dict """ dob_index = -1 dob_tokens = [Fields.DOB, '(D.O.B', '(D.O.B.', '(D.O.B:', '(DOB', '(DOB:', 'D.O.B.', 'DOB:'] while dob_index < 0 and dob_tokens: dob_token = dob_tokens.pop() try: dob_index = deceased_field.index(dob_token) except ValueError: pass else: break if dob_index < 0: raise ValueError(f'Cannot parse {Fields.DECEASED}: {deceased_field}') d = {} d[Fields.DOB] = deceased_field[dob_index + 1] notes = deceased_field[dob_index + 2:] if notes: d[Fields.NOTES] = ' '.join(notes) # `fleg` stands for First, Last, Ethnicity, Gender. It represents the info stored before the DOB. fleg = deceased_field[:dob_index] # Try to pop out the results one by one. If pop fails, it means there is nothing left to retrieve, # For example, there is no first name and last name. try: d[Fields.GENDER] = fleg.pop().replace(',', '') d[Fields.ETHNICITY] = fleg.pop().replace(',', '') d[Fields.LAST_NAME] = fleg.pop().replace(',', '') d[Fields.FIRST_NAME] = fleg.pop().replace(',', '') except IndexError: pass return d
f87faed7b9def61f0533dc0d3bdd7823490031cf
24,094
def rate(epoch, rate_init, epochs_per_order): """ Computes learning rate as a function of epoch index. Inputs: epoch - Index of current epoch. rate_init - Initial rate. epochs_per_order - Number of epochs to drop an order of magnitude. """ return rate_init * 10.0 ** (-epoch / epochs_per_order)
cc1c7850d4bd98d30b97c7915ceb96eaeadef327
24,095
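Worked example: with rate_init=0.1 and epochs_per_order=10, the learning rate drops one order of magnitude every 10 epochs (assumes the function above is in scope; values are approximate because of floating point):

print(rate(0, 0.1, 10))    # 0.1
print(rate(10, 0.1, 10))   # ~0.01 (one order of magnitude lower)
print(rate(20, 0.1, 10))   # ~0.001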
import networkx


def has_loop(net):
    """ Check if the network contains a cycle """
    try:
        networkx.algorithms.cycles.find_cycle(net)
        return True
    except networkx.exception.NetworkXNoCycle:
        return False
8141b0609e780297ba3ecabc1d9bd97ca7ea09c3
24,096
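Example usage with networkx graphs (assumes has_loop from the snippet above is in scope):

import networkx as nx

triangle = nx.cycle_graph(3)   # edges 0-1, 1-2, 2-0
chain = nx.path_graph(3)       # edges 0-1, 1-2, no cycle
print(has_loop(triangle))      # True
print(has_loop(chain))         # False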
def get_query_name(hmma):
    """
    get the panther family name from the query target
    """
    hmm_fam = None
    hmm_sf = None
    hmma_list = hmma.split('.')
    if len(hmma_list) > 2:
        hmm_fam = hmma_list[0]
        hmm_sf = hmma_list[1]
        something_else = hmma_list[2]
    elif len(hmma_list) == 2:
        hmm_fam = hmma_list[0]
        something_else = hmma_list[1]

    hmm_id = hmm_fam
    # hmm_sf defaults to None so this check no longer raises UnboundLocalError
    # when the query has no subfamily component
    if hmm_sf and hmm_sf.startswith("SF"):
        hmm_id = hmm_fam + ':' + hmm_sf
    return hmm_id
cdb7bc6f9db022842872e710266937c872768407
24,097
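Expected behaviour on PANTHER-style IDs; the IDs below are hypothetical and the call assumes the corrected function above is in scope:

print(get_query_name("PTHR10003.SF5.extra"))  # PTHR10003:SF5
print(get_query_name("PTHR10003.extra"))      # PTHR10003 (no subfamily)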
def staff_member_required(view_func, redirect_field_name=REDIRECT_FIELD_NAME, login_url='admin:login'): """ Decorator for views that checks that the user is logged in and is a staff member, displaying the login page if necessary. """ return user_passes_test( lambda u: u.is_active and u.is_staff, login_url=login_url, redirect_field_name=redirect_field_name )(view_func)
bf62abb607e5fcef5a71938dce2fd8c0f008d9cf
24,098
from typing import Dict


def get_event_listeners(ctx: Configuration) -> Dict:
    """Return the events that are being listened for."""
    try:
        req = restapi(ctx, METH_GET, hass.URL_API_EVENTS)

        return req.json() if req.status_code == 200 else {}  # type: ignore
    except (HomeAssistantCliError, ValueError):
        # ValueError if req.json() can't parse the json
        _LOGGER.exception("Unexpected result retrieving event listeners")

        return {}
aebe497010e1719a815274e2f0e6ebb6177650d5
24,099
def process_tce(tce): """Processes the light curve for a Kepler TCE and returns processed data Args: tce: Row of the input TCE table. Returns: Processed TCE data at each stage (flattening, folding, binning). Raises: IOError: If the light curve files for this Kepler ID cannot be found. """ # Read and process the light curve. time, flattened_flux = preprocess.read_and_process_light_curve(tce.kepid, KEPLER_DATA_DIR) time, folded_flux = preprocess.phase_fold_and_sort_light_curve(time, flattened_flux, tce.tce_period, tce.tce_time0bk) # Generate the local and global views. local_view = preprocess.local_view(time, folded_flux, tce.tce_period, tce.tce_duration, num_bins=201, bin_width_factor=0.16, num_durations=4) global_view = preprocess.global_view(time, folded_flux, tce.tce_period, num_bins=2001, bin_width_factor=1 / 2001) return flattened_flux, folded_flux, local_view, global_view
f20e62b48828c7c4d9fac8697035f058f48f5799
24,100
def _build_kwic(docs, search_tokens, context_size, match_type, ignore_case, glob_method, inverse, highlight_keyword=None, with_metadata=False, with_window_indices=False, only_token_masks=False): """ Helper function to build keywords-in-context (KWIC) results from documents `docs`. :param docs: list of tokenized documents, optionally as 2-tuple where each element in `docs` is a tuple of (tokens list, tokens metadata dict) :param search_tokens: search pattern(s) :param context_size: either scalar int or tuple (left, right) -- number of surrounding words in keyword context. if scalar, then it is a symmetric surrounding, otherwise can be asymmetric :param match_type: One of: 'exact', 'regex', 'glob'. If 'regex', `search_token` must be RE pattern. If `glob`, `search_token` must be a "glob" pattern like "hello w*" (see https://github.com/metagriffin/globre). :param ignore_case: If True, ignore case for matching. :param glob_method: If `match_type` is 'glob', use this glob method. Must be 'match' or 'search' (similar behavior as Python's :func:`re.match` or :func:`re.search`). :param inverse: Invert the matching results. :param highlight_keyword: If not None, this must be a string which is used to indicate the start and end of the matched keyword. :param with_metadata: add document metadata to KWIC results :param with_window_indices: add window indices to KWIC results :param only_token_masks: return only flattened token masks for filtering :return: list with KWIC results per document """ tokens = docs if docs: first_elem = next(iter(docs)) if isinstance(first_elem, tuple) and len(first_elem) == 2: tokens = list(zip(*docs))[0] # find matches for search criteria -> list of NumPy boolean mask arrays matches = _token_pattern_matches(tokens, search_tokens, match_type=match_type, ignore_case=ignore_case, glob_method=glob_method) if not only_token_masks and inverse: matches = [~m for m in matches] left, right = context_size kwic_list = [] for mask, dtok in zip(matches, docs): if isinstance(dtok, tuple): dtok, dmeta = dtok else: dmeta = None dtok_arr = np.array(dtok, dtype=str) ind = np.where(mask)[0] ind_windows = make_index_window_around_matches(mask, left, right, flatten=only_token_masks, remove_overlaps=True) if only_token_masks: assert ind_windows.ndim == 1 assert len(ind) <= len(ind_windows) # from indices back to binary mask; this only works with remove_overlaps=True win_mask = np.repeat(False, len(dtok)) win_mask[ind_windows] = True if inverse: win_mask = ~win_mask kwic_list.append(win_mask) else: assert len(ind) == len(ind_windows) windows_in_doc = [] for match_ind, win in zip(ind, ind_windows): # win is an array of indices into dtok_arr tok_win = dtok_arr[win].tolist() if highlight_keyword is not None: highlight_mask = win == match_ind assert np.sum(highlight_mask) == 1 highlight_ind = np.where(highlight_mask)[0][0] tok_win[highlight_ind] = highlight_keyword + tok_win[highlight_ind] + highlight_keyword win_res = {'token': tok_win} if with_window_indices: win_res['index'] = win if with_metadata and dmeta is not None: for meta_key, meta_vals in dmeta.items(): win_res[meta_key] = np.array(meta_vals)[win].tolist() windows_in_doc.append(win_res) kwic_list.append(windows_in_doc) assert len(kwic_list) == len(docs) return kwic_list
d6252c716831bf51342f06aaab343d0f88f6811d
24,101
import datetime
import logging

import pytz


def filter_records(records,
                   arns_to_filter_for=None,
                   from_date=datetime.datetime(1970, 1, 1, tzinfo=pytz.utc),
                   to_date=datetime.datetime.now(tz=pytz.utc)):
    """Filter records so they match the given condition"""
    # the defaults use the datetime module (not the datetime class), so the
    # import above is `import datetime` rather than `from datetime import datetime`
    result = list(pipe(records,
                       filterz(_by_timeframe(from_date, to_date)),
                       filterz(_by_role_arns(arns_to_filter_for))))
    if not result and records:
        logging.warning(ALL_RECORDS_FILTERED)
    return result
6a3f14e577cef4a2a883781f50d3c695e11f8c82
24,103
from typing import Dict, Tuple

# matrix inverse for sparse matrices; the original `from operator import inv`
# refers to bitwise NOT and cannot invert (I - Q)
from scipy.sparse.linalg import inv

# csc_matrix, identity, np, generate_states and probable_outcome are assumed
# to be imported/defined elsewhere in the module.


def generate_prob_matrix(A: int, D: int)\
        -> Tuple[Dict[Tuple[int, int], int], Dict[Tuple[int, int], int], np.ndarray]:
    """Generate the probability outcome matrix"""
    transient_state, absorbing_state = generate_states(A, D)
    transient_state_lookup = {s: i for i, s in enumerate(transient_state)}
    absorbing_state_lookup = {s: i for i, s in enumerate(absorbing_state)}
    transient_length, absorbing_length = len(transient_state), len(absorbing_state)
    # Add probability to transition elements
    Qrow = []
    Qcol = []
    Qdata = []
    Rrow = []
    Rcol = []
    Rdata = []
    for i, (a, d) in enumerate(transient_state):
        max_deaths = 2 if a > 1 and d > 1 else 1
        for dl in range(0, max_deaths + 1):
            al = max_deaths - dl
            na, nd = a - al, d - dl
            if a - al > 0 and d - dl > 0:
                Qrow.append(i)
                Qcol.append(transient_state_lookup[(na, nd)])
                Qdata.append(probable_outcome(min(a, 3), min(d, 2), dl))
            else:
                Rrow.append(i)
                Rcol.append(absorbing_state_lookup[(na, nd)])
                Rdata.append(probable_outcome(min(a, 3), min(d, 2), dl))
    Q = csc_matrix((Qdata, (Qrow, Qcol)), shape=(transient_length, transient_length))
    R = csc_matrix((Rdata, (Rrow, Rcol)), shape=(transient_length, absorbing_length))
    iden = identity(transient_length)
    F = inv(iden - Q) * R
    return transient_state_lookup, absorbing_state_lookup, F
00f8074f19436fcd9031af5c553f8b307b6008ba
24,104
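The closing lines compute the standard absorbing-Markov-chain result F = (I - Q)^-1 * R, the probability of ending in each absorbing state. A tiny self-contained demo of that step with made-up transition probabilities (not the Risk-specific states above):

import numpy as np
from scipy.sparse import csc_matrix, identity
from scipy.sparse.linalg import inv

# two transient states, two absorbing states, made-up transition probabilities
Q = csc_matrix(np.array([[0.0, 0.5],
                         [0.4, 0.0]]))   # transient -> transient
R = csc_matrix(np.array([[0.3, 0.2],
                         [0.1, 0.5]]))   # transient -> absorbing

F = inv(identity(2, format="csc") - Q) * R   # absorption probabilities
print(F.toarray())       # [[0.4375 0.5625], [0.275 0.725]]
print(F.sum(axis=1))     # each row sums to 1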
import threading
import time


def _prepare_func(app_id, run_id, train_fn, args_dict, local_logdir):
    """
    Args:
        app_id:
        run_id:
        train_fn:
        args_dict:
        local_logdir:
    Returns:
    """
    def _wrapper_fun(iter):
        """
        Args:
            iter:
        Returns:
        """
        for i in iter:
            executor_num = i
        experiment_utils._set_ml_id(app_id, run_id)
        tb_hdfs_path = ''
        hdfs_exec_logdir = experiment_utils._get_logdir(app_id, run_id)
        t = threading.Thread(target=devices._print_periodic_gpu_utilization)
        if devices.get_num_gpus() > 0:
            t.start()
        try:
            #Arguments
            if args_dict:
                param_string, params, args = experiment_utils.build_parameters(train_fn, executor_num, args_dict)
                hdfs_exec_logdir, hdfs_appid_logdir = experiment_utils._create_experiment_subdirectories(app_id, run_id, param_string, 'grid_search', params=params)
                logfile = experiment_utils._init_logger(hdfs_exec_logdir)
                tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_appid_logdir, executor_num, local_logdir=local_logdir)
                print(devices._get_gpu_info())
                print('-------------------------------------------------------')
                print('Started running task ' + param_string)
                task_start = time.time()
                retval = train_fn(*args)
                task_end = time.time()
                experiment_utils._handle_return_simple(retval, hdfs_exec_logdir, logfile)
                time_str = 'Finished task ' + param_string + ' - took ' + experiment_utils._time_diff(task_start, task_end)
                print(time_str)
                print('-------------------------------------------------------')
            else:
                tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_exec_logdir, executor_num, local_logdir=local_logdir)
                logfile = experiment_utils._init_logger(hdfs_exec_logdir)
                print(devices._get_gpu_info())
                print('-------------------------------------------------------')
                print('Started running task')
                task_start = time.time()
                retval = train_fn()
                task_end = time.time()
                experiment_utils._handle_return_simple(retval, hdfs_exec_logdir, logfile)
                time_str = 'Finished task - took ' + experiment_utils._time_diff(task_start, task_end)
                print(time_str)
                print('-------------------------------------------------------')
        except:
            raise
        finally:
            experiment_utils._cleanup(tensorboard, t)
    return _wrapper_fun
93ec02a10e9dedd00787e302db5cca716e2d4f37
24,105
import re


def convert_camel_to_snake(string, remove_non_alphanumeric=True):
    """
    converts CamelCase to snake_case
    :type string: str
    :rtype: str
    """
    if remove_non_alphanumeric:
        string = remove_non_alpha(string, replace_with='_', keep_underscore=True)
    s1 = _first_cap_re.sub(r'\1_\2', string)
    result = _all_cap_re.sub(r'\1_\2', s1).lower()
    result = re.sub(pattern=r'\s*_+', repl="_", string=result)
    return result
ed21ab5a58b08f2bc8b00d9b1cd5aa812b4e4148
24,106
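A runnable sketch of the same two-pass conversion. _first_cap_re, _all_cap_re and remove_non_alpha are defined elsewhere in the original module, so the regexes below are the classic CamelCase patterns and are only an assumption about what those globals contain:

import re

# assumed equivalents of the module-level globals used above
_first_cap_re = re.compile(r'(.)([A-Z][a-z]+)')
_all_cap_re = re.compile(r'([a-z0-9])([A-Z])')

def camel_to_snake(string):
    s1 = _first_cap_re.sub(r'\1_\2', string)
    result = _all_cap_re.sub(r'\1_\2', s1).lower()
    return re.sub(r'\s*_+', '_', result)

print(camel_to_snake('ConvertCamelToSnake'))   # convert_camel_to_snake
print(camel_to_snake('HTTPResponseCode'))      # http_response_code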