Columns: content — string (35 to 762k chars); sha1 — string (40 chars); id — int64 (0 to 3.66M)
def duration(start_time, end_time=None):
    """Get a timedelta between end_time and start_time, where end_time
    defaults to now().

    WARNING: mixing tz-aware and naive datetimes in start_time and
    end_time will cause an error.
    """
    if not start_time:
        return None
    last_time = end_time if end_time else current_time()
    return last_time - start_time
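A minimal usage sketch for duration above. The record does not include the current_time() helper, so a hypothetical stand-in returning datetime.now() is assumed here:

from datetime import datetime, timedelta

def current_time():
    # hypothetical stand-in for the helper assumed by duration()
    return datetime.now()

start = datetime(2023, 1, 1, 12, 0, 0)
end = datetime(2023, 1, 1, 12, 30, 0)
assert duration(start, end) == timedelta(minutes=30)
assert duration(None) is None  # falsy start_time short-circuits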
89febebf342225525bf7543342b884f130e7b3f2
28,086
def get_commands(cfg, clargs, *, what, **kwargs):
    """
    Delegates the creation of commands lists to appropriate functions
    based on `what` parameter.

    Parameters
    ----------
    cfg: dict
        Configuration dictionary.
    clargs: Namespace
        Command line arguments.
    what: str
        Determines the returned value (see: Returns[out]).
    kwargs: dict
        MANDATORY: path_i
        Dictionary with additional information from previous step.

    Returns
    -------
    out: iter(tuple(str, tuple(str)))
        An iterator with the 1st element as a tag (the `what` parameter)
        and the 2nd element as the iterator of the actual commands.
    """
    get_commands_f = {
        "video": get_commands_video_1,
        "image": get_commands_image_1,
        "check": get_commands_check,
    }
    ps = (
        kwargs["path_i"]
        if what not in cfg["extensions"]
        else filter(
            lambda p: osp.splitext(p)[1].lower() in cfg["extensions"][what],
            kwargs["path_i"],
        )
    )
    ps = map(lambda p: (p, get_path(cfg, clargs, p, **kwargs)), ps)
    out = chain.from_iterable(
        map(lambda p: get_commands_f[what](cfg, clargs, path_i_1=p[0], path_o_1=p[1], **kwargs), ps)
    )
    return map(lambda c: (what, c), out)
360410064a24d547729722c4f5843d78af9444c8
28,087
def heappush(heap, item):
    """
    >>> heappush([4, 4, 8, 9, 4, 12, 9, 11, 13], 7)
    [4, 4, 8, 9, 4, 12, 9, 11, 13, 7]
    >>> heappush([4, 4, 8, 9, 4, 12, 9, 11, 13, 7], 10)
    [4, 4, 8, 9, 4, 12, 9, 11, 13, 7, 10]
    >>> heappush([4, 4, 8, 9, 4, 12, 9, 11, 13, 7, 10], 5)
    [4, 4, 5, 9, 4, 8, 9, 11, 13, 7, 10, 12]
    :param heap: list maintaining the min-heap invariant
    :param item: element to push
    :return: the heap, with item added
    """
    heap.append(item)
    bubble_up(heap, len(heap) - 1)
    return heap
99e6814828e42da8a14f4d0873e62af920a800b8
28,088
import numpy as np

def dx(scalar_field):
    """
    Computes first derivative of a 1D scalar field
    :param scalar_field: 1D numpy array of scalar values
    :return: array of forward differences, one element shorter than the input
    """
    first_derivative = np.zeros((scalar_field.size - 1))
    for i_scalar in range(scalar_field.size - 1):
        i_next_scalar = i_scalar + 1
        first_derivative[i_scalar] = scalar_field[i_next_scalar] - scalar_field[i_scalar]
    return first_derivative
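A short usage sketch for dx above. Note it returns raw forward differences (not divided by any grid spacing), so the result has one fewer element than the input:

import numpy as np

field = np.array([0.0, 1.0, 4.0, 9.0])
print(dx(field))  # [1. 3. 5.] -- pairwise differences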
b0af862210a2a395dcdfdab2e921f2c305a536d2
28,089
def get_genetic_profiles(study_id, profile_filter=None):
    """Return all the genetic profiles (data sets) for a given study.

    Genetic profiles are different types of data for a given study.
    For instance the study 'cellline_ccle_broad' has profiles such as
    'cellline_ccle_broad_mutations' for mutations, 'cellline_ccle_broad_CNA'
    for copy number alterations, etc.

    Parameters
    ----------
    study_id : str
        The ID of the cBio study. Example: 'paad_icgc'
    profile_filter : Optional[str]
        A string used to filter the profiles to return. Will be one of:
        - MUTATION
        - MUTATION_EXTENDED
        - COPY_NUMBER_ALTERATION
        - MRNA_EXPRESSION
        - METHYLATION
        The genetic profiles can include "mutation", "CNA", "rppa",
        "methylation", etc.

    Returns
    -------
    genetic_profiles : list[str]
        A list of genetic profiles available for the given study.
    """
    data = {'cmd': 'getGeneticProfiles',
            'cancer_study_id': study_id}
    df = send_request(**data)
    res = _filter_data_frame(df, ['genetic_profile_id'],
                             'genetic_alteration_type', profile_filter)
    genetic_profiles = list(res['genetic_profile_id'].values())
    return genetic_profiles
b409a1511112cafab0330a23684b3e255fa0a60c
28,091
import string

def cipher(sentence, n_rotate):
    """
    Cipher string with Caesar algorithm
    ( Anything else than letters stays the same. )
    :param sentence: String containing sentence/sentences/word/words.
    :param n_rotate: number to translate letters
    :return: string with ciphered words
    """
    upper = [char for char in string.ascii_uppercase]  # Uppercase Letters
    lower = [char for char in string.ascii_lowercase]  # Lowercase Letters
    string_ = [char for char in sentence]              # String to cipher
    for i in range(len(string_)):
        transl = 0
        # Cipher Uppercase Letters
        if string_[i] in upper:
            for j in range(len(upper)):
                if string_[i] == upper[j]:
                    transl = j + n_rotate
                    while transl >= len(upper):
                        transl = transl - len(upper)
                    string_[i] = upper[transl]
                    break
        # Cipher Lowercase Letters
        elif string_[i] in lower:
            for j in range(len(lower)):
                if string_[i] == lower[j]:
                    transl = j + n_rotate
                    while transl >= len(lower):
                        transl = transl - len(lower)
                    string_[i] = lower[transl]
                    break
    # Return Cipher sentence
    return ''.join(string_)
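A quick usage check for the Caesar cipher above, showing that non-letters pass through and the alphabet wraps:

print(cipher("Hello, World!", 3))  # "Khoor, Zruog!" -- punctuation unchanged
print(cipher("xyz", 3))            # "abc" -- wraps past 'z'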
e0606949f254971431faf7899bd254f4792176d4
28,092
from typing import Union
from typing import List

def _assert_in_fc(
        r: RestClient,
        uuids: Union[str, List[str]],
        all_keys: bool = False
) -> StrDict:
    """Also return data."""
    if isinstance(uuids, str):
        uuids = [uuids]
    if all_keys:
        data = r.request_seq('GET', '/api/files', {'all-keys': True})
    else:
        data = r.request_seq('GET', '/api/files')
    assert '_links' in data
    assert 'self' in data['_links']
    assert 'files' in data
    assert len(data['files']) == len(uuids)
    for f in data['files']:
        assert f['uuid'] in uuids
    return data
908f12309a93abc472e05598abbb0e5ee29cc798
28,093
import numpy as np

def power_law(uref, h, href, shear):
    """
    Extrapolate wind speed (or other) according to power law.
    NOTE: see https://en.wikipedia.org/wiki/Wind_profile_power_law

    :param uref: wind speed at reference height (same units as extrapolated wind speed, u)
    :param h: height of extrapolated wind speed (same units as href)
    :param href: reference height (same units as h)
    :param shear: shear exponent alpha (1/7 in neutral stability) (unitless)
    :return u: extrapolated wind speed (same units as uref)
    """
    u = np.array(uref) * np.array(h / href) ** np.array(shear)
    return u
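A worked example for power_law above: extrapolating a 10 m wind speed of 5 m/s up to 80 m with the neutral-stability exponent 1/7:

u80 = power_law(uref=5.0, h=80.0, href=10.0, shear=1/7)
print(round(float(u80), 2))  # ~6.73 m/s, i.e. 5 * (80/10)**(1/7)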
cb5d002dfeed022af694060bfe9e516191835742
28,094
def binary_weight_convolution(inp, outmaps, kernel,
                              pad=None, stride=None, dilation=None, group=1,
                              w_init=None, wb_init=None, b_init=None,
                              base_axis=1, fix_parameters=False, rng=None,
                              with_bias=True):
    """Binary Weight Convolution, multiplier-less inner-product with a scale factor.

    Binary Weight Convolution is the convolution function, but the inner
    product in this function is the following,

    .. math::

        y_{n, a, b} = \\frac{1}{\\|\\mathbf{w}_n\\|_{\\ell_1}} \\sum_{m} \\sum_{i} \\sum_{j} sign(w_{n, m, i, j}) x_{m, a + i, b + j}.

    Therefore :math:`sign(w_{n, m, i, j})` is either :math:`1` or :math:`-1`
    and the inner product simplifies to addition followed by scaling factor
    :math:`\\alpha = \\frac{1}{\\|\\mathbf{w}_n\\|_{\\ell_1}}`.
    The number of :math:`n` is the number of outmaps of the convolution function.

    References:

        Rastegari, Mohammad, et al. "XNOR-Net: ImageNet Classification Using
        Binary Convolutional Neural Networks." arXiv preprint
        arXiv:1603.05279 (2016).

    .. note::

        1) If you would like to share weights between some layers, please
        make sure to share the standard, floating value weights (`weight`)
        and not the binarized weights (`binary_weight`).

        2) The weights and the binary weights become synced only after
        :func:`~nnabla._variable.Variable.forward` is called, and not after
        a call to :func:`~nnabla._variable.Variable.backward`. To access the
        parameters of the network, remember to call
        :func:`~nnabla._variable.Variable.forward` once before doing so,
        otherwise the float weights and the binary weights will not be in sync.

        3) Quantized values are stored as floating point numbers for
        `binary_weight`, since this function is only for simulation purposes.

    Args:
        inp (~nnabla.Variable): N-D array.
        outmaps (int): Number of convolution kernels (which is equal to the
            number of output channels). For example, to apply convolution on
            an input with 16 types of filters, specify 16.
        kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For
            example, to apply convolution on an image with a 3 (height) by 5
            (width) two-dimensional kernel, specify (3,5).
        pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
        stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
        dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
        group (int): Number of groups of channels. This makes connections
            across channels sparser by grouping connections along map direction.
        w_init (~nnabla.initializer.BaseInitializer): Initializer for weight.
        wb_init (~nnabla.initializer.BaseInitializer): Initializer for binary weight.
        b_init (~nnabla.initializer.BaseInitializer): Initializer for bias.
        base_axis (int): Dimensions up to `base_axis` are treated as the
            sample dimensions.
        fix_parameters (bool): When set to `True`, the weights and biases
            will not be updated.
        rng (numpy.random.RandomState): Random generator for Initializer.
        with_bias (bool): Specify whether to include the bias term.

    Returns:
        :class:`~nnabla.Variable`
    """
    if w_init is None:
        w_init = UniformInitializer(
            calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
    if wb_init is None:
        wb_init = UniformInitializer(
            calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
    if b_init is None:
        b_init = ConstantInitializer()
    w = get_parameter_or_create(
        "W", (outmaps, inp.shape[base_axis]) + tuple(kernel),
        w_init, not fix_parameters)
    wb = get_parameter_or_create(
        "Wb", (outmaps, inp.shape[base_axis]) + tuple(kernel),
        wb_init, not fix_parameters)
    alpha = get_parameter_or_create(
        "alpha", (outmaps, ), ConstantInitializer(0), False)
    b = None
    if with_bias:
        b = get_parameter_or_create(
            "b", (outmaps,), b_init, not fix_parameters)
    return F.binary_weight_convolution(inp, w, wb, alpha, b, base_axis,
                                       pad, stride, dilation, group)
3cae56fee85ba0c7679e9de7fd2743c9ce252d1a
28,096
def dataset2Xy(dataset):
    """Convert a dataset (pd.DataFrame) to X, y and output_dim where X is
    the features, y is the labels (one-hot vectors), and output_dim is the
    number of labels overall.

    Args:
        dataset: A pandas dataframe that is composed of features columns
            and the class column which is the last one.

    Returns:
        tuple (X, y, output_dim) where X is the features matrix; y is the
        one-hot label matrix; and output_dim is the amount of different
        labels in y.
    """
    output_col = dataset.columns[-1]
    output_dim = len(dataset[output_col].value_counts())
    X = dataset.drop(columns=[output_col]).to_numpy()
    y = to_categorical(dataset[output_col].to_numpy(), num_classes=output_dim)
    return X, y, output_dim
ef07873db639a7a9c34b149959acd419d4a0b9d3
28,097
def pairwise_list(a_list):
    """
    Convert a list into a list of non-overlapping pairs:
    "s -> (s0, s1), (s2, s3), (s4, s5), ..."
    :param a_list: list with an even number of elements
    :return: list of pairs
    """
    if len(a_list) % 2 != 0:
        raise Exception("pairwise_list error!")
    r_list = []
    for i in range(0, len(a_list) - 1, 2):
        r_list.append([a_list[i], a_list[i + 1]])
    return r_list
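A quick usage note for pairwise_list above, showing the non-overlapping grouping and the even-length requirement:

print(pairwise_list([1, 2, 3, 4]))  # [[1, 2], [3, 4]]
# pairwise_list([1, 2, 3]) raises Exception: odd-length input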
5142fb2e00c931ab57fc9028eb9b6df5a98c0342
28,099
import math

def BaseWaveguideOFF(args, state, options) -> GeomGroup:
    """
    Offset the waveguide (jumps left or right of waveguide)

    Parameters
    ----------
    args : list
        1 argument: offset (in um), positive means on left of waveguide direction.
    state : dict
        Current state.
    options : dict
        The sequencer options.

    Returns
    -------
    samplemaker.shapes.GeomGroup
        The waveguide geometry.
    """
    off = args[0]
    a = math.radians(state['a'] + 90)
    state['x'] += off * math.cos(a)
    state['y'] += off * math.sin(a)
    return GeomGroup()
235004836ea149b02c7dd12dde400861a6739954
28,100
import pandas as pd

def read_csv_profiles(file):
    """Read csv file and parse dates."""
    return pd.read_csv(file, parse_dates=["valid"], index_col="valid")
ce45c4200e8cc71daaec4dcb91e5bdd9199709c6
28,102
def uCSIsMathematicalAlphanumericSymbols(code):
    """Check whether the character is part of
    MathematicalAlphanumericSymbols UCS Block
    """
    ret = libxml2mod.xmlUCSIsMathematicalAlphanumericSymbols(code)
    return ret
2812f04afe4d977636a1c7c41dac712f1d63e184
28,103
import time

def builtin_localtime(t):
    """
    Convert an epoch time to a [yr mon day hr min sec wd yd dst] list
    structure in the local timezone, or vice-versa.
    """
    if isinstance(t, BReal):
        tv = [BInt(xv) for xv in time.localtime(t.value)]
        return BList(tv)
    elif isinstance(t, BList):
        tvv = [x.value for x in t.value]
        # time.mktime requires a 9-item tuple (or struct_time), not a list
        return BInt(time.mktime(tuple(tvv)))
ebc900d3b82f5223489e3fedb761302713e27fba
28,104
def avg_gate_infidelity(A, B, mxBasis):
    """Returns the average gate infidelity between A and B,
    where B is the "target" operation."""
    return _tools.average_gate_infidelity(A, B, mxBasis)
282b930fbaf1822ec52b937c854491ed8512ae89
28,105
from bs4 import BeautifulSoup

def html_to_plain_text(html_str):
    """
    Takes an HTML string and returns text with HTML tags removed and line
    breaks replaced with spaces. Shamelessly copied from open-discussions.

    Args:
        html_str (str): A string containing HTML tags

    Returns:
        str: Plain text
    """
    soup = BeautifulSoup(html_str, features="html.parser")
    return soup.get_text().replace("\n", " ")
d681ffdf2878f13673d66dadf964dcb2d0d06644
28,106
from datetime import datetime

def read_hotfilm_from_lvm(filename, dt=1e-3):
    """Reads 2-channel hotfilm data from a Labview text file."""
    times = []
    ch1 = []
    ch2 = []
    with open(filename) as f:
        data = [line.rstrip() for line in f.readlines()]
    line = data[0].split(',')[1:]
    t = [int(float(n)) for n in line[:5]]
    seconds = float(line[5])
    useconds = int(1e6 * (seconds - int(seconds)))
    start_time = datetime(t[0], t[1], t[2], t[3], t[4], int(seconds), useconds)
    seconds = 0
    for line in data:
        line = line.split(',')[1:]
        ch1.append(float(line[6]))
        ch2.append(float(line[7]))
        times.append(seconds)
        seconds += dt
    return start_time, times, ch1, ch2
e0dadac656120173e5833e6eb36498943613e8f5
28,107
def GetCategoryWrapper(func_name):
    """Return a C preprocessor token to test in order to wrap code.
    This handles extensions.
    Example: GetTestWrapper("glActiveTextureARB") = "CR_multitexture"
    Example: GetTestWrapper("glBegin") = ""
    """
    cat = Category(func_name)
    if (cat == "1.0" or cat == "1.1" or cat == "1.2" or
            cat == "Chromium" or cat == "GL_chromium" or cat == "VBox"):
        return ''
    elif (cat == '1.3' or cat == '1.4' or cat == '1.5' or
          cat == '2.0' or cat == '2.1'):
        # i.e. OpenGL 1.3 or 1.4 or 1.5
        return "OPENGL_VERSION_" + cat.replace(".", "_")
    else:
        assert cat != ''
        return cat.replace("GL_", "")
eba3d524b3bcebfe96d253fa9348e3927ae9c737
28,110
from typing import Dict

def get_capability_text(src_capability: SourceCapability) -> str:
    """
    Returns markdown format cell text for a capability, hyperlinked to
    capability feature page if known
    """
    capability_docs_mapping: Dict[SourceCapability, str] = {
        SourceCapability.DELETION_DETECTION: "../../../../metadata-ingestion/docs/dev_guides/stateful.md#removal-of-stale-tables-and-views",
        SourceCapability.DOMAINS: "../../../domains.md",
        SourceCapability.PLATFORM_INSTANCE: "../../../platform-instances.md",
        SourceCapability.DATA_PROFILING: "../../../../metadata-ingestion/docs/dev_guides/sql_profiles.md",
    }

    capability_doc = capability_docs_mapping.get(src_capability)
    return (
        src_capability.value
        if not capability_doc
        else f"[{src_capability.value}]({capability_doc})"
    )
df66582caa42828b5c0099c3399612973cfaab7c
28,111
import re

def camel_case_split(identifier):
    """CamelCase split"""
    matches = re.finditer(
        ".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
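A quick check of the split behaviour of camel_case_split above; the lookarounds keep runs of capitals (acronyms) together:

print(camel_case_split("camelCaseSplit"))    # ['camel', 'Case', 'Split']
print(camel_case_split("HTTPResponseCode"))  # ['HTTP', 'Response', 'Code']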
bfaf845a0fa6eae46a4a8c96f91aee8c755255f5
28,112
def init_formation_goal_msg():
    """Create formation goal msg

    Swarm goal is represented by a green sphere and arrow.

    Returns:
        [type]: [description]
    """
    msg = Marker()
    msg.header.frame_id = "world"
    msg.header.stamp = rospy.Time()
    msg.ns = "swarm"
    msg.id = 2
    msg.type = 2    # Sphere
    msg.action = 0  # Add

    msg.pose.position.x = 0
    msg.pose.position.y = 0
    msg.pose.position.z = 0

    msg.pose.orientation.x = 0.0
    msg.pose.orientation.y = 0.
    msg.pose.orientation.z = 0.0
    msg.pose.orientation.w = 1.0

    msg.scale.x = 0.03
    msg.scale.y = 0.03
    msg.scale.z = 0.03

    msg.color.a = 0.8
    msg.color.r = 0
    msg.color.g = 1
    msg.color.b = 0

    msg_pose = Marker()
    msg_pose.header.frame_id = "world"
    msg_pose.header.stamp = rospy.Time()
    msg_pose.ns = "swarm"
    msg_pose.id = 3
    msg_pose.type = 0    # Arrow
    msg_pose.action = 0  # Add

    msg_pose.pose.position.x = 0
    msg_pose.pose.position.y = 0
    msg_pose.pose.position.z = 0

    msg_pose.pose.orientation.x = 0.0
    msg_pose.pose.orientation.y = 0.
    msg_pose.pose.orientation.z = 0.0
    msg_pose.pose.orientation.w = 1.0

    msg_pose.scale.x = 0.08
    msg_pose.scale.y = 0.01
    msg_pose.scale.z = 0.01

    msg_pose.color.a = 0.8
    msg_pose.color.r = 0
    msg_pose.color.g = 1
    msg_pose.color.b = 0

    return (msg, msg_pose)
359e5422908b94693968fb73c7ad5e367d715e42
28,113
def allow_pay_as_you_go(**kwargs):
    """Allow Pay As You Go

    Set pay as you go account settings to ``allow=True``

    Reference: https://iexcloud.io/docs/api/#pay-as-you-go

    Data Weighting: ``Free``

    .. warning:: This endpoint is only available using IEX Cloud. See
       :ref:`Migrating` for more information.
    """
    return PayAsYouGo(allow=True, **kwargs).fetch()
f2435da3b6d8e8265bf19249fbe38c8734f06d1b
28,115
def _process_user_data_for_order(checkout: Checkout):
    """Fetch, process and return shipping data from checkout."""
    shipping_address = checkout.shipping_address

    if checkout.user:
        store_user_address(checkout.user, shipping_address, AddressType.SHIPPING)
        if (
            shipping_address
            and checkout.user.addresses.filter(pk=shipping_address.pk).exists()
        ):
            shipping_address = shipping_address.get_copy()

    return {
        "user": checkout.user,
        "user_email": checkout.get_customer_email(),
        "shipping_address": shipping_address,
        "customer_note": checkout.note,
    }
e6ab0d248469fadb9233dc45d563ec2c185da8e5
28,116
import numpy as np

def catastrophic_energy(rpb, rho, Vi):
    """
    Return the catastrophic disruption energy in c.g.s. units.
    Based on Stewart and Leinhardt (2009).

    rpb: parent-body radius in km
    rho: mean density of the system in cgs
    Vi:  impact velocity in km/s
    """
    # qs, qg, fi, mi: material constants
    if rho < 4.0:
        qs, qg, fi, mi = 7.0e4, 1e-4, 8, 0.5
    else:
        qs, qg, fi, mi = 500.0, 1.0e-4, 6., 0.4
    Mcomb = (4 / 3.) * np.pi * ((rpb * 1e5) ** 3) * rho
    Vi = (Vi * 1.e5)
    RC = ((3 * Mcomb) / (4 * np.pi)) ** (1 / 3.)
    QD = (qs * (RC ** (9 * mi * (3 - 2 * fi))) * (Vi ** (2 - 3 * mi))
          + qg * (RC ** (3 * mi)) * (Vi ** (2 - 3 * mi)))
    return QD
4b7472df40fa7e9dc2622854805055e28d2789f4
28,117
def hide_toolbar(notebook):
    """
    Finds the display toolbar tag and hides it
    """
    if 'celltoolbar' in notebook['metadata']:
        del notebook['metadata']['celltoolbar']
    return notebook
09d594f26f2ad8ee5af2563332ff77136713a7ef
28,118
def draft_github_comment(template, result):
    """
    Use a template to draft a GitHub comment

    :template: (str) the name of the template file
    :result: (ContentError) the error to display in the comment
    """
    # start with template
    with open(template, 'r') as f:
        contents = f.read()

    # replace variables in template with ContentError values
    for var in vars(result).keys():
        contents = contents.replace(f'{{{{ {var} }}}}', str(getattr(result, var)))

    return contents
42842b7af06da8a54c0647e5aac079132e82de5a
28,119
def get_domains(sequence, disorder,
                disorder_threshold=0.42,
                minimum_IDR_size=12,
                minimum_folded_domain=50,
                gap_closure=10,
                override_folded_domain_minsize=False):
    """
    Parameters
    -------------
    sequence : str
        Amino acid sequence

    disorder : list of floats
        List of per-residue disorder values. Must be same length as sequence.

    disorder_threshold : float
        Value that defines what 'disordered' is based on the metapredict
        disorder score. The higher the value the more stringent the cutoff.
        Default = 0.42.

    minimum_IDR_size : int
        Defines the smallest possible IDR. This is a hard limit - i.e. we
        CANNOT get IDRs smaller than this. Default = 12.

    minimum_folded_domain : int
        Defines where we expect the limit of small folded domains to be.
        This is NOT a hard limit and functions to modulate the removal of
        large gaps (i.e. gaps less than this size are treated less strictly).
        Note that, in addition, gaps < 35 are evaluated with a threshold of
        0.35*disorder_threshold and gaps < 20 are evaluated with a threshold
        of 0.25*disorder_threshold. These two lengthscales were decided
        based on the fact that coiled-coil regions (which are IDRs in
        isolation) often show up with reduced apparent disorder within IDRs,
        but can be as short as 20-30 residues. The minimum_folded_domain is
        used based on the idea that it allows a 'shortest reasonable' folded
        domain to be identified. Default = 50.

    gap_closure : int
        Defines the largest gap that would be 'closed'. Gaps here refer to
        a scenario in which you have two groups of disordered residues
        separated by a 'gap' of un-disordered residues. In general large
        gap sizes will favour larger contiguous IDRs. It's worth noting
        that gap_closure becomes relevant only when minimum_region_size
        becomes very small (i.e. < 5) because really gaps emerge when the
        smoothed disorder fit is "noisy", but when smoothed gaps are
        increasingly rare. Default = 10.

    override_folded_domain_minsize : bool
        By default this function includes a fail-safe check that assumes
        folded domains really shouldn't be less than 35 or 20 residues.
        However, for some approaches we may wish to over-ride these
        thresholds to match the passed minimum_folded_domain value. If this
        flag is set to True this override occurs. This is generally not
        recommended unless you expect there to be well-defined sharp
        boundaries which could define small (20-30) residue folded domains.
        Default = False.

    Returns
    ------------
    list
        This function takes an amino acid sequence and a disorder score and
        returns a 3-position tuple with the following information:

        [0] - Smoothed disorder score used to aid in domain boundary
              identification

        [1] - a list of elements, where each element is itself a list where
              position 0 and 1 define the IDR location and position 2 gives
              the actual IDR sequence

        [2] - a list of elements, where each element is itself a list where
              position 0 and 1 define the folded domain location and
              position 2 gives the actual folded domain sequence
    """
    # First set up for disorder smoothing function
    polynomial_order = 3  # larger means tighter fit. 3 works well...

    # define window size for smoothing function. Note it must be an odd
    # number, hence the adjustment below
    window_size = 2 * minimum_IDR_size
    if window_size <= polynomial_order:
        window_size = polynomial_order + 2

    if len(disorder) <= window_size:
        print('Warning: length of disorder [%i] is <= window_size [%i]. This happens when you have a small IDR relative to the minimum IDR size. Updating window size to match sequence length.' % (len(disorder), window_size))
        window_size = len(disorder)

    if window_size % 2 == 0:
        window_size = window_size - 1

    if polynomial_order >= window_size:
        polynomial_order = window_size - 1

    # smooth the disorder trace
    smoothed_disorder = savgol_filter(disorder, window_size, polynomial_order)

    # Using smoothed disorder extract out domains
    disordered_domain_info = __build_domains_from_values(
        smoothed_disorder,
        disorder_threshold,
        minimum_IDR_size=minimum_IDR_size,
        minimum_folded_domain=minimum_folded_domain,
        gap_closure=gap_closure,
        override_folded_domain_minsize=override_folded_domain_minsize)

    # finally cycle through and get the actual IDR and FD sequences. Note
    # the "if len(d) == 2" means we skip over cases where no FDs or no IDRs
    # were found
    idrs = []
    for d in disordered_domain_info[0]:
        if len(d) == 2:
            idrs.append([d[0], d[1], sequence[d[0]:d[1]]])

    fds = []
    for d in disordered_domain_info[1]:
        if len(d) == 2:
            fds.append([d[0], d[1], sequence[d[0]:d[1]]])

    return [smoothed_disorder, idrs, fds]
5d7f17e0cf2b54c2da927b4f2689dddb182dfb4a
28,120
def get_topic(topic_id):
    """
    Endpoint `/topic/<topic_id>`.

    The response contains the topic's JSON file.
    """
    try:
        file_path = queries.get_topic_file(topic_id)
        if file_path is None:
            err = flask.jsonify({"err_msg": "Unknown topic"})
            return err, 400
        return send_file(file_path, "application/json", True)
    except Exception:
        logger.exception("An error occurred: ")
        err = flask.jsonify({"err_msg": "An error occurred"})
        return err, 400
2fee779cb7c0937441bf11470da31759a8704c0e
28,121
def get_epaipm_file(filename, read_file_args, data_dir):
    """Reads in files to create dataframes.

    No need to use ExcelFile objects with the IPM files because each file
    is only a single sheet.

    Args:
        filename (str): ['single_transmission', 'joint_transmission']
        read_file_args (dict): dictionary of arguments for pandas read_*
        data_dir (path-like): Path to the top directory of the PUDL datastore.

    Returns:
        :class:`pandas.DataFrame`: a dataframe of EPA IPM data.
    """
    epaipm_file = {}
    logger.info(
        f"Extracting data from EPA IPM {filename} spreadsheet.")
    full_filename = get_epaipm_name(filename, data_dir)
    suffix = full_filename.suffix
    if suffix == '.xlsx':
        epaipm_file = pd.read_excel(
            full_filename,
            **read_file_args
        )
    elif suffix == '.csv':
        epaipm_file = pd.read_csv(
            full_filename,
            **read_file_args
        )
    return epaipm_file
0f19187bfc926abc926a5251fe52e48c93f9863c
28,122
import requests

def futures_rule(trade_date: str = "20200712") -> pd.DataFrame:
    """
    Guotai Junan Futures - trading calendar table
    https://www.gtjaqh.com/pc/calendar.html
    :return: trading calendar data
    :rtype: pandas.DataFrame
    """
    url = "https://www.gtjaqh.com/fn/128"
    params = {"base_date": f"{trade_date}"}
    r = requests.post(url, json=params)
    temp_df = pd.DataFrame(r.json()["data"])
    temp_df = temp_df[temp_df["tradingday"] == trade_date]
    if not temp_df["events"].values[0]:
        return f"{trade_date} is too early to query or is not a trading day"
    else:
        table_df = pd.read_html(temp_df["events"].values[0][0]["content"], header=1)[0]
        table_df.dropna(axis=1, how="all", inplace=True)
        return table_df
b15c538b11d73d22706786cc40fe9b480e647a63
28,123
import scipy.sparse.linalg

def conjgrad_scipy(A, Y, sigma, tol=1e-4):
    """Solve the least-squares system using Scipy's conjugate gradient."""
    Y, m, n, d, matrix_in = _format_system(A, Y)

    damp = m * sigma**2
    calcAA = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x
    G = scipy.sparse.linalg.LinearOperator(
        (n, n), matvec=calcAA, matmat=calcAA, dtype=A.dtype)
    B = np.dot(A.T, Y)

    X = np.zeros((n, d), dtype=B.dtype)
    infos = np.zeros(d, dtype='int')
    itns = np.zeros(d, dtype='int')
    for i in range(d):
        def callback(x):
            itns[i] += 1  # use the callback to count the number of iterations

        X[:, i], infos[i] = scipy.sparse.linalg.cg(
            G, B[:, i], tol=tol, callback=callback)

    info = {'rmses': npext.rms(Y - np.dot(A, X), axis=0),
            'iterations': itns,
            'info': infos}
    return X if matrix_in else X.flatten(), info
d25c147223df15e1934e62c59f166d50d11c3bbe
28,124
def get_total_revenue(data: list) -> float:
    """
    data: any list with product_id at [0] and revenue at [1]
    returns: total revenue for data
    """
    revenue = get_revenue(data)
    total = 0
    for r in revenue:
        total += r[1]
    return float(total)
0387a8aff8ff8163c284b395f1bce779ec6126c4
28,125
def plot_tttdprc(data_frame):
    """Plot Change Total Travel Time % vs distance"""
    figtt, axtt = plot_var(
        data_frame=data_frame,
        x_var="distance",
        y_var="totTT %",
        label_var="mpr",
        pivot="flow",
        x_label="Distance [m]",
        y_label=r"Change in Total TT [\%]",
        t_label="Flow [veh/h]: ",
        legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
        fnt_size={"fontsize": 16},
    )
    return figtt, axtt
e7fb39a208bfba0e27968d728cd9980adff7c2ba
28,126
import apyfal.host.alibaba as alibaba
from apyfal.host.alibaba import AlibabaCSP
import apyfal.exceptions as exc
from aliyunsdkcore import client
from aliyunsdkcore.acs_exception.exceptions import ServerException
import json
import pytest

def test_alibibaclass_request():
    """AlibabaHost._request"""
    # Mocks some variables
    client_id = 'dummy_access_key'
    secret_id = 'dummy_secret_key'
    region = 'dummy_region_id'
    action = 'DummyAction'
    parameters = {'DummyString': 'dummy_value',
                  'DummyNumber': 0,
                  'DummyList': ['dummy_value']}
    response = {'DummyResponse': 0}
    raises_exception = []
    status_desc = 'testing'

    # Mocks client
    class DummyAcsClient:
        """Mocked AcsClient"""

        def __init__(self, ak, secret, region_id):
            """Checks parameters"""
            assert ak == client_id
            assert secret == secret_id
            assert region_id == region

        @staticmethod
        def do_action_with_exception(acs_request):
            """Checks parameters, returns fake response and raises exceptions"""
            # Checks request
            assert acs_request.get_action_name() == action
            acs_request_params = acs_request.get_query_params()
            for param in parameters:
                assert param in acs_request_params
                assert isinstance(acs_request_params[param], str)
            assert 'ClientToken' in acs_request_params
            assert acs_request.get_protocol_type() == "https"

            # Raises fake exceptions
            if raises_exception:
                raise ServerException(*raises_exception)

            # Returns fake response
            return json.dumps(response)

    client_acs_client = client.AcsClient
    client.AcsClient = DummyAcsClient
    alibaba._AcsClient = DummyAcsClient

    # Tests
    try:
        csp = AlibabaCSP(client_id=client_id, secret_id=secret_id, region=region)

        # Everything OK
        assert csp._request(action, **parameters) == response

        # Raise exception
        raises_exception = ['DummyCode', 'dummy_message']
        with pytest.raises(exc.HostRuntimeException) as exc_info:
            csp._request(action, **parameters)
        for part in raises_exception:
            assert part in exc_info

        raises_exception[0] = 'InvalidParameter'
        with pytest.raises(exc.HostConfigurationException):
            csp._request(action, **parameters)

        raises_exception[0] = 'InvalidAccessKey'
        with pytest.raises(exc.HostAuthenticationException):
            csp._request(action, **parameters)

        # Filter codes
        raises_exception[0] = 'DummyCode'
        with pytest.raises(ServerException):
            csp._request(action, error_code_filter='DummyCode', **parameters)
        assert csp._request(
            action, error_code_ignore='DummyCode', **parameters) is None

        # Test "_instance_request"
        raises_exception = []
        assert csp._instance_request(action, **parameters) == response

        # Tests "_instance_request" timeout if instance with incorrect status
        raises_exception = ['IncorrectInstanceStatus', 'dummy_message']
        parameters['InstanceId'] = 'dummy_instance_id'
        csp.TIMEOUT = 0.0
        with pytest.raises(exc.HostRuntimeException) as exc_info:
            csp._instance_request(action, status_desc=status_desc, **parameters)
        assert status_desc in exc_info

        # Tests "_instance_request" still throws other exceptions
        raises_exception[0] = 'DummyCode'
        with pytest.raises(exc.HostRuntimeException) as exc_info:
            csp._instance_request(action, status_desc=status_desc, **parameters)
        for part in raises_exception:
            assert part in exc_info

    # Restore AcsClient
    finally:
        client.AcsClient = client_acs_client
        alibaba._AcsClient = client_acs_client
480b2408b55a3975bb317a3e0710753a6b256ed4
28,127
import pathlib

def write_pdf_file(
    fpath: pathlib.Path,
    puzzle: Puzzle,
    key: Key,
    level: int,
) -> pathlib.Path:
    """Write a PDF file of the current puzzle to `path`.

    Args:
        fpath (pathlib.Path): Path to write the PDF to.
        puzzle (Puzzle): Current Word Search puzzle.
        key (Key): Puzzle Answer Key.
        level (int): Puzzle level.

    Raises:
        OSError: The file could not be written.

    Returns:
        pathlib.Path: Final save path.
    """
    # setup the PDF document
    pdf = FPDF(orientation="P", unit="in", format="Letter")
    pdf.set_author(config.pdf_author)
    pdf.set_creator(config.pdf_creator)
    pdf.set_title(config.pdf_title)
    pdf.add_page()
    pdf.set_margin(0.5)

    # insert the title
    pdf.set_font("Helvetica", "B", config.pdf_title_font_size)
    pdf.cell(pdf.epw, 0.25, "WORD SEARCH", ln=2, align="C", center=True)
    pdf.ln(0.375)

    # calculate the puzzle size and letter font size
    pdf.set_left_margin(0.75)
    gsize = 7 / len(puzzle)
    gmargin = 0.6875 if gsize > 36 else 0.75
    font_size = 72 * gsize * gmargin
    info_font_size = font_size if font_size < 18 else 18
    pdf.set_font_size(font_size)

    # draw the puzzle
    for row in puzzle:
        for char in row:
            pdf.multi_cell(gsize, gsize, char, align="C", ln=3)
        pdf.ln(gsize)
    pdf.ln(0.25)

    # write word list info
    pdf.set_font("Helvetica", "BU", size=info_font_size)
    pdf.cell(
        pdf.epw,
        txt=f"Find words going {utils.get_level_dirs_str(level)}:",
        align="C",
        ln=2,
    )
    pdf.ln(0.125)

    # write word list
    pdf.set_font("Helvetica", "B", size=info_font_size)
    pdf.set_font_size(info_font_size)
    pdf.multi_cell(
        pdf.epw,
        info_font_size / 72 * 1.125,
        utils.get_word_list_str(key),
        align="C",
        ln=2,
    )

    # write the puzzle answer key
    # resetting the margin before rotating makes layout easier to figure
    pdf.set_margin(0)
    # rotate the page to write answer key upside down
    with pdf.rotation(angle=180, x=pdf.epw / 2, y=pdf.eph / 2):
        pdf.set_xy(pdf.epw - pdf.epw, 0)
        pdf.set_margin(0.25)
        pdf.set_font("Helvetica", size=config.pdf_key_font_size)
        pdf.write(txt="Answer Key: " + utils.get_answer_key_str(key))

    # write the final PDF to the filesystem
    try:
        pdf.output(fpath)
    except OSError:
        raise OSError(f"File could not be saved to '{fpath}'.")
    return fpath.absolute()
8a41134437c8a8989021164e92c17fe3d64284be
28,128
import csv

def get_zip_rate_areas(file_path):
    """
    reads zips.csv file and returns the content as a dictionary

    Args:
        file_path: the path to zips.csv file

    Returns:
        a dictionary mapping each zip code into a set of rate areas
    """
    zip_rate_areas = dict()

    with open(file_path, "r") as zip_codes_file:
        csv_reader = csv.DictReader(zip_codes_file, delimiter=",")
        for line in csv_reader:
            zipcode = line["zipcode"]
            rate_area = f"{line['state']} {line['rate_area']}"
            if zipcode not in zip_rate_areas:
                zip_rate_areas[zipcode] = set()
            zip_rate_areas[zipcode].add(rate_area)

    return zip_rate_areas
d8405bc466e7bbe949fded4360d4f184024653d2
28,130
def lucas_mod(n, mod):
    """
    Compute n-th element of Fibonacci sequence modulo mod.
    """
    P, Q = 1, -1
    x, y = 0, 1  # U_n, U_{n+1}, n=0
    for b in bin(n)[2:]:
        x, y = ((y - P * x) * x + x * y) % mod, (-Q * x * x + y * y) % mod  # double
        if b == "1":
            x, y = y, (-Q * x + P * y) % mod  # add
    return x
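A quick consistency check for lucas_mod above against a plain iterative Fibonacci, confirming the binary double-and-add recursion (with P=1, Q=-1 the Lucas sequence U_n is exactly the Fibonacci sequence):

def fib_mod_naive(n, mod):
    a, b = 0, 1
    for _ in range(n):
        a, b = b, (a + b) % mod
    return a

assert all(lucas_mod(n, 10**9 + 7) == fib_mod_naive(n, 10**9 + 7)
           for n in range(50))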
319e74c13c370becc255bbab9d304aa220e69a0d
28,131
def compute_demand_balancing(demand_sector, energy_service, energy_carrier):
    """aggregates the demand over balancing area and balancing time"""
    cols = get_group_cols_for_ec(energy_carrier)
    return compute_demand_with_coarseness(demand_sector,
                                          energy_service,
                                          energy_carrier,
                                          cols)
e70d9f42929ca88478f09ac0e5d5423457108de7
28,132
def get_file_encoding(filename):
    """
    Get the file encoding for the file with the given filename
    """
    with open(filename, 'rb') as fp:
        # The encoding is usually specified on the second line
        txt = fp.read().splitlines()[1]
    txt = txt.decode('utf-8')
    if 'encoding' in txt:
        encoding = txt.split()[-1]
    else:
        encoding = 'utf-8'  # default
    return str(encoding)
c91a8f71429ed5f6eccd7379c66b4ee4c2d73989
28,133
def alias(name):
    """Make property given by name be known under a different name"""
    def get(self):
        return getattr(self, name)
    def set(self, value):
        setattr(self, name, value)
    return property(get, set)
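A small usage sketch for alias above, exposing one attribute under a second name on a class:

class Point:
    def __init__(self, x):
        self.x = x
    horizontal = alias("x")  # reads and writes go through self.x

p = Point(3)
p.horizontal = 7
print(p.x)  # 7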
37539fbb2d413a4964fec09f9c1b40fed203dc34
28,134
from inspect import isclass

def is_feature_class(symbol):
    """Check whether the symbols imported from a module correspond to
    classes defining a feature.

    We assume that the symbol in question defines a feature if it inherits
    from the FeatureDefinition class.

    :param symbol: An imported symbol to check
    :type symbol: object
    :returns: Whether the symbol defines a feature
    :rtype: bool
    """
    return isclass(symbol) and issubclass(symbol, FeatureDefinition) and \
        symbol is not FeatureDefinition
f7556869105e3a20c05e05560baf56759d08f374
28,136
def overall_sentiment(text):
    """Function to calculate the overall sentiment using NLTK's vader library.

    param text: The input text blob being entered by user
    """
    sid = SentimentIntensityAnalyzer()
    ss = sid.polarity_scores(text)
    # the original looped over sorted(ss), but every branch returns on the
    # first iteration, so only the compound score matters
    if ss["compound"] >= 0.15:
        return "positive"
    elif ss["compound"] <= -0.01:
        return "negative"
    else:
        return "neutral"
faf5ba826b6affbb9ee7f972fa55098b681a15ee
28,139
def new_category(blog_id, username, password, category_struct):
    """
    wp.newCategory(blog_id, username, password, category)
    => category_id
    """
    authenticate(username, password, 'zinnia.add_category')
    category_dict = {'title': category_struct['name'],
                     'description': category_struct['description'],
                     'slug': category_struct['slug']}
    if int(category_struct['parent_id']):
        category_dict['parent'] = Category.objects.get(
            pk=category_struct['parent_id'])
    category = Category.objects.create(**category_dict)
    return category.pk
9de1684fca80b7bf1a0190be1f239e2b4cae7428
28,140
def scatt(coords, id, plot_col='w', bckg_col='k', z_dim=0, **kwargs):
    """
    2D scatter plot

    Parameters
    ----------
    coords : np.array
    id : str
    plot_col : str, default = 'w'
    bckg_col : str, default = 'k'
    z_dim : int, default = 0
    axes : AxesSubplot, optional

    Returns
    -------
    ax : AxesSubplot
    """
    # Handle any axes passed via kwargs
    ax = plotKwargs(kwargs, id)

    if len(coords.shape) == 2:
        # 2D COORDINATE ARRAY
        scatter_x = coords[:, 0]
        scatter_y = coords[:, 1]
    else:
        # 3D COORDINATE ARRAY
        scatter_x = coords[:, 0, z_dim]
        scatter_y = coords[:, 1, z_dim]

    ax.set_facecolor(bckg_col)
    ax.scatter(x=scatter_x, y=scatter_y, s=10,
               facecolors=plot_col, edgecolors='none')
    ax.set_aspect('equal')

    return ax
a59d176e1032c745b9b066fe9dc306c7ecae04a9
28,141
import re

def keep_text_characters(text):
    """
    INPUT: text - Python str object - the raw text
    OUTPUT: text - Python str object - the text after removing all
            non-text characters
    """
    filtered_tokens = []
    tokens = tokenize_text(text)
    for token in tokens:
        if re.search('[a-zA-Z]', token):
            filtered_tokens.append(token)
    filtered_text = ' '.join(filtered_tokens)
    return filtered_text
e0da2f6517c1aa2b0f34349583147323a4c17ef7
28,142
def global_access(key, val):
    """function test"""
    local = 1
    MY_DICT[key] = val
    for i in val:
        if i:
            del MY_DICT[i]
            continue
        else:
            break
    else:
        return local
b48b8e86f266abe79f64d665ea73055e133f04ce
28,143
def generate_auxiliar_basis(
        sett: Settings, auxiliar_basis: str, quality: str) -> Settings:
    """Generate the `auxiliar_basis` for all the atoms in the `sett`.

    Use the `quality` of the auxiliary basis provided by the user.
    """
    quality_to_number = {"low": 0, "medium": 1, "good": 2, "verygood": 3,
                         "excellent": 4}
    kind = sett.cp2k.force_eval.subsys.kind
    for atom in kind.keys():
        index = quality_to_number[quality.lower()]
        cfit = aux_fit[atom][index]
        kind[atom]["basis_set"].append(f"AUX_FIT CFIT{cfit}")

    return sett
178e9f5eee8d192252ad48f6df19dfab670d9f4c
28,144
import pathlib

def extract_user_inputs(job: Job):
    """Extract the various combinations of user inputs for a job."""
    # variables to be used while looping through the runs
    job_id = job.id
    results_folder = pathlib.Path('data', job_id).resolve()

    # collect the per-run result paths
    Directory = {}
    Directory["File Path"] = [
        str(results_folder.joinpath(i.id).resolve()) + '\\eplusout.sql'
        for i in job.runs]
    Directory["Job Path"] = [str(results_folder) for i in job.runs]
    print(Directory)

    # loop through the runs and find the '_name_' input of each run
    inp_value = []
    for run in job.runs:
        for inp in run.status.inputs:
            if inp.name == '_name_':
                try:
                    Name = inp.value
                except KeyError:
                    print("na")
        inp_value.append(Name)

    Directory['Simulation Name'] = inp_value
    DF = pd.DataFrame.from_dict(Directory)
    FP = str(results_folder) + "\\Directory.xlsx"
    DF.to_excel(FP)
    print(FP)
    return DF
a0bcd3db9ff90c30b6981c9f76c54531da0e2d49
28,145
def __process_args(args):
    """
    Process the command-line arguments and prompt the user for any missing information
    :param args: the command-line arguments list
    :raises CLAException: if an error occurs while validating and processing the command-line arguments
    """
    cla_util = CommandLineArgUtil(_program_name, __required_arguments, __optional_arguments)
    required_arg_map, optional_arg_map = cla_util.process_args(args, True)

    __verify_required_args_present(required_arg_map)
    __process_java_home_arg(optional_arg_map)
    __process_domain_location_args(optional_arg_map)
    __process_model_args(optional_arg_map)

    #
    # Verify that the domain type is a known type and load its typedef.
    #
    domain_type = required_arg_map[CommandLineArgUtil.DOMAIN_TYPE_SWITCH]
    domain_typedef = DomainTypedef(_program_name, domain_type)
    optional_arg_map[CommandLineArgUtil.DOMAIN_TYPEDEF] = domain_typedef

    __process_rcu_args(optional_arg_map, domain_type, domain_typedef)
    __process_encryption_args(optional_arg_map)

    combined_arg_map = optional_arg_map.copy()
    combined_arg_map.update(required_arg_map)
    model_context = ModelContext(_program_name, combined_arg_map)
    domain_typedef.set_model_context(model_context)
    return model_context
8806a4050a1cbcfa549584da4832a1c6e7f7faa6
28,146
from typing import Any

def float_converter(value: Any) -> float:
    """Validator that ensures value is a float."""
    # reject bools explicitly first, since bool is a subclass of int
    if isinstance(value, bool):
        raise ValueError()
    if isinstance(value, float):
        return value
    elif isinstance(value, int):
        return float(value)
    else:
        raise ValueError()
c46832a017c0d83017e75fa58090a030bc5091c2
28,147
from operator import add

def generate_row(world_x, world_rpy=[0.0, 0.0, 0.0],
                 algae_obj_dir=None, algae_texture_dir=None):
    """Generate a row of algaes using the given world transform"""
    components = []
    buoy_z = 0
    rope_y = 0
    rope_z = buoy_z + 2
    anchoring_buoy_params = {
        'west': {
            'y_offset': -95,
            'rpy': [0, 0, 3.14]
        },
        'east': {
            'y_offset': 95,
            'rpy': [0, 0, 0]
        }
    }

    # Add rope
    rope_world_xyz = [world_x, rope_y, rope_z]
    rope = generate_static_obj_def(name="Rope",
                                   look="black",
                                   mesh="$(param rope)",
                                   world_xyz=rope_world_xyz,
                                   world_rpy=world_rpy)
    components.append(rope)

    # Add A0 buoys, 1 per 10 meters
    for y in range(rope_y - 85, rope_y + 90, 10):
        a0_buoy = generate_a0_buoy_group(world_xyz=[world_x, y, buoy_z],
                                         world_rpy=world_rpy)
        components.append(a0_buoy)

    # Add anchoring to the end of the rope
    for _, param in anchoring_buoy_params.items():
        anchoring_buoy = generate_anchoring_group(
            world_xyz=[world_x, rope_y + param['y_offset'], buoy_z],
            world_rpy=list(map(add, world_rpy, param['rpy'])))
        components.append(anchoring_buoy)

    # Attach algaes to the rope if both algae_obj_dir and algae_texture_dir given
    if algae_obj_dir and algae_texture_dir:
        algae_data = get_algae_obj_and_textures(algae_obj_dir, algae_texture_dir)
        algae_row = generate_algae_row(rope_world_xyz=rope_world_xyz,
                                       rope_world_rpy=world_rpy,
                                       algae_data=algae_data)
        components.append(algae_row)

    return '\n'.join(components)
c8ee8afab4d3ca4ebf386fcd0795aad9c7768eef
28,149
def split_chunk_for_display(raw_bytes):
    """
    Given some raw bytes, return a display string

    Only show the beginning and end of largish (2x CONTENT_CHUNK_SIZE) arrays.

    :param raw_bytes:
    :return: display string
    """
    CONTENT_CHUNK_SIZE = 50  # Content repeats after chunks this big - used by echo client, too
    if len(raw_bytes) > 2 * CONTENT_CHUNK_SIZE:
        result = (repr(raw_bytes[:CONTENT_CHUNK_SIZE]) + " ... "
                  + repr(raw_bytes[-CONTENT_CHUNK_SIZE:]))
    else:
        result = repr(raw_bytes)
    return result
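A short demonstration of the truncation behaviour of split_chunk_for_display above:

short = b"abc"
big = bytes(range(256)) * 2  # 512 bytes, well over the 100-byte cutoff
print(split_chunk_for_display(short))  # b'abc' verbatim
print(split_chunk_for_display(big))    # first 50 bytes ... last 50 bytes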
de53bf679552c52f5075aedf9156cdb3b3fc69f7
28,150
def get_cvi(nodes):
    """
    Returns a dictionary whose keys correspond to samples, and whose values
    are sets of tuples, where each tuple represents the two characters for
    which this sample caused incompatibility.

    :param nodes: A list of target nodes, where each node is in the form
        'Ch1|Ch2|....|Chn'
    :return: A dictionary whose keys correspond to samples, and whose values
        are sets of tuples, where each tuple represents the two characters
        for which this sample caused incompatibility
    """
    compatability_network, cvi = build_incompatability_graph_and_violating_samples(nodes)
    return cvi
aeae708f8e7cd2bc1a3863dd4e77b00be8c2fc4a
28,151
import torch
import torch.nn as nn
from torch.autograd import Variable

def warp(x, flo):
    """
    warp an image/tensor (im2) back to im1, according to the optical flow

    x: [B, C, H, W] (im2)
    flo: [B, 2, H, W] flow
    """
    B, C, H, W = x.size()
    # mesh grid
    xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
    yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
    xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
    yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
    grid = torch.cat((xx, yy), 1).float()

    if x.is_cuda:
        grid = grid.cuda()
    vgrid = Variable(grid) + flo

    # scale grid to [-1,1]
    vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :] / max(W - 1, 1) - 1.0
    vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :] / max(H - 1, 1) - 1.0

    vgrid = vgrid.permute(0, 2, 3, 1)
    output = nn.functional.grid_sample(x, vgrid)
    mask = torch.autograd.Variable(torch.ones(x.size())).cuda()
    mask = nn.functional.grid_sample(mask, vgrid)

    # if W == 128:
    #     np.save('mask.npy', mask.cpu().data.numpy())
    #     np.save('warp.npy', output.cpu().data.numpy())

    mask[mask < 0.9999] = 0
    mask[mask > 0] = 1

    return output * mask
9f5ab5338f18cf249962454deff996a556f7b2ea
28,153
import logging

async def search(username):
    """
    Do Maigret search on a chosen username
    :return: - list of telegram messages
             - list of dicts with found results data
    """
    try:
        results = await maigret_search(username)
    except Exception as e:
        logging.error(e)
        return ['An error occurred, send username once again.'], []

    found_exact_accounts = []
    general_results = []
    general_results.append((username, id_type, results))
    report_context = generate_report_context(general_results)
    save_pdf_report(f"{username}_report.pdf", report_context)

    for site, data in results.items():
        if data['status'].status != QueryStatus.CLAIMED:
            continue
        url = data['url_user']
        account_link = f'[{site}]({url})'
        # filter inaccurate results
        if not data.get('is_similar'):
            found_exact_accounts.append(account_link)

    if not found_exact_accounts:
        return [], []

    messages = merge_sites_into_messages(found_exact_accounts)
    # full found results data
    results = list(filter(lambda x: x['status'].status == QueryStatus.CLAIMED,
                          list(results.values())))
    return messages, results
b0cbebdea838c0a0a422af4ece86c8c0af6a7aff
28,154
def train(frame, columns, mean_centered=True, k=None):
    """
    Creates a PcaModel by training on the given frame

    Parameters
    ----------

    :param frame: (Frame) A frame of training data.
    :param columns: (str or list[str]) Names of columns containing the observations for training.
    :param mean_centered: (bool) Whether to mean center the columns.
    :param k: (int) Principal component count. Default is the number of observation columns.
    :return: (PcaModel) The trained PCA model
    """
    if frame is None:
        raise ValueError("frame cannot be None")

    tc = frame._tc
    _scala_obj = get_scala_obj(tc)
    scala_columns = tc.jutils.convert.to_scala_vector_string(columns)
    if not isinstance(mean_centered, bool):
        raise ValueError("mean_centered must be a bool, received %s" % type(mean_centered))
    scala_k = tc.jutils.convert.to_scala_option(k)
    scala_model = _scala_obj.train(frame._scala, scala_columns, mean_centered, scala_k)
    return PcaModel(tc, scala_model)
057261f8a1fd7d2a02f094d35fb28a5084f5ab47
28,155
def validate_spin(value):
    """
    Validate the value of the spin
    """
    if not isinstance(value, int):
        raise ValidationError('spin must be integer')

    if value < -1 or value > 1:
        raise ValidationError('spin must be among -1, 1 or 0 (undefined)')

    return value
d651d8ace39e92d324c823ab88a5b0e1a6637760
28,156
def _get_basis_functions(basis, deriv):
    """
    Returns a list of interpolation functions for the interpolation
    definition and derivatives specified by the user. Also returns the
    number of dimensions as defined in the basis parameter.
    """
    # List of basis functions
    bsfn_list = {
        'L1': [L1, L1d1, L1d1d1],
        'L2': [L2, L2d1],
        'L3': [L3, L3d1],
        'L4': [L4, L4d1],
        'H3': [H3, H3d1, H3d1d1],
        'T11': [T11],
        'T22': [T22],
        'T33': [T33, T33d1, T33d2],
        'T44': [T44, T44d1, T44d2]}

    # Set the index of the basis function in BFn from the deriv input
    di = []
    if deriv is None:
        for bs in basis:
            di.append(0)
    else:
        ind = 0
        for bs in basis:
            if bs[0] == 'T':
                if deriv[ind:ind + 2] == [0, 0]:
                    di.append(0)
                elif deriv[ind:ind + 2] == [1, 0]:
                    di.append(1)
                elif deriv[ind:ind + 2] == [0, 1]:
                    di.append(2)
                else:
                    raise ValueError(
                        'Derivative (%d) for %s basis not implemented'
                        % (ind, bs))
                ind += 2
            else:
                di.append(deriv[ind])
                ind += 1

    # Set the basis function pointers and index in X for each basis in
    # the basis input
    dimensions = 0
    basis_functions = []
    for ind, bs in enumerate(basis):
        if bs[0] == 'T':
            if bs in bsfn_list.keys():
                basis_functions.append([bsfn_list[bs][di[ind]],
                                        [dimensions, dimensions + 1]])
                dimensions += 2
        else:
            if bs in bsfn_list.keys():
                basis_functions.append([bsfn_list[bs][di[ind]],
                                        [dimensions]])
                dimensions += 1

    return basis_functions, dimensions
1116989fd5d3af74bb0c499e930f98013b3b1b9a
28,157
from typing import List
import logging

def get_all_loggers() -> List:
    """Return list of all registered loggers."""
    logger_dict = logging.root.manager.loggerDict  # type: ignore
    loggers = [logging.getLogger(name) for name in logger_dict]
    return loggers
97059a78925ff669a841644b186e39ccd366472d
28,158
from typing import cast

def argmax(input, axis=None, dtype='int64', name=None):
    """Index of the maximum value along an axis.

    Parameters
    ----------
    input : tensor_like
        Input tensor.
    axis : () tensor_like, default=None
        Axis along which to extract the maximum index.
        If None, work on the flattened tensor.
    dtype : str or type, default='int64'
        Data type of the returned index.
    name : str, optional
        Name for the operation.

    Returns
    -------
    arg : tensor[dtype] or array[dtype]
        Index of the maximum value along an axis.
    """
    if is_tensor(input, 'tf'):
        return tf.math.argmax(input, axis=axis, output_type=dtype, name=name)
    else:
        input = np.argmax(input, axis=axis)
        if dtype is not None:
            # NOTE: typing.cast takes (type, value) and performs no
            # conversion; the (value, dtype) call pattern here suggests a
            # library-local dtype-casting helper is what is actually intended
            input = cast(input, dtype)
        return input
2c04eaafadd91b19bbd3342ece5ad0a6cefa3cc0
28,159
def __clean(contents):
    """
    remove comments from the list 'contents', and remove
    unnecessary whitespace
    """
    answer = []
    for line in contents:
        cleaned = line.strip()
        if len(cleaned) > 0:
            if cleaned[0] != "#":
                answer.append(cleaned)
    return answer
e7c6a1f6a7b065fbc4da9e32f37f9e3b152848b4
28,160
import inspect

def _get_extract_params():
    """
    Utility function for extracting the parameters from a Prophet model.

    The following attributes are not considered for parameter extraction
    for the reasons listed:

    The changepoints attribute is being removed due to the fact that it is
    a utility NumPy array defining the detected changepoint indexes
    (datetime values) from the input training data set. There is no utility
    to recording this for parameter adjustment or historical reference as
    it is used exclusively internally by Prophet.

    The changepoints_t attribute is being removed for a similar reason as
    changepoints is. It is a list of changepoint datetime values that are
    generated during model training and cross validation that are outside
    of the control of the end-user.

    The seasonalities attribute is a collection that is populated during
    fit of the model and is not a parameter that can be adjusted directly
    by the user.

    stan_fit is a result of the underlying solver's iterations for fitting.
    It is not useful to collect as a model parameter as it cannot be
    directly changed by the user.

    params are additional attributes set during training that are not
    directly controllable by the user.

    history and history_dates are extracted copies of the underlying
    training data used during training of the model and to generate certain
    plots from within the library.

    train_component_cols are generated during training as part of the trend
    decomposition. A user has no ability to modify this behavior.

    :return: A list of parameters to extract from a Prophet model for model
        tracking purposes.
    """
    denylist = {
        "changepoints",
        "changepoints_t",
        "seasonalities",
        "stan_fit",
        "params",
        "history",
        "history_dates",
        "train_component_cols",
    }
    prophet_signature = [
        attr
        for attr, value in inspect.getmembers(Prophet())
        if not callable(value) and not attr.startswith("_") and attr not in denylist
    ]
    return prophet_signature
00b9c6526be2cc78766426386d443f3500e8668c
28,161
from typing import Mapping

def deep_update(target, source):
    """Update a nested dictionary with another nested dictionary."""
    for key, value in source.items():
        if isinstance(value, Mapping):
            target[key] = deep_update(target.get(key, {}), value)
        else:
            target[key] = value
    return target
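A quick example of deep_update above, showing that nested keys merge rather than overwrite:

base = {"a": 1, "nested": {"x": 1, "y": 2}}
patch = {"nested": {"y": 20, "z": 30}}
print(deep_update(base, patch))
# {'a': 1, 'nested': {'x': 1, 'y': 20, 'z': 30}}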
01ca285ed907850b17490290c6eb92f931ed537a
28,162
from typing import Callable

import jax
import numpy as np

def distmat(func: Callable, x: np.ndarray, y: np.ndarray) -> np.ndarray:
    """distance matrix"""
    return jax.vmap(lambda x1: jax.vmap(lambda y1: func(x1, y1))(y))(x)
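A small usage sketch for distmat above: pairwise squared Euclidean distances via the two nested vmaps (outer over rows of x, inner over rows of y):

import jax.numpy as jnp

x = jnp.array([[0.0], [1.0], [2.0]])
y = jnp.array([[0.0], [3.0]])
sqdist = lambda a, b: jnp.sum((a - b) ** 2)
print(distmat(sqdist, x, y))
# [[0. 9.]
#  [1. 4.]
#  [4. 1.]]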
0c09289e795f9f90af1a3d0363eefb82eebe6822
28,164
def feature_length(attribute_name_var):
    """
    Return the number of values an attribute can take
    """
    attribute = attrDictionary[attribute_name_var]
    attribute_index, attribute_values = attribute
    return len(attribute_values)
4aa5816d38dac02cc528d6d5ee528fcaf5e39b4a
28,165
def iso2_to_country_name(country_2_code):
    """Convert country code to country name."""
    if country_2_code not in COUNTRY_ALPHA2_TO_COUNTRY_NAME:
        print(country_2_code, 'NOT FOUND in iso2 to country_name')
        raise KeyError
    return COUNTRY_ALPHA2_TO_COUNTRY_NAME[country_2_code]
4c3e5f46a8c7aac6851a1d304c89e298fcc31a71
28,166
from typing import List
from typing import Dict

def get_related_objects(api_key: str, obj: MISPObject, rel: str,
                        disable_output: bool = False) -> List[Dict]:
    """Gets related objects from VT."""
    if obj.name == "file":
        vt_id = obj.get_attributes_by_relation("sha256")[0].value
    else:
        print_err("[REL] Currently only file objects are supported.")
        return []

    if not disable_output:
        print(f"[REL] Receiving {rel} for {vt_id}...")

    with VTClient(api_key) as client:
        res = client.get(f"/files/{vt_id}/{rel}?limit=40").json()
    if "error" in res:
        print_err(f"[REL] Error during receiving related objects: {res['error']}.")
        return []

    related_objects = []
    for related_object in res.get("data", []):
        if "error" in related_object:
            print_err(f"[REL] File {related_object['id']} not available on VT.")
        else:
            related_objects.append(related_object)

    if not disable_output:
        print(f"[REL] Got {len(related_objects)} {rel} objects.")
    return related_objects
5c0c36febc2f5c79c98496b835dbb2e8e5b564e6
28,167
def _take_along_axis(arr, indices, axis):
    """Implements a simplified version of np.take_along_axis
    if numpy version < 1.15"""
    if np_version >= parse_version('1.15'):
        return np.take_along_axis(arr=arr, indices=indices, axis=axis)
    else:
        if axis is None:
            arr = arr.flatten()

        if not np.issubdtype(indices.dtype, np.intp):
            raise IndexError('`indices` must be an integer array')
        if arr.ndim != indices.ndim:
            raise ValueError(
                "`indices` and `arr` must have the same number of dimensions")

        shape_ones = (1,) * indices.ndim
        dest_dims = (
            list(range(axis)) + [None] + list(range(axis + 1, indices.ndim))
        )

        # build a fancy index, consisting of orthogonal aranges, with the
        # requested index inserted at the right location
        fancy_index = []
        for dim, n in zip(dest_dims, arr.shape):
            if dim is None:
                fancy_index.append(indices)
            else:
                ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim + 1:]
                fancy_index.append(np.arange(n).reshape(ind_shape))

        fancy_index = tuple(fancy_index)
        return arr[fancy_index]
b7eb8c49503e6364694af3becadf7faba5814f68
28,168
def moveeffect_022(score: int, move: Move, user: Pokemon, target: Pokemon,
                   battle: AbstractBattle) -> int:
    """
    Move Effect Name: Increases user evasion by one stage
    """
    if move.category == MoveCategory.STATUS:
        if user.boosts.get("evasion", 0) == 6:
            score -= 90
        else:
            score -= 10 * user.boosts.get("evasion", 0)
    else:
        if user.boosts.get("evasion", 0) < 0:
            score += 20
    return score
bde07c2101132d44e78bb9877689d7ffc36058e7
28,169
def chan_freq(header, fine_channel, tdwidth, ref_frame):
    """
    Args:
        header: header dict with NAXIS1, FCNTR, DELTAF and baryv entries
        fine_channel: index of the fine channel
        tdwidth: total width in fine channels
        ref_frame: 1 to apply the barycentric doppler correction

    Returns:
        the frequency of the fine channel
    """
    fftlen = header['NAXIS1']
    chan_index = fine_channel - (tdwidth - fftlen) / 2
    chanfreq = header['FCNTR'] + (chan_index - fftlen / 2) * header['DELTAF']
    # apply doppler correction
    if ref_frame == 1:
        chanfreq = (1 - header['baryv']) * chanfreq
    return chanfreq
b7cb67d2d7b21a475fdaddee47ebbd6d112125f8
28,170
import torch
from typing import Any
from typing import Dict

def extract_logger_info(
    model_a: torch.nn.Module,
    model_b: torch.nn.Module,
    model_name_to_use_for_layer_names: str,
) -> Any:
    """
    Extracts intermediate activations from model_a and model_b.
    """
    model_name_a, results_a = _extract_logger_info_one_model(model_a)
    model_name_b, results_b = _extract_logger_info_one_model(model_b)
    assert len(results_a) == len(results_b), 'results length mismatch'
    results: Dict[str, Any] = {}
    if len(results_a) == 0:
        return results
    for op_idx in range(len(results_a[0])):
        # currently using global_idx for layer_name; pick it from whichever
        # model was requested
        layer_name = (
            results_a[0][op_idx][0]
            if model_name_to_use_for_layer_names == model_name_a
            else results_b[0][op_idx][0])
        values_a = [results_a[forward_idx][op_idx][3]
                    for forward_idx in range(len(results_a))]
        values_b = [results_b[forward_idx][op_idx][3]
                    for forward_idx in range(len(results_b))]
        node_output = {
            model_name_a: [{
                'type': 'node_output',
                'values': values_a,
                'ref_node_target_type': str(results_a[0][op_idx][2]),
                'fqn': str(results_a[0][op_idx][1]),
                'index_of_arg': 0,
                'index_within_arg': 0,
            }],
            model_name_b: [{
                'type': 'node_output',
                'values': values_b,
                'ref_node_target_type': str(results_b[0][op_idx][2]),
                'fqn': str(results_b[0][op_idx][1]),
                'index_of_arg': 0,
                'index_within_arg': 0,
            }],
        }
        results[layer_name] = {
            'node_output': node_output,
        }
    return results
73872f1a3083373a8767ca60668479e24b0aa240
28,171
def make_masked_msa(protein, config, replace_fraction):
    """Create data for BERT on raw MSA."""
    # Add a random amino acid uniformly
    random_aa = tf.constant([0.05] * 20 + [0., 0.], dtype=tf.float32)

    categorical_probs = (
        config.uniform_prob * random_aa +
        config.profile_prob * protein['hhblits_profile'] +
        config.same_prob * tf.one_hot(protein['msa'], 22))

    # Put all remaining probability on [MASK] which is a new column
    pad_shapes = [[0, 0] for _ in range(len(categorical_probs.shape))]
    pad_shapes[-1][1] = 1
    mask_prob = 1. - config.profile_prob - config.same_prob - config.uniform_prob
    assert mask_prob >= 0.
    categorical_probs = tf.pad(
        categorical_probs, pad_shapes, constant_values=mask_prob)

    sh = shape_helpers.shape_list(protein['msa'])
    mask_position = tf.random.uniform(sh) < replace_fraction

    bert_msa = shaped_categorical(categorical_probs)
    bert_msa = tf.where(mask_position, bert_msa, protein['msa'])

    # Mix real and masked MSA
    protein['bert_mask'] = tf.cast(mask_position, tf.float32)
    protein['true_msa'] = protein['msa']
    protein['msa'] = bert_msa

    return protein
47d3f40c27c6a6722ac7a8cab21844decdd21a0f
28,172
from winguhub.settings import HTTP_SERVER_ROOT


def get_httpserver_root():
    """
    Construct wingufile httpserver address and port.

    Returns:
        Constructed httpserver root.
    """
    assert HTTP_SERVER_ROOT is not None, "HTTP_SERVER_ROOT is not set."
    return HTTP_SERVER_ROOT
5327f05fdbdbd062435c90d51cb8ee1667199e9f
28,173
import numpy as np


def csys_torusv1_to_xyz_jacobian(v: np.ndarray, rho_0: float, rho_1: float, aspect: float = 1) -> np.ndarray:
    """
    Compute Jacobian matrix of the toroidal coordinate system with respect to
    the Cartesian coordinate system.

    :param v: Toroidal coordinates (r, psi, phi)
    :param rho_0: Torus Radius (major)
    :param rho_1: Torus Radius (minor)
    :param aspect: Cross section aspect ratio (a / b)
    :return: Jacobian matrix
    """
    dr = np.array([
        -rho_1 * np.sin(v[1] / 2) * np.cos(v[2]) * np.cos(v[1]),
        rho_1 * np.sin(v[1] / 2) * np.cos(v[2]) * np.sin(v[1]),
        rho_1 * np.sin(v[1] / 2) * np.sin(v[2]) / aspect
    ])
    dpsi = np.array([
        rho_0 * np.sin(v[1])
        + v[0] * rho_1 * np.sin(v[1] / 2) * np.cos(v[2]) * np.sin(v[1])
        - 0.5 * v[0] * rho_1 * np.cos(v[1] / 2) * np.cos(v[2]) * np.cos(v[1]),
        rho_0 * np.cos(v[1])
        + v[0] * rho_1 * np.sin(v[1] / 2) * np.cos(v[2]) * np.cos(v[1])
        + 0.5 * v[0] * rho_1 * np.cos(v[1] / 2) * np.cos(v[2]) * np.sin(v[1]),
        v[0] * rho_1 / 2 / aspect * np.cos(v[1] / 2) * np.sin(v[2])
    ])
    dphi = np.array([
        v[0] * rho_1 * np.sin(v[1] / 2) * np.sin(v[2]) * np.cos(v[1]),
        -v[0] * rho_1 * np.sin(v[1] / 2) * np.sin(v[2]) * np.sin(v[1]),
        v[0] * rho_1 * np.sin(v[1] / 2) * np.cos(v[2]) / aspect
    ])
    # Columns are the partial derivatives with respect to r, psi and phi.
    return np.column_stack((dr, dpsi, dphi))
e402a0fbc6f8f33ccc556ab9eeaeee323f3fde99
28,174
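# Illustrative evaluation (arbitrary numbers): the determinant of the
# Jacobian gives the local volume element of the (r, psi, phi) -> (x, y, z)
# coordinate map at the chosen point.
J = csys_torusv1_to_xyz_jacobian(np.array([0.5, 1.0, 0.3]), rho_0=2.0, rho_1=0.5)
volume_element = np.linalg.det(J)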
import re

# Module-level patterns assumed from the original source; these mirror the
# usual SQuAD-style normalization (strip articles, turn punctuation to spaces).
re_art = re.compile(r'\b(a|an|the)\b')
re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')


def normalize_answer(s):
    """
    Lower text and remove punctuation, articles and extra whitespace.
    """

    def remove_articles(text):
        return re_art.sub(' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        return re_punc.sub(' ', text)  # convert punctuation to spaces

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
1146af739b33992aae551a0b1ab5059c0d645608
28,175
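# Example normalization (illustrative string):
normalize_answer("The  quick, brown fox!")  # -> 'quick brown fox'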
import typing def intensity(pixel: typing.Tuple[int, int, int]) -> int: """Sort by the intensity of a pixel, i.e. the sum of all the RGB values.""" return pixel[0] + pixel[1] + pixel[2]
5ba060d409a2f0df148cdc50684b80f920cf314f
28,176
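# Typical use of intensity as a sort key, e.g. for pixel sorting
# (sample pixels only):
pixels = [(10, 20, 30), (0, 0, 0), (255, 255, 255)]
pixels.sort(key=intensity)  # darkest first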
# Imports assumed from the original module (TFDV-style statistics loading).
import tensorflow as tf
from tensorflow_metadata.proto.v0 import statistics_pb2


def load_statistics(input_path):
    """Loads data statistics proto from file.

    Args:
      input_path: Data statistics file path.

    Returns:
      A DatasetFeatureStatisticsList proto.
    """
    # next() instead of the Python 2-only .next() method.
    serialized_stats = next(tf.python_io.tf_record_iterator(input_path))
    result = statistics_pb2.DatasetFeatureStatisticsList()
    result.ParseFromString(serialized_stats)
    return result
f7c094e79e7f6c8654aa703cd75da62164f9d421
28,177
# pandas/seaborn imports assumed by this plotting helper; get_conv_title and
# get_readable_float_fmt_string are module-local helpers.
import pandas as pd
import seaborn as sns


def plot_grid(data):
    """
    Plot a grid of heatmaps showing performance for each tile pairing.
    """

    def _draw_heatmap(*args, **kwargs):
        """ Draw a heatmap showing flops for (ch_vect x feat_vect). """
        data = kwargs.pop('data')
        reshaped = pd.pivot_table(
            data,
            index='ch_vect',
            columns='feat_vect',
            values='items_per_second')
        reshaped = reshaped.sort_index(ascending=False, axis=0)
        sns.heatmap(reshaped, **kwargs)

    def _get_title(prefix, data):
        main_title = '{}Flops for different tile sizes and vector sizes'.format(
            prefix)
        return get_conv_title(data, main_title)

    if len(data.index) == 0:
        print("Skipping plot, as dataframe is empty")
        return

    vmin = data['items_per_second'].min()
    vmax = data['items_per_second'].max()
    div, fmt, prefix = get_readable_float_fmt_string(vmin, vmax)

    tile_row_order = sorted(data['tile_rows'].unique(), reverse=True)
    # ndarray.sort() sorts in place and returns None; sorted() returns a list.
    tile_col_order = sorted(data['tile_cols'].unique())

    scaled_data = data.copy()
    scaled_data['items_per_second'] = scaled_data['items_per_second'] / div
    scaled_vmin = vmin / div
    scaled_vmax = vmax / div

    fg = sns.FacetGrid(
        scaled_data,
        row='tile_rows',
        col='tile_cols',
        row_order=tile_row_order,
        col_order=tile_col_order,
        margin_titles=True)
    fg.map_dataframe(
        _draw_heatmap,
        annot=True,
        fmt=fmt,
        cmap='YlGnBu',
        vmin=scaled_vmin,
        vmax=scaled_vmax,
        cbar=False)
    fg.set_titles(
        template='Tile size: {row_name}x{col_name}',
        row_template='{row_name} rows per tile',
        col_template='{col_name} cols per tile')
    fg.set_axis_labels('Feature vectors', 'Channel vectors')
    fg.fig.subplots_adjust(top=0.9, hspace=0.25, wspace=0.15)
    fg.fig.suptitle(_get_title(prefix, data))
    return fg
834472a852b98916f9a9507bc56dde5bb71c55ec
28,178
from datetime import date
from typing import Optional

# Holiday and Holidays are defined in the surrounding module.


def get_national_holiday(day: date) -> Optional[Holiday]:
    """
    For a given date, return the national holiday object if the particular
    date is a Czech national holiday; return None otherwise.
    """
    for holiday in Holidays(year=day.year):
        if day == holiday:
            return holiday
    return None
8cc2b83a16e6a07fc8e5f1c8cc95bec66bf8ffe5
28,179
def robust_topological_sort(graph):
    """
    First identify strongly connected components,
    then perform a topological sort on these components.

    graph should be a dictionary mapping node names to
    lists of successor nodes.
    """
    components = strongly_connected_components(graph)

    node_component = {}
    for component in components:
        for node in component:
            node_component[node] = component

    component_graph = {}
    for component in components:
        component_graph[component] = []

    for node in graph:
        node_c = node_component[node]
        for successor in graph[node]:
            successor_c = node_component[successor]
            if node_c != successor_c:
                component_graph[node_c].append(successor_c)

    return topological_sort(component_graph)
3ecb7dbeea4f4b9f2d205472ee2c1a59465b09ea
28,180
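# Usage sketch: strongly_connected_components and topological_sort are
# module-local helpers, so this only illustrates the expected call shape.
# 'a' and 'b' form a cycle and therefore collapse into one component:
#
#     graph = {'a': ['b'], 'b': ['a', 'c'], 'c': []}
#     robust_topological_sort(graph)  # e.g. [('a', 'b'), ('c',)]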
from typing import List


def similar_consonants_to(phonet_1: UnmarkableConsonant) -> List[Phonet]:
    """
    a list of consonants that are similar to a specified consonant.
    """
    vocal_folds: List[VocalFolds] = similar_in_voice(phonet_1.vocal_folds)
    place: List[Place] = similar_in_place(phonet_1.place)
    manner: List[Manner] = similar_in_manner(phonet_1.manner)
    airstream: List[Airstream] = similar_in_airstream(phonet_1.airstream)
    secondary_articulation: List[
        SecondaryArticulation
    ] = similar_in_secondary_articulation(phonet_1.secondary_articulation)

    return [
        Consonant(v, p, m, a, s)
        for p in place
        for v in vocal_folds
        for m in manner
        for a in airstream
        for s in secondary_articulation
    ]
168603fff454bb6ba9cb29a051ec674547720344
28,181
# Imports assumed by this Flask error handler.
import logging

from flask import jsonify
from werkzeug.exceptions import InternalServerError

LOGGER = logging.getLogger(__name__)


def error_handler(error):
    """Render errors as JSON"""
    if not all(hasattr(error, attr) for attr in ("code", "name", "description")):
        LOGGER.exception("An error occurred: %r", error)
        error = InternalServerError()
    resp = {
        "code": error.code,
        "name": error.name,
        "description": error.description
    }
    return jsonify(resp), resp["code"]
4fd4834254e78503817e621cafc14e8d477b3bad
28,182
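from flask import Flask

# Minimal wiring sketch (app name is illustrative): route every unhandled
# exception through error_handler.
app = Flask(__name__)
app.register_error_handler(Exception, error_handler)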
def get_encoded_len_value(typename, package_specs):
    """ Get the encoded size of a given type on the wire """
    if typename in PRIMITIVE_TYPES:
        return PRIMITIVE_SIZES[typename]
    encoded_len = 0
    for package in package_specs:
        for msg in package.definitions:
            if get_v4_typename(msg.identifier) == typename:
                for f in msg.fields:
                    encoded_len += get_field_encoded_len(f, package_specs)
    return encoded_len
0d11c409fb760a317b456d0397876a97cf43a593
28,183
# `engine` is sqlparse's internal filter pipeline; full_analyze() exists on
# older FilterStack versions (newer releases use enable_grouping()).
from sqlparse import engine


def parsestream(stream, encoding=None):
    """Parses sql statements from file-like object.

    :param stream: A file-like object.
    :param encoding: The encoding of the stream contents (optional).
    :returns: A generator of :class:`~sqlparse.sql.Statement` instances.
    """
    stack = engine.FilterStack()
    stack.full_analyze()
    return stack.run(stream, encoding)
90c0cb061f1cdd370b5c31b8326fc80be16f310d
28,184
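import io

# Example (assuming the FilterStack API above): iterate parsed statements
# from an in-memory stream.
for statement in parsestream(io.StringIO("select 1; select 2;")):
    print(statement)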
import torch


def apply_hardmask_with_map(input, attention, mask):
    """ Apply a hard attention mask over the input.
        input: [batch, num_objs, visual_features] = [b, o, v]
        attention: [batch, num_objs, glimpses] = [b, o, g]
        mask: number of top-attended objects to zero out per glimpse
        return: the masked input (a single tensor, not an (input, weight) pair)
    """
    b, o, v = input.shape
    # keep the lower attention weights: build a map that is 0 at the `mask`
    # most-attended object positions and 1 elsewhere
    mask_map = torch.ones_like(attention)  # [b, o, g]
    mask_val, mask_idx = attention.topk(mask, dim=1)  # [b, m, g]
    mask_map = mask_map.scatter(1, mask_idx, 0.0)  # [b, o, g]
    # the elementwise product assumes g broadcasts against v (e.g. g == 1)
    return input * mask_map

    # attention = attention * mask_map  # [b, o, g]
    # input = input.unsqueeze(2)  # [b, o, 1, v]
    # attention = attention.unsqueeze(-1)  # [b, o, g, 1]
    # weighted = attention * input  # [b, o, g, v]
    # weighted_mean = weighted.sum(dim=1)  # [b, g, v]
    # return weighted_mean.view(b, -1), mask_idx
3dd40399e42a02770351cd772feb89ed7b2dd16b
28,185
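# Shape sketch with a single glimpse so [b, o, 1] broadcasts over [b, o, v];
# random data, illustrative only.
x = torch.randn(2, 5, 8)   # [batch, objects, features]
att = torch.rand(2, 5, 1)  # [batch, objects, glimpses=1]
masked = apply_hardmask_with_map(x, att, mask=2)
# the two most-attended objects in each example are zeroed out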
from google.cloud import bigquery


def _make_merge_query(increment_table_id, target_table_id):
    """Merge the incremental load into the existing target table."""
    client = bigquery.Client()

    # Try to get the target table's columns for the merge; if the table does
    # not exist yet, return None so the caller can fall back to a plain copy.
    try:
        table = client.get_table(target_table_id)
        columns_list = list(c.name for c in table.schema)
        columns = ','.join(columns_list)
        #print(columns_list)
    except Exception:
        return None

    merge_query = f"""
    delete from `{target_table_id}`
    where date in (select distinct date from `{increment_table_id}`);

    insert into `{target_table_id}` ({columns})
    select {columns}
    from `{increment_table_id}`;
    """
    print("Merge_query:", merge_query)
    return merge_query
f42f895268e5417c904c2d2c40c7f4d75306c8d2
28,186
def delete_alert(id): """ delete an alert and associated thread from the database """ alert = Alert.query.get(id) if alert is None: return {'message': 'Alert ' + str(id) + ' does not exist'}, 404 # delete associated thread delete_thread(id) # delete alert db.session.delete(alert) db.session.commit() return 'Alert ' + str(id) + ' deleted'
414e448d9b0b256342d13baea0e316cbddf007aa
28,187
def data_file_read_calltree(filename):
    """
    Extracts the calltree of a fuzzer from a .data file.
    This is for C/C++ files
    """
    read_tree = False
    function_call_depths = []

    tmp_function_depths = {
        'depth': -2,
        'function_calls': []
    }
    with open(filename, "r") as flog:
        for line in flog:
            line = line.replace("\n", "")
            if read_tree and "======" not in line:
                stripped_line = line.strip().split(" ")

                # Type: {spacing depth} {target filename} {line count}
                if len(stripped_line) == 3:
                    filename = stripped_line[1]
                    linenumber = int(stripped_line[2].replace("linenumber=", ""))
                else:
                    filename = ""
                    linenumber = 0

                space_count = len(line) - len(line.lstrip(' '))
                depth = space_count / 2
                curr_node = {'function_name': stripped_line[0],
                             'functionSourceFile': filename,
                             'depth': depth,
                             'linenumber': linenumber}

                if tmp_function_depths['depth'] != depth:
                    if tmp_function_depths['depth'] != -2:
                        function_call_depths += list(
                            sorted(tmp_function_depths['function_calls'],
                                   key=lambda x: x['linenumber']))
                    tmp_function_depths = {
                        'depth': depth,
                        'function_calls': []
                    }
                tmp_function_depths['function_calls'].append(curr_node)

                #function_call_depths.append(curr_node)
            if "====================================" in line:
                read_tree = False
            if "Call tree" in line:
                read_tree = True

    # Add the remaining list of nodes to the overall list.
    function_call_depths += list(
        sorted(tmp_function_depths['function_calls'],
               key=lambda x: x['linenumber']))

    return function_call_depths
10d876a8aa585a767f1939434edc018e5c44404d
28,188
def bidirected(DAG): """Return a graph with an opposing edge added for every edge in DAG.""" return skeleton(DAG).to_directed()
8ca8d79f9d5fb6d66543b91004e83854b5b9d57e
28,189
def get_census_data(independent_var, dependent_var):
    """
    Returns a dict containing the data for the requested variables in a
    table format.

    :param independent_var: the chosen independent variable
    :param dependent_var: the chosen dependent variable
    :return: the 2014 business data as a JSON string
    """
    # Adjust the names of the variables for querying
    if dependent_var == PAYPEREMP:
        dependent_var = PAYANN
    elif dependent_var == EMPPERFIRM:
        dependent_var = FIRMPDEMP

    year_2014 = 2014
    bus_2014 = get_business_data(year_2014, independent_var, dependent_var)
    # print(bus_2014)
    df_bus_2014 = get_census_dataframe(year_2014, bus_2014, independent_var,
                                       dependent_var)
    print(df_bus_2014)

    """
    Fix 2012 code once done with 2014

    year_2012 = 2012
    bus_2012 = get_business_data(year_2012, list(census_vars))
    df_bus_2012 = get_census_dataframe(bus_2012, year_2012)

    # Combine the data
    merged_df = df_bus_2014.merge(df_bus_2012, on="GROUP", suffixes=suffixes)
    """
    return df_bus_2014.to_json()
9dd13284ab28aefa93d5caa8940208211eeb5bbc
28,191
import numpy as np


def generate_chirp_exp(dur, freq_start, freq_end, Fs=22050):
    """Generation of a chirp with exponential frequency increase

    Notebook: C1/C1S3_Dynamics.ipynb

    Args:
        dur (float): Length (seconds) of the signal
        freq_start (float): Start frequency of the chirp
        freq_end (float): End frequency of the chirp
        Fs (scalar): Sampling rate (Default value = 22050)

    Returns:
        x (np.ndarray): Generated chirp signal
        t (np.ndarray): Time axis (in seconds)
        freq (np.ndarray): Instant frequency (in Hz)
    """
    N = int(dur * Fs)
    t = np.arange(N) / Fs
    freq = np.exp(np.linspace(np.log(freq_start), np.log(freq_end), N))
    # Integrate the instantaneous frequency to obtain the phase.
    phases = np.zeros(N)
    for n in range(1, N):
        phases[n] = phases[n - 1] + 2 * np.pi * freq[n - 1] / Fs
    x = np.sin(phases)
    return x, t, freq
c0c9b7b374fbef62e73fab3bb97caaca11388a15
28,192
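# One-second sweep from 220 Hz to 880 Hz; parameters are illustrative.
x, t, freq = generate_chirp_exp(dur=1.0, freq_start=220, freq_end=880, Fs=22050)
# freq[0] == 220 and freq[-1] == 880 (up to float rounding); x is the waveform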
def get_geoId(row, geo_map: dict) -> str:
    """
    Dataframe utility function to map DC geoId based on latitude, longitude.
    """
    loc = f"{str(row['Latitude'])},{str(row['Longitude'])}"
    try:
        return ', '.join(geo_map[loc])
    except KeyError:
        print(f"{loc} -- does not exist in the map")
        return None
a06edaaf6b7c848f4580bdd78e5514b65c23bf16
28,193
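import pandas as pd

# Applying get_geoId row-wise over a frame (sample coordinates and map only):
df = pd.DataFrame({'Latitude': [10.0], 'Longitude': [20.0]})
sample_map = {'10.0,20.0': ['geoId/01', 'geoId/02']}
df['geoId'] = df.apply(lambda row: get_geoId(row, sample_map), axis=1)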
import shutil def update_cm_working_file(source_file, target): """Copies source file to target and stages in git. Returns True on success.""" print('<<Copy {} to {}>>'.format(source_file, target)) try: shutil.copyfile(source_file, target) except OSError as ex: print('Exception copying file: {}'.format(ex)) return False return run_command(['git', 'add', target], cwd=CM_WORKING_DIR, uid=CM_OWNER_UID)
2ec65b077a9c1954071f4daf4082c4eabc245252
28,195
# Imports assumed by this helper; `inside` and `edge_intersections` are
# module-local utilities.
import heapq as hq

import numpy as np
import scipy.spatial as st


def farthest(pts, xlims, ylims, n):
    """ Find the 'n' points that lie farthest from the points given
    in the region bounded by 'xlims' and 'ylims'.
    'pts' is an array of points.
    'xlims' and 'ylims' are tuples storing the maximum and minimum
    values to consider along the x and y axes."""
    # There are a ton of ways to do this, this is a shorter one.
    # The 'inside' function tests whether or not a point is on
    # the interior of the given square.
    ins = lambda pt: inside(pt, xlims, ylims)
    # Construct the Voronoi diagram.
    V = st.Voronoi(pts)
    # Construct the KD Tree.
    KD = st.cKDTree(pts)
    # Now we'll construct a list of tuples where the first
    # entry is the distance from a point to the nearest node
    # and the second entry is a tuple with the coordinates for the point.
    # Process the vertices of the Voronoi diagram.
    Q = [(KD.query(pt)[0], pt) for pt in V.vertices if ins(pt)]
    # Process the intersections of the edges of the
    # Voronoi diagram and the edges of the box.
    Q += [(KD.query(pt)[0], pt)
          for pt in edge_intersections(V, xlims, ylims)[0]]
    # Process the corners of the box.
    Q += [(KD.query(pt)[0], (x, y)) for x in xlims for y in ylims]
    # Return the 'n' points with farthest distance from the points
    # used to generate the Voronoi diagram.
    return np.array([pair[1] for pair in hq.nlargest(n, Q)])
708d8ac66da7bc478c9df64be70f0fccc5b5f28d
28,196
from copy import deepcopy

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

# get_summ_stat_name_text is a module-local helper.


def vis_ss_barplot(m, s, sample, hyperparams, stats=None, t_on=None, t_off=None,
                   with_ss=True, with_params=True, voltage_trace=None, test_idx=None,
                   case=None, title=None, date_today=None, counter=0, save_fig=False,
                   legend_offset=0.0, axss=None, axmemparams=None, axsynparams=None,
                   max_stats=None, min_stats=None, mem_dimensions=None, mode='13D',
                   mode_for_membrane_height=None, labels_=True, color_input='k',
                   stat_mean=None, stat_std=None, scale_bar=True, stat_scale=None,
                   current_col='g', max_conds=None, min_conds=None, legend=True,
                   ss_names=True, param_names=True):
    """
    Based on vis_sample. Is called when the pdf should be shown next to the sample.

    :param m: generator object, from m = netio.create_simulators(params)[0]
    :param s: summstat object, from s = netio.create_summstats(params)
    :param sample: membrane/synaptic conductances
    :param t_on:
    :param t_off:
    :param with_ss: bool, True if bars for summary stats are wanted
    :param with_params: bool, True if bars for parameters are wanted
    :return: figure object
    """
    # Hyperparameters for plotting
    font_size = 8.0  # fontsize of the labels
    col_bar = color_input  # color of the bars for summstats and conductances
    col_minmax = color_input  # color of the horizontal line indicating the max and min value of summstats and conds
    col_shade = color_input  # color of the shade between the max and min values
    values_each = 100  # how many values we evaluate for the max and min values
    indicator_fraction = 0.8  # breadth of the horizontal bars for max and min, should be within [0, 1]
    opacity = 0.5  # opacity of the shade
    width = 0.35  # the width of the bars
    neuron_labels = ['AB/PD', 'LP', 'PY']  # labels for the legends
    scale_bar_breadth = 1000
    scale_bar_voltage_breadth = 50

    plot_bars = False

    params = sample
    stats_nan = deepcopy(stats)

    bar_scaling_factors = np.asarray(
        [[1.0, 100.0, 100.0, 10.0, 100.0, 1.0, 10000, 10000],
         [1.0, 100.0, 100.0, 10.0, 100.0, 1.0, 10000, 10000],
         [1.0, 100.0, 100.0, 10.0, 100.0, 1.0, 10000, 10000]])
    bar_vals = bar_scaling_factors[np.asarray(hyperparams.use_membrane)]

    if mem_dimensions is not None:
        params_trunc = params[mem_dimensions].tolist()
        params_trunc += params[-7:].tolist()
        bar_vals = bar_vals[mem_dimensions]
        params = np.asarray(params_trunc)

    if with_ss:
        lticks = np.arange(len(stats))
        if stat_scale is None:
            stats[8:] *= 2000
        min_stats_scaled = deepcopy(min_stats)
        max_stats_scaled = deepcopy(max_stats)
        if stat_scale is None:
            min_stats_scaled[8:] = min_stats_scaled[8:] * 2000
            max_stats_scaled[8:] = max_stats_scaled[8:] * 2000

        if plot_bars:
            axss.bar(lticks + width / 2, stats, width, color=col_bar)

        end_of_time_axis = len(stats) - 1 + width
        full_time = np.linspace(width / 2 - 0.5, end_of_time_axis + 0.5 - width / 2,
                                values_each * len(stats))

        full_min_ss = np.tile(min_stats_scaled, (values_each, 1))
        full_min_ss = full_min_ss.flatten(order='F')
        full_max_ss = np.tile(max_stats_scaled, (values_each, 1))
        full_max_ss = full_max_ss.flatten(order='F')

        for k in range(len(stats)):
            start_t = int(values_each * k + (1 - indicator_fraction) / 2 * values_each)
            end_t = int(values_each * (k + 1) - (1 - indicator_fraction) / 2 * values_each)
            time_diff = end_t - start_t
            axss.plot(full_time[start_t:end_t][::time_diff - 1],
                      full_min_ss[start_t:end_t][::time_diff - 1], c=col_minmax)
            axss.plot(full_time[start_t:end_t][::time_diff - 1],
                      full_max_ss[start_t:end_t][::time_diff - 1], c=col_minmax)
            axss.fill_between(full_time[start_t:end_t][::time_diff - 1],
                              full_min_ss[start_t:end_t][::time_diff - 1],
                              full_max_ss[start_t:end_t][::time_diff - 1],
                              facecolor=col_shade, alpha=opacity)

        if labels_:
            axss.text(0.33, -0.68, 'Summary statistics', fontsize=font_size,
                      transform=axss.transAxes)
            axss.text(0.322, -0.80, '[st. dev. of samples]', fontsize=font_size,
                      transform=axss.transAxes)

        nan_pos = np.where(np.isnan(stats_nan))[0]
        if stat_scale is not None:
            axss.scatter(nan_pos + width / 2, 1.7 * np.ones_like(nan_pos),
                         c=col_minmax, s=25.0, zorder=2, marker='x')
        else:
            axss.scatter(nan_pos + width / 2, 1900 * np.ones_like(nan_pos),
                         c=col_minmax, s=25.0, zorder=2, marker='x')

        # add some text for labels, title and axes ticks
        names = []
        for num in range(15):
            names.append(get_summ_stat_name_text(num))

        #axss.set_yticks([-4, -2, 0, 2, 4])
        axss.set_yticks([-2, -1, 0, 1, 2])
        #axss.set_yticklabels([r'$-4 \sigma$', '$-2 \sigma$', '0', '$2 \sigma$', '$4 \sigma$'])
        axss.set_yticklabels(['$\mathdefault{-2} \sigma$', '$\mathdefault{-}\sigma$',
                              '0', '$\sigma$', '$\mathdefault{2} \sigma$'])
        axss.set_xticks(lticks + width / 2)
        if ss_names:
            axss.set_xticklabels(names, rotation='vertical', fontsize=font_size)
        else:
            axss.axes.get_xaxis().set_visible(False)
        #axss.axes.get_yaxis().set_visible(False)
        axss.xaxis.set_tick_params(labelsize=font_size)
        axss.yaxis.set_tick_params(labelsize=font_size)
        if stat_scale is not None:
            axss.set_ylim([-2.0, 2.0])
        else:
            axss.set_ylim([-450, 2100])
        axss.spines['right'].set_visible(False)
        axss.spines['top'].set_visible(False)
        axss.tick_params(width=2.0 * 0.666, length=5.0 * 0.666)
        #axss.get_xaxis().set_tick_params(
        #    which='both', direction='out', labelsize=font_size*3)

    sns.set(style="ticks", font_scale=1)
    sns.despine()

    if save_fig:
        plt.savefig('../../thesis_results/pdf/' + date_today + '_sample_prinz_' + case +
                    '_{}_{}.pdf'.format(test_idx[0], counter), bbox_inches='tight')
        plt.savefig('../../thesis_results/png/' + date_today + '_sample_prinz_' + case +
                    '_{}_{}.png'.format(test_idx[0], counter), bbox_inches='tight')
        plt.savefig('../../thesis_results/svg/' + date_today + '_sample_prinz_' + case +
                    '_{}_{}.svg'.format(test_idx[0], counter), bbox_inches='tight')

    if axmemparams is not None and axss is not None:
        return axss, axmemparams, axsynparams
    elif axss is not None:
        return axss
    elif axmemparams is not None:
        return axmemparams, axsynparams
ad374ff232505579f9a557d26553e9e860147d29
28,197
import numpy as np
import torch

# TimeFrequencyTorchFactorization and TEMPORAL_SEGMENTS are module-local.


def delta_to_binary_mask(delta, k, hop_size, sample_rate, return_indices=False):
    """returns a binary mask indicating the k segments with the largest norm / magnitude """
    delta_factorization = TimeFrequencyTorchFactorization(
        delta,
        frequency_segments=4,
        temporal_segmentation_params=TEMPORAL_SEGMENTS,
        hop_length=hop_size,
        target_sr=sample_rate,
        baseline="zero")

    # for this we use the TimeFrequencyTorchFactorization and fill it with ones
    # and baseline 'zero', because then retrieving segments will result in a
    # binary mask and we can ensure that it uses exactly the same "grid" for
    # segmentation as we use for the LIME algorithm
    binary_factorization = TimeFrequencyTorchFactorization(
        torch.ones_like(delta),
        frequency_segments=4,
        temporal_segmentation_params=TEMPORAL_SEGMENTS,
        hop_length=hop_size,
        target_sr=sample_rate,
        baseline="zero")

    segment_values = []
    for i in range(binary_factorization.get_number_components()):
        single_components = delta_factorization.retrieve_components([i])
        # .item() turns the 0-dim tensor into a float so np.argsort is robust
        segment_values.append((single_components ** 2).sum().item())

    ranked = np.argsort(segment_values)
    largest_indices = ranked[::-1][:k]
    binary_mask = binary_factorization.retrieve_components(largest_indices)
    if return_indices:
        return binary_mask, largest_indices
    return binary_mask
fb8eb2234565a8247df24588a77a9c6d3fe302d2
28,199
from flask import current_app

# get_user_by_legacy_id is a module-local helper.


def get_patron_pid(record):
    """Get patron_pid from existing users."""
    user = get_user_by_legacy_id(record["id_crcBORROWER"])
    if not user:
        # user was deleted, fallback to the AnonymousUser
        anonym = current_app.config["ILS_PATRON_ANONYMOUS_CLASS"]()
        patron_pid = str(anonym.id)
    else:
        patron_pid = user.pid
    return str(patron_pid)
e2ba9102052c0ce84f7fc04fdc5be97a6050634c
28,200
def get_QGroupBox():
    """QGroupBox getter."""
    try:
        # Importing at module level would fail outright if PySide is absent,
        # so the import and its PyQt5 fallback live inside the function.
        import PySide.QtGui as QtGui
        return QtGui.QGroupBox
    except ImportError:
        import PyQt5.QtWidgets as QtWidgets
        return QtWidgets.QGroupBox
9c0333c9c8a4fafd1c4f4f5739546aaa19a6e321
28,201
import collections


def update_dict(orig_dict: dict, new_dict: collections.abc.Mapping) -> dict:
    """Updates an existing dictionary with values from a new dictionary.

    This function mimics the dict.update() method; however, it also works
    for nested dictionaries.
    Ref: https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth

    Args:
        orig_dict: The original dictionary to insert items into.
        new_dict: The new dictionary to insert items from.

    Returns:
        The updated dictionary.
    """
    for keyname, value in new_dict.items():
        if isinstance(value, collections.abc.Mapping):
            orig_dict[keyname] = update_dict(orig_dict.get(keyname, {}), value)
        else:
            orig_dict[keyname] = value
    return orig_dict
e176f4cf293be67aece6e9811fe75de679bb1e94
28,202
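# Nested-merge example (illustrative data):
base = {'a': 1, 'nested': {'x': 1, 'y': 2}}
update_dict(base, {'nested': {'y': 20, 'z': 30}})
# base -> {'a': 1, 'nested': {'x': 1, 'y': 20, 'z': 30}}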