Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def ask_daemon_sync(view, ask_type, ask_kwargs, location=None):
    """Jedi sync request shortcut.

    :type view: sublime.View
    :type ask_type: str
    :type ask_kwargs: dict or None
    :type location: type of (int, int) or None
    """
    daemon = _get_daemon(view)
    return daemon.request(
        ask_type,
        ask_kwargs or {},
        *_prepare_request_data(view, location)
    )
665a302445c4661d3e5610914bde688cd4512968
30,534
import logging
import time


def _etl_epacems(etl_params, datapkg_dir, pudl_settings, ds_kwargs):
    """Extract, transform and load CSVs for EPA CEMS.

    Args:
        etl_params (dict): ETL parameters required by this data source.
        datapkg_dir (path-like): The location of the directory for this
            package, which will contain a datapackage.json file and a data
            directory in which the CSV files are stored.
        pudl_settings (dict): a dictionary filled with settings that mostly
            describe paths to various resources and outputs.

    Returns:
        list: Names of PUDL DB tables output by the ETL for this data source.
    """
    epacems_dict = pudl.etl._validate_params_epacems(etl_params)
    epacems_years = epacems_dict['epacems_years']
    epacems_states = epacems_dict['epacems_states']
    # If we're not doing CEMS, just stop here to avoid printing messages like
    # "Reading EPA CEMS data...", which could be confusing.
    if not epacems_states or not epacems_years:
        logger.info('Not ingesting EPA CEMS.')
        return []  # nothing to load, so no output tables

    # NOTE: This is a generator for raw dataframes
    epacems_raw_dfs = pudl.extract.epacems.extract(
        epacems_years, epacems_states, Datastore(**ds_kwargs))
    # NOTE: This is a generator for transformed dataframes
    epacems_transformed_dfs = pudl.transform.epacems.transform(
        epacems_raw_dfs=epacems_raw_dfs, datapkg_dir=datapkg_dir)

    logger.info("Loading tables from EPA CEMS into PUDL:")
    if logger.isEnabledFor(logging.INFO):
        start_time = time.monotonic()
    epacems_tables = []
    # run the cems generator dfs through the load step
    for transformed_df_dict in epacems_transformed_dfs:
        pudl.load.csv.dict_dump(transformed_df_dict,
                                "EPA CEMS",
                                datapkg_dir=datapkg_dir)
        epacems_tables.append(list(transformed_df_dict.keys())[0])
    if logger.isEnabledFor(logging.INFO):
        delta_t = time.strftime("%H:%M:%S", time.gmtime(
            time.monotonic() - start_time))
        time_message = f"Loading EPA CEMS took {delta_t}"
        logger.info(time_message)
        start_time = time.monotonic()

    return epacems_tables
5e9b951205c8e5d50d8b07f5b7661fcbc4595a80
30,535
def GetNvccOptions(argv):
    """Collect the -nvcc_options values from argv.

    Args:
        argv: A list of strings, possibly the argv passed to main().

    Returns:
        1. The string that can be passed directly to nvcc.
        2. The leftover options.
    """
    parser = ArgumentParser()
    parser.add_argument('-nvcc_options', nargs='*', action='append')

    args, leftover = parser.parse_known_args(argv)

    if args.nvcc_options:
        options = _update_options(sum(args.nvcc_options, []))
        return (['--' + a for a in options], leftover)
    return ([], leftover)
bb143edb6099eb6182fe7b79e53422321cb3e03d
30,536
import json


def show_node(request, name='', path='', revision=''):
    """
    View for show_node page, which provides context for show_node.html.
    Shows description for yang modules.

    :param request: Array with arguments from webpage data submission.
    :param name: Takes first argument from url if request does not contain module argument.
    :param path: Path for node.
    :param revision: revision for yang module, if specified.
    :return: returns context for show_node.html
    """
    alerts = []
    context = dict()
    try:
        if not revision:
            revision = get_latest_mod(name)
            revision = revision.split('@')[1]
        query = json.load(open('search/templates/json/show_node.json', 'r'))
        query['query']['bool']['must'][0]['match_phrase']['module.keyword']['query'] = name
        query['query']['bool']['must'][1]['match_phrase']['path']['query'] = path
        query['query']['bool']['must'][2]['match_phrase']['revision']['query'] = revision
        hits = es.search(index='yindex', doc_type='modules', body=query)['hits']['hits']
        if len(hits) == 0:
            alerts.append('Could not find data for {} at {}'.format(name, path))
        else:
            result = hits[0]['_source']
            context['show_node'] = result
            context['properties'] = json.loads(result['properties'])
    except:
        alerts.append('Module and path must be specified')
    context['alerts'] = alerts
    return render(request, 'search/show_node.html', context)
20e3a87be8f2d85632fe26cc86a3cc7742d2de33
30,537
def compute_F1(TP, TN, FP, FN):
    """
    Return the F1 score
    """
    numer = 2 * TP
    denom = 2 * TP + FN + FP
    F1 = numer / denom
    Acc = 100. * (TP + TN) / (TP + TN + FP + FN)
    return F1, Acc
6f012246337534af37ff233ad78d9645907739e3
30,538
def name_full_data():
    """Full name data."""
    return {
        "name": "Doe, John",
        "given_name": "John",
        "family_name": "Doe",
        "identifiers": [
            {
                "identifier": "0000-0001-8135-3489",
                "scheme": "orcid"
            },
            {
                "identifier": "gnd:4079154-3",
                "scheme": "gnd"
            }
        ],
        "affiliations": [
            {
                "id": "cern"
            },
            {
                "name": "CustomORG"
            }
        ]
    }
ac590635dbe33e68dc88acd890d16dd3137befb2
30,539
def platypus(in_file, data):
    """Filter Platypus calls, removing Q20 filter and replacing with depth and quality based filter.

    Platypus uses its own VCF nomenclature: TC == DP, FR == AF

    Platypus gVCF output appears to have an 0/1 index problem so the reference block
    regions are 1 base outside regions of interest. We avoid limiting regions during
    filtering when using it.
    """
    filters = ('(FR[0] <= 0.5 && TC < 4 && %QUAL < 20) || '
               '(TC < 13 && %QUAL < 10) || '
               '(FR[0] > 0.5 && TC < 4 && %QUAL < 50)')
    limit_regions = "variant_regions" if not vcfutils.is_gvcf_file(in_file) else None
    return cutoff_w_expression(in_file, filters, data, name="PlatQualDepth",
                               extra_cmd="| sed 's/\\tQ20\\t/\\tPASS\\t/'",
                               limit_regions=limit_regions)
00979a3de36b051882e42e2231cac69a67dfec20
30,540
def delete(i):
    """
    Input:  {
              See 'rm' function
            }

    Output: {
              See 'rm' function
            }
    """
    return rm(i)
048742483608b7530ee217a60c96f4c4f6ec6fb0
30,541
def test_circuit_str(default_compilation_configuration):
    """Test function for `__str__` method of `Circuit`"""

    def f(x):
        return x + 42

    x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
    inputset = range(2 ** 3)
    circuit = hnp.compile_numpy_function(f, {"x": x}, inputset, default_compilation_configuration)

    assert str(circuit) == format_operation_graph(circuit.op_graph)
abf2955b7cd440124eb2e2acf685aa84d69a3e4a
30,543
def add_game():
    """Adds game to database"""
    check_admin()
    add_game = True
    form = GameForm()
    # Checks if form is valid
    if form.validate_on_submit():
        game = Game(name=form.name.data)
        try:
            db.session.add(game)
            db.session.commit()
            flash('Game successfully added')
        except:
            flash('Error: game name already exists')
        return redirect(url_for('admin.list_games'))
    return render_template('admin/games/game.html', action='Add',
                           add_game=add_game, form=form, title='Add Game')
9134516408a1931a41a901b4523713871f580da0
30,544
def requires_moderation(page):
    """Returns True if page requires moderation"""
    return bool(page.get_moderator_queryset().count())
8f1cfa852cbeccfae6157e94b7ddf61d9597936e
30,545
from typing import Optional


def _get_node_info(
        node: NodeObject,
        current_path: str,
        node_type: str,
        label: Optional[str] = None,
        is_leaf: bool = True
) -> NodeInfo:
    """
    Utility method for generating a NodeInfo from a NodeObject
    :param node: NodeObject to convert into a NodeInfo.
        node.name will be used for the label of the node (unless label is provided)
        node.oid will be appended to the end of the current URI to create the node's path
    :param current_path: URI provided in the request to expand/refresh
    :param node_type: Node type, determines icon used in UI
    :param label: Overrides the node.name if provided, display name of the node displayed as-is
    :param is_leaf: Whether or not the node is a leaf. Default is true. If false, a trailing
        slash will be added to the node path to indicate it behaves as a folder
    :return: NodeInfo based on the NodeObject provided
    """
    # Generate the object metadata
    metadata = ObjectMetadata(node.urn, None, type(node).__name__, node.name, None)

    node_info: NodeInfo = NodeInfo()
    node_info.is_leaf = is_leaf
    node_info.label = label if label is not None else node.name
    node_info.metadata = metadata
    node_info.node_type = node_type

    # Build the path to the node. Trailing slash is added to indicate URI is a folder
    trailing_slash = '' if is_leaf else '/'
    node_info.node_path = urljoin(current_path, str(node.name) + trailing_slash)

    return node_info
d0084dc757dd9501dc2853a1445524cbde9a0756
30,546
def get_peers():
    """Retrieve PeerIds and SSIDs for peers that are ready for OOB transfer"""
    query = 'SELECT Ssid, PeerId from EphemeralState WHERE PeerState=1'
    data = exec_query(query, db_path_peer)
    return data
9ab81631cf1f779b3a80b246e0a6144e46599a64
30,547
from bs4 import BeautifulSoup


def get_html_text(html):
    """
    Return the raw text of an ad
    """
    if html:
        doc = BeautifulSoup(html, "html.parser")
        return doc.get_text(" ")
    return ""
14353f368078ea6b1673d1066b0a529cc3e257d9
30,548
def valid_parentheses(string):
    """
    Takes a string of parentheses, and determines if the order of the parentheses is valid.
    :param string: a string of parentheses and characters.
    :return: true if the string is valid, and false if it's invalid.
    """
    stack = []
    for x in string:
        if x == "(":
            stack.append(x)
        elif x == ")":
            if len(stack) > 0:
                stack.pop()
            else:
                return False
    return not stack
e8438404c461b7a113bbbab6417190dcd1056871
30,549
def in_2core_graph_slow(cats: ArrayLike) -> BoolArray:
    """
    Parameters
    ----------
    cats: {DataFrame, ndarray}
        Array containing the category codes of pandas categoricals
        (nobs, ncats)

    Returns
    -------
    retain : ndarray
        Boolean array that marks non-singleton entries as True

    Notes
    -----
    This is a reference implementation that can be very slow to remove
    all singleton nodes in some graphs.
    """
    if isinstance(cats, DataFrame):
        cats = np.column_stack([np.asarray(cats[c].cat.codes) for c in cats])
    if cats.shape[1] == 1:
        return in_2core_graph(cats)
    nobs, ncats = cats.shape
    retain_idx = np.arange(cats.shape[0])
    num_singleton = 1
    while num_singleton > 0 and cats.shape[0] > 0:
        singleton = np.zeros(cats.shape[0], dtype=bool)
        for i in range(ncats):
            ucats, counts = np.unique(cats[:, i], return_counts=True)
            singleton |= np.isin(cats[:, i], ucats[counts == 1])
        num_singleton = int(singleton.sum())
        if num_singleton:
            cats = cats[~singleton]
            retain_idx = retain_idx[~singleton]
    retain = np.zeros(nobs, dtype=bool)
    retain[retain_idx] = True
    return retain
46fc377643849b68d9071c9e592c1bba68a23a83
30,551
def has_form_encoded_header(header_lines):
    """Return if list includes form encoded header"""
    for line in header_lines:
        if ":" in line:
            (header, value) = line.split(":", 1)
            if header.lower() == "content-type" \
                    and "x-www-form-urlencoded" in value:
                return True
    return False
e4fe797e4884161d0d935853444634443e6e25bb
30,552
from pathlib import Path


def get_best_checkpoint_path(path: Path) -> Path:
    """
    Given a path and checkpoint, formats a path based on the checkpoint file name format.
    :param path: path to checkpoint folder
    """
    return path / LAST_CHECKPOINT_FILE_NAME_WITH_SUFFIX
b0637dd0fac5df3b7645cceec62f23fc3d48d4eb
30,553
def storage_charge_rule(model, technology, timepoint):
    """
    Storage cannot charge at a higher rate than implied by its total installed power capacity.
    Charge and discharge rate limits are currently the same.
    """
    return model.Charge[technology, timepoint] + model.Provide_Power[technology, timepoint] \
        <= model.capacity[technology]
9f437d11f1eb1ce894381de10b719c9c08271396
30,554
def blck_void(preprocessor: Preprocessor, args: str, contents: str) -> str:
    """The void block, processes commands inside it but prints nothing"""
    if args.strip() != "":
        preprocessor.send_warning("extra-arguments", "the void block takes no arguments")
    preprocessor.context.update(preprocessor.current_position.end, "in void block")
    contents = preprocessor.parse(contents)
    preprocessor.context.pop()
    return ""
841be11c1f8f7b9d4c3552cdafe0aaa590b8fb9d
30,555
import typing


def resize_image(image: np.ndarray,
                 width: typing.Optional[int] = None,
                 height: typing.Optional[int] = None,
                 interpolation=cv2.INTER_AREA):
    """
    Resize image using given width or/and height value(s).
    If both values are passed, aspect ratio is not preserved.
    If none of the values are given, original image is returned.
    """
    img_h, img_w = image.shape[:2]

    if not width and not height:
        return image
    elif width and height:
        dim = (width, height)
    else:
        if not width:
            ratio = height / float(img_h)
            dim = (int(img_w * ratio), height)
        else:
            ratio = width / float(img_w)
            dim = (width, int(img_h * ratio))

    return cv2.resize(image, dim, interpolation=interpolation)
ee8bf8424bb23a941a7858a97d1b4ff6b5187d38
30,556
def attr(*args, **kwargs):
    """Decorator that adds attributes to classes or functions
    for use with unit tests runner.
    """
    def wrapped(element):
        for name in args:
            setattr(element, name, True)
        for name, value in kwargs.items():
            setattr(element, name, value)
        return element
    return wrapped
77d20af87cef526441aded99bd6e24e21e5f81f9
30,557
def convert_units(table_name, value, value_unit, targets):
    """
    Converts a given value in a unit to a set of target units.

    @param table_name Name of table units are contained in
    @param value Value to convert
    @param value_unit Unit value is currently in
    @param targets List of units to convert to
    @return List of conversions
    """
    table = get_table(table_name)

    results = list()
    for target in targets:
        result = {'dest_unit': target}
        try:
            result['converted_value'] = table.convert(value_unit, target, value)
        except ValueError:
            continue
        results.append(result)

    return results
b8cdbeafa78ec71450e69cec6913e805bd26fa8a
30,558
def hex2binary(hex_num):
    """ converts from hexadecimal to binary """
    hex1 = h[hex_num[0]]
    hex2 = h[hex_num[1]]
    return str(hex1) + str(hex2)
78d2a804d5f02c985d943e6242bc66143905df2f
30,560
def nn(value: int) -> int:
    """Casts value to closest non negative value"""
    return 0 if value < 0 else value
08672feaefa99881a110e3fc629d4a9256f630af
30,561
def resolve_vcf_counts_data(vcf_data, maf_data, matched_normal_sample_id, tumor_sample_data_col):
    """ Resolves VCF allele counts data. """
    vcf_alleles = [vcf_data["REF"]]
    vcf_alleles.extend(vcf_data["ALT"].split(","))

    tumor_sample_format_data = vcf_data["MAPPED_TUMOR_FORMAT_DATA"]
    normal_sample_format_data = None
    if matched_normal_sample_id in vcf_data.keys():
        normal_sample_format_data = vcf_data["MAPPED_NORMAL_FORMAT_DATA"]

    variant_allele_idx = get_vcf_variant_allele_idx(tumor_sample_format_data, normal_sample_format_data, vcf_alleles)
    (t_ref_count, t_alt_count, t_depth) = resolve_vcf_allele_depth_values(tumor_sample_format_data, vcf_alleles, variant_allele_idx, vcf_data)
    maf_data["t_ref_count"] = t_ref_count
    maf_data["t_alt_count"] = t_alt_count
    maf_data["t_depth"] = t_depth

    # only resolve values for normal allele depths if "NORMAL" data is present in VCF
    if normal_sample_format_data:
        (n_ref_count, n_alt_count, n_depth) = resolve_vcf_allele_depth_values(normal_sample_format_data, vcf_alleles, variant_allele_idx, vcf_data)
        maf_data["n_ref_count"] = n_ref_count
        maf_data["n_alt_count"] = n_alt_count
        maf_data["n_depth"] = n_depth
    return maf_data
5e10d54038a84bc93a4d6b09ae5368e61ae3312f
30,562
def example_profile_metadata_target():
    """Generates an example profile metadata document.

    >>> root = example_profile_metadata_target()
    >>> print_tree(root)
    <?xml version='1.0' encoding='UTF-8'?>
    <Profile xmlns="http://soap.sforce.com/2006/04/metadata">
      <classAccesses>
        <apexClass>ARTransactionsTest</apexClass>
        <enabled>false</enabled>
      </classAccesses>
      <classAccesses>
        <apexClass>AccountAddressManager</apexClass>
        <enabled>true</enabled>
      </classAccesses>
      <classAccesses>
        <apexClass>AccountHierarchyBuilder</apexClass>
        <enabled>true</enabled>
      </classAccesses>
      <classAccesses>
        <apexClass>TransactionTestData</apexClass>
        <enabled>false</enabled>
      </classAccesses>
    </Profile>
    <BLANKLINE>
    """
    root = example_profile_metadata_source()
    example_class_access_element(root, 'AccountHierarchyBuilder', 'true')
    example_class_access_element(root, 'TransactionTestData', 'false')
    return root
6e43847aec021e188c001ad59e297ecdfc31d202
30,563
def separate(expr, deep=False):
    """Rewrite or separate a power of product to a product of powers
       but without any expanding, i.e. rewriting products to summations.

       >>> from sympy import *
       >>> x, y, z = symbols('x', 'y', 'z')

       >>> separate((x*y)**2)
       x**2*y**2

       >>> separate((x*(y*z)**3)**2)
       x**2*y**6*z**6

       >>> separate((x*sin(x))**y + (x*cos(x))**y)
       x**y*cos(x)**y + x**y*sin(x)**y

       #>>> separate((exp(x)*exp(y))**x)
       #exp(x*y)*exp(x**2)

       Notice that summations are left untouched. If this is not the
       requested behaviour, apply 'expand' to input expression before:

       >>> separate(((x+y)*z)**2)
       z**2*(x + y)**2

       >>> separate((x*y)**(1+z))
       x**(1 + z)*y**(1 + z)

    """
    expr = Basic.sympify(expr)

    if isinstance(expr, Basic.Pow):
        terms, expo = [], separate(expr.exp, deep)
        #print expr, terms, expo, expr.base

        if isinstance(expr.base, Mul):
            t = [ separate(Basic.Pow(t, expo), deep) for t in expr.base ]
            return Basic.Mul(*t)
        elif isinstance(expr.base, Basic.exp):
            if deep == True:
                return Basic.exp(separate(expr.base[0], deep)*expo)
            else:
                return Basic.exp(expr.base[0]*expo)
        else:
            return Basic.Pow(separate(expr.base, deep), expo)
    elif isinstance(expr, (Basic.Add, Basic.Mul)):
        return type(expr)(*[ separate(t, deep) for t in expr ])
    elif isinstance(expr, Basic.Function) and deep:
        return expr.func(*[ separate(t) for t in expr])
    else:
        return expr
ae30943f0073508d85212f97d4298f63e16fcc05
30,564
def app_base(request):
    """
    This should render the required HTML to start the Angular application.
    It is the only entry point for the pyramid UI via Angular
    :param request: A pyramid request object, default for a view
    :return: A dictionary of variables to be rendered into the template
    """
    dev_endpoints = ['localhost', '0.0.0.0', '127.0.', '192.168.', '10.19.', 'dev.squizzlezig.com']
    is_dev = False
    for point in dev_endpoints:
        if request.host.split(':', 1)[0].startswith(point) or request.remote_addr.startswith(point):
            is_dev = True
    return {
        'is_dev': is_dev,
        'some_key': request.registry.settings['some_key']}
3a097e920b33248b436e2eea00e05b5708b35779
30,566
from typing import List
import re


def check_lists(document: Document, args: Args) -> List[Issue]:
    """Check that markdown lists items:

    - Are preceded by a blank line.
    - Are not left empty.
    - End with a period if they're a list of sentences.
    - End without a period if they're a list of items."""
    issues = []
    markdown_list_re = re.compile(r"\s*(\d+\.|-) \s*(.*)\n")

    is_front_matter = False
    is_inside_list = False
    for number, line in enumerate(document.lines):
        # We skip lines inside the front matter as that's YAML data.
        if line.startswith("---"):
            is_front_matter = not is_front_matter
        if is_front_matter:
            continue

        match = markdown_list_re.match(line)
        if not match:
            if is_inside_list:
                is_inside_list = False
            continue

        # Figure out if this is the first item in the list.
        # If it is, we need to check that the previous line was blank.
        if not is_inside_list:
            is_inside_list = True
            if document.lines[number - 1].strip() != "":
                issues.append(
                    Issue(
                        line=number + 1,
                        column_start=0,
                        column_end=0,
                        message="Missing blank line before list.",
                        rule=Rules.missing_blank_line_before_list,
                    )
                )

        content = match.group(2).strip()
        is_pascal_case_sequence = (
            re.match(r"^\*?[A-Z]\w*\*?( [A-Z]\w*)*\*?$", content) is not None
        )
        # Check for empty items first so the period checks below never index
        # into an empty string.
        if content == "":
            issues.append(
                Issue(
                    line=number + 1,
                    column_start=match.start(),
                    column_end=match.end(),
                    message="Empty list item.",
                    rule=Rules.empty_lists,
                )
            )
        elif is_pascal_case_sequence and content.endswith("."):
            issues.append(
                Issue(
                    line=number + 1,
                    column_start=match.start(2),
                    column_end=match.end(2),
                    message="List item ends with a period.",
                    rule=Rules.list_item_ends_with_period,
                )
            )
        elif not is_pascal_case_sequence and not content[-1] in ".?!":
            issues.append(
                Issue(
                    line=number + 1,
                    column_start=match.start(2),
                    column_end=match.end(2),
                    message="Sentence in list does not end with a period.",
                    rule=Rules.list_item_does_not_end_with_period,
                )
            )
    return issues
e224206b0683239fe957dd78795b8f2de69d4149
30,567
def transform_one(mt, vardp_outlier=100_000) -> Table:
    """transforms a gvcf into a form suitable for combining

    The input to this should be some result of either :func:`.import_vcf` or
    :func:`.import_vcfs` with `array_elements_required=False`.

    There is a strong assumption that this function will be called on a matrix
    table with one column.
    """
    mt = localize(mt)
    if mt.row.dtype not in _transform_rows_function_map:
        f = hl.experimental.define_function(
            lambda row: hl.rbind(
                hl.len(row.alleles), '<NON_REF>' == row.alleles[-1],
                lambda alleles_len, has_non_ref: hl.struct(
                    locus=row.locus,
                    alleles=hl.cond(has_non_ref, row.alleles[:-1], row.alleles),
                    rsid=row.rsid,
                    __entries=row.__entries.map(
                        lambda e: hl.struct(
                            DP=e.DP,
                            END=row.info.END,
                            GQ=e.GQ,
                            LA=hl.range(0, alleles_len - hl.cond(has_non_ref, 1, 0)),
                            LAD=hl.cond(has_non_ref, e.AD[:-1], e.AD),
                            LGT=e.GT,
                            LPGT=e.PGT,
                            LPL=hl.cond(has_non_ref,
                                        hl.cond(alleles_len > 2,
                                                e.PL[:-alleles_len],
                                                hl.null(e.PL.dtype)),
                                        hl.cond(alleles_len > 1,
                                                e.PL,
                                                hl.null(e.PL.dtype))),
                            MIN_DP=e.MIN_DP,
                            PID=e.PID,
                            RGQ=hl.cond(
                                has_non_ref,
                                e.PL[hl.call(0, alleles_len - 1).unphased_diploid_gt_index()],
                                hl.null(e.PL.dtype.element_type)),
                            SB=e.SB,
                            gvcf_info=hl.case()
                                .when(hl.is_missing(row.info.END),
                                      hl.struct(
                                          ClippingRankSum=row.info.ClippingRankSum,
                                          BaseQRankSum=row.info.BaseQRankSum,
                                          MQ=row.info.MQ,
                                          MQRankSum=row.info.MQRankSum,
                                          MQ_DP=row.info.MQ_DP,
                                          QUALapprox=row.info.QUALapprox,
                                          RAW_MQ=row.info.RAW_MQ,
                                          ReadPosRankSum=row.info.ReadPosRankSum,
                                          VarDP=hl.cond(row.info.VarDP > vardp_outlier,
                                                        row.info.DP, row.info.VarDP)))
                                .or_missing()
                        ))),
            ),
            mt.row.dtype)
        _transform_rows_function_map[mt.row.dtype] = f
    transform_row = _transform_rows_function_map[mt.row.dtype]
    return Table(TableMapRows(mt._tir, Apply(transform_row._name, TopLevelReference('row'))))
7961d5ea3d0b0e58332552c9c3c72692f34868db
30,568
def volume_rebalance(volume: str) -> Result:
    """
    # This function doesn't do anything yet.  It is a place holder because
    # volume_rebalance is a long running command and I haven't decided how to
    # poll for completion yet
    # Usage: volume rebalance <VOLNAME> fix-layout start | start
    # [force]|stop|status
    :param volume: str. The name of the volume to start rebalancing
    :return: Result.  Ok or Err
    """
    arg_list = ["volume", "rebalance", volume, "start"]
    return run_command("gluster", arg_list, True, True)
03df7752b45d90f84720be12f32703c1109d71c2
30,569
import json


def get_droplet_ip():
    """get droplet ip from cache."""
    cached_droplet_info_file = 'droplet_info.json'
    with open(cached_droplet_info_file, 'r') as info_f:
        droplet_info = json.load(info_f)

    return droplet_info['networks']['v4'][0]['ip_address']
21d0bfbbe6aebd7e88cc6465d49b221da271753a
30,570
def country_converter(text_input, abbreviations_okay=True):
    """
    Function that detects a country name in a given word.

    :param text_input: Any string.
    :param abbreviations_okay: means it's okay to check the list for abbreviations, like MX or GB.
    :return:
    """

    # Set default values
    country_code = ""
    country_name = ""

    if len(text_input) <= 1:
        # Too short, can't return anything for this.
        pass
    elif (
        len(text_input) == 2 and abbreviations_okay is True
    ):  # This is only two letters long
        text_input = text_input.upper()  # Convert to upper case
        for country in COUNTRY_LIST:
            if text_input == country[1]:  # Matches exactly
                country_code = text_input
                country_name = country[0]
    elif len(text_input) == 3 and abbreviations_okay is True:  # three letters long code
        text_input = text_input.upper()  # Convert to upper case
        for country in COUNTRY_LIST:
            if text_input == country[2]:  # Matches exactly
                country_code = country[1]
                country_name = country[0]
    else:  # It's longer than three, probably a name. Or abbreviations are disabled.
        text_input = text_input.title()
        for country in COUNTRY_LIST:
            if text_input == country[0]:  # It's an exact match
                country_code = country[1]
                country_name = country[0]
                return country_code, country_name  # Exit the loop, we're done.
            elif text_input in country[0] and len(text_input) >= 3:
                country_code = country[1]
                country_name = country[0]

    if country_code == "" and country_name == "":  # Still nothing
        # Now we check against a list of associated words per country.
        for country in COUNTRY_LIST:
            try:
                country_keywords = country[4]  # These are keywords associated with it.
                for keyword in country_keywords:
                    if text_input.title() == keyword:  # A Match!
                        country_code = country[1]
                        country_name = country[0]
            except IndexError:  # No keywords associated with this country.
                pass

    if "," in country_name:  # There's a comma.
        country_name = country_name.split(",")[0].strip()
        # Take first part if there's a comma (Taiwan, Province of China)

    return country_code, country_name
19bdd3be63ee2a1165d8fc121203694da9732fea
30,571
def lat_long_to_idx(gt, lon, lat):
    """
    Take a geotransform and calculate the array indexes for the given lat,long.

    :param gt: GDAL geotransform (e.g. gdal.Open(x).GetGeoTransform()).
    :type gt: GDAL Geotransform tuple.
    :param lon: Longitude.
    :type lon: float
    :param lat: Latitude.
    :type lat: float
    """
    return (int((lat - gt[3]) / gt[5]),
            int((lon - gt[0]) / gt[1]))
3fafcc4750daa02beaedb330ab6273eab6abcd56
30,572
def BSMlambda(delta: float, S: float, V: float) -> float:
    """Not really a greek, but rather an expression of leverage.

    Arguments
    ---------
    delta : float
        BSM delta of the option
    V : float
        Spot price of the option
    S : float
        Spot price of the underlying

    Returns
    -------
    float
        lambda

    Note
    ----
    Percentage change in the option price per percentage change in the
    underlying asset's price.
    """
    return delta * (S / V)
ea9bf546a7cf46b3c2be01e722409663b05248e1
30,574
import pwd


def uid_to_name(uid):
    """
    Find the username associated with a user ID.

    :param uid: The user ID (an integer).
    :returns: The username (a string) or :data:`None` if :func:`pwd.getpwuid()`
              fails to locate a user for the given ID.
    """
    try:
        return pwd.getpwuid(uid).pw_name
    except Exception:
        return None
f9054e4959a385d34c18d88704d376fb4b718e47
30,575
def fit_poly(data, error_func, degree=3):
    """
    Fit a polynomial to given data, using supplied error function.

    Parameters
    ----------
    data: 2D array where each row is a point (X0, Y)
    error_func: function that computes the error between a polynomial and observed data
    degree: polynomial degree

    Returns line that optimizes the error function.
    """
    # Generate initial guess for polynomial model (all coeffs = 1)
    Cguess = np.poly1d(np.ones(degree + 1, dtype=np.float32))

    # Plot initial guess (optional)
    x = np.linspace(-5, 5, 21)
    plt.plot(x, np.polyval(Cguess, x), 'm--', linewidth=2.0, label='Initial guess')

    # Call optimizer to minimize error function
    result = spo.minimize(error_func, Cguess, args=(data,), method='SLSQP',
                          options={'disp': True})
    return np.poly1d(result.x)
007693c1e01edc69cee27dd1da0836087d8a2d11
30,576
def find_skyrmion_center_2d(fun, point_up=False):
    """
    Find the centre the skyrmion, suppose only one skyrmion and only works
    for 2d mesh.

    `fun` accept a dolfin function.

    `point_up` : the core of skyrmion, points up or points down.
    """
    V = fun.function_space()
    mesh = V.mesh()

    coods = V.dofmap().tabulate_all_coordinates(mesh).reshape(3, -1)[0]
    coods.shape = (-1, mesh.topology().dim())

    xs = coods[:, 0]
    ys = coods[:, 1]

    mxys = fun.vector().array().reshape(3, -1)
    mzs = mxys[2]
    if point_up:
        mzs = -mxys[2]

    mins = [i for i, u in enumerate(mzs) if u < -0.9]

    xs_max = np.max(xs[mins])
    xs_min = np.min(xs[mins])

    ys_max = np.max(ys[mins])
    ys_min = np.min(ys[mins])

    xs_refine = np.linspace(xs_min, xs_max, 101)
    ys_refine = np.linspace(ys_min, ys_max, 101)

    coods_refine = np.array([(x, y) for x in xs_refine for y in ys_refine])
    mzs_refine = np.array([fun(xy)[2] for xy in coods_refine])

    min_id = np.argmin(mzs_refine)
    if point_up:
        min_id = np.argmax(mzs_refine)

    center = coods_refine[min_id]

    return center[0], center[1]
030c704681a48cdeca1f880f08fe9fb039572640
30,577
import time


def test(num_games, opponent, silent):
    """ Test running a number of games """

    def autoplayer_creator(state):
        """ Create a normal autoplayer instance """
        return AutoPlayer(state)

    def minimax_creator(state):
        """ Create a minimax autoplayer instance """
        return AutoPlayer_MiniMax(state)

    players = [None, None]
    if opponent == "minimax":
        players[0] = ("AutoPlayer", autoplayer_creator)
        players[1] = ("MiniMax", minimax_creator)
    else:
        players[0] = ("AutoPlayer A", autoplayer_creator)
        players[1] = ("AutoPlayer B", autoplayer_creator)

    gameswon = [0, 0]
    totalpoints = [0, 0]
    sumofmargin = [0, 0]

    t0 = time.time()

    # Run games
    for ix in range(num_games):
        if not silent:
            print("\nGame {0}/{1} starting".format(ix + 1, num_games))
        if ix % 2 == 1:
            # Odd game: swap players
            players[0], players[1] = players[1], players[0]
            p1, p0 = test_game(players, silent)
            # Swap back
            players[0], players[1] = players[1], players[0]
        else:
            # Even game
            p0, p1 = test_game(players, silent)
        if p0 > p1:
            gameswon[0] += 1
            sumofmargin[0] += p0 - p1
        elif p1 > p0:
            gameswon[1] += 1
            sumofmargin[1] += p1 - p0
        totalpoints[0] += p0
        totalpoints[1] += p1

    t1 = time.time()

    print(
        "Test completed, {0} games played in {1:.2f} seconds, "
        "{2:.2f} seconds per game".format(
            num_games, t1 - t0, (t1 - t0) / num_games)
    )

    def reportscore(player):
        """ Report the result of a number of games """
        if gameswon[player] == 0:
            print(
                "{2} won {0} games and scored an average of {1:.1f} points per game"
                .format(
                    gameswon[player],
                    float(totalpoints[player]) / num_games,
                    players[player][0],
                )
            )
        else:
            print(
                "{3} won {0} games with an average margin of {2:.1f} and "
                "scored an average of {1:.1f} points per game"
                .format(
                    gameswon[player],
                    float(totalpoints[player]) / num_games,
                    float(sumofmargin[player]) / gameswon[player],
                    players[player][0],
                )
            )

    reportscore(0)
    reportscore(1)
c7560e2d298039b5f201b57779e14b4a38054160
30,578
import webbrowser


def pseudo_beaker(UserId: str, SessionId: str, replay=True, scope=True, browser=None,
                  OrgId: str = None, is_staging: bool = True) -> dict:
    """
    Mimic the Beaker admin tool in opening up one or both of session replay and Scope tools
    for a given User Id and Session Id.

    Option to specify a browser (e.g. "safari", "chrome") otherwise the system default is used.
    """
    url_dict = get_beaker_lookup(UserId, SessionId, OrgId, is_staging)

    if browser is None:
        w = webbrowser
    else:
        w = webbrowser.get(browser)

    if replay:
        w.open_new(url_dict["session_url"])
    if scope:
        w.open_new(url_dict["scope_url"])

    return url_dict
6cf905762b76d90a4d32459d9259b452ffc89240
30,579
def table_parse(table):
    """Parse an HTML table element into a list of rows, each a list of
    non-empty cell text values."""
    data = []
    rows = table.find_all('tr')
    for row in rows:
        cols = row.find_all('td')
        cols = [ele.text.strip() for ele in cols]
        data.append([ele for ele in cols if ele])
    return data
528008ada0ad7d594554ed5d577472a126df0cd1
30,580
import pandas
import numpy


def scan_mv_preprocessing_fill_pivot_nan(df):
    """
    Value imputation. Impute missing data in pivot table.

    Parameters
    ----------
    df : dataframe
        Pivot table data with potentially missing values.

    Returns
    -------
    df : dataframe
        Pivot table data with no missing values.
    """
    df_new = pandas.DataFrame()
    for group in set(df.index.get_level_values('Group')):
        df_group = df.loc[df.index.get_level_values('Group') == group]
        for analyte in df_group.columns[~df_group.columns.isin(['Component Name'])]:
            series_fill = df_group[analyte].copy()
            # Missing at random
            series_fill[pandas.isna(series_fill)] = round(numpy.nanmean(series_fill))
            # Missing not at random
            if True in set(pandas.isna(series_fill)):
                series_fill = numpy.nanmin(df_new[analyte]) / 2
            df_group[analyte] = series_fill
        df_new = df_new.append(df_group)
    # Get group and analytes with all nan
    df_filled = df_new.copy()
    return df_filled
e88d1b2b0a3d4fc27afe29a10512116323046cef
30,581
def image_show(request, item_container):
    """ shows the description of the file (zeigt die Beschreibung der Datei an) """
    app_name = 'image'
    vars = get_item_vars_show(request, item_container, app_name)
    file_path = DOWNLOAD_PATH + item_container.container.path
    file_name = file_path + item_container.item.name
    width, height = get_image_size(file_name)
    p = item_container.container.is_protected()
    vars['size'] = get_file_size(item_container, p)
    vars['width'] = width
    vars['height'] = height
    vars['mtime'] = get_file_modification_date(item_container, _('german'), p)
    vars['link'] = show_link(get_file_url(item_container, p), _(u'Download/Anzeigen'))
    return render_to_response('app/file/base_details.html', vars)
b68286bedd92aba7991e8994bf76d84bfe5d4c2e
30,582
def common_kwargs(cfg, bin_count, pointing):
    """Creates a prepfold-friendly dictionary of common arguments to pass to prepfold"""
    name = generate_prep_name(cfg, bin_count, pointing)
    prep_kwargs = {}
    if cfg["run_ops"]["mask"]:
        prep_kwargs["-mask"] = cfg["run_ops"]["mask"]
    prep_kwargs["-o"] = name
    prep_kwargs["-n"] = bin_count
    prep_kwargs["-start"] = cfg["source"]["enter_frac"]
    prep_kwargs["-end"] = cfg["source"]["exit_frac"]
    prep_kwargs["-runavg"] = ""
    prep_kwargs["-noxwin"] = ""
    prep_kwargs["-noclip"] = ""
    prep_kwargs["-nsub"] = 256
    prep_kwargs["-pstep"] = 1
    prep_kwargs["-pdstep"] = 2
    prep_kwargs["-dmstep"] = 1
    prep_kwargs["-npart"] = 120
    prep_kwargs["-npfact"] = 1
    prep_kwargs["-ndmfact"] = 1
    if bin_count >= 300:  # greatly reduces search time
        prep_kwargs["-nopdsearch"] = ""
    if bin_count == 100 or bin_count == 50:  # init fold - do large search
        prep_kwargs["-npfact"] = 4
        prep_kwargs["-ndmfact"] = 3
    if cfg["source"]["ATNF_P"] < 0.005:  # period less than 5 ms
        prep_kwargs["-npfact"] = 4
        prep_kwargs["-ndmfact"] = 3
        prep_kwargs["-dmstep"] = 3
        prep_kwargs["-npart"] = 40
    prep_kwargs["-dm"] = cfg["source"]["ATNF_DM"]
    prep_kwargs["-p"] = cfg["source"]["ATNF_P"]
    if cfg["source"]["my_DM"]:
        prep_kwargs["-dm"] = cfg["source"]["my_DM"]
    if cfg["source"]["my_P"]:
        prep_kwargs["-p"] = cfg["source"]["my_P"]
    return prep_kwargs
c6a1f2ceb475e8f0d2b3e905d8109f79d77d3b79
30,584
def quatMultiply(q1, q2):
    """Returns a quaternion that is a composition of two quaternions

    Parameters
    ----------
    q1: 1 x 4 numpy array representing a quaternion
    q2: 1 x 4 numpy array representing a quaternion

    Returns
    -------
    qM: 1 x 4 numpy array representing a quaternion that is the rotation of
        q1 followed by the rotation of q2

    Notes
    -----
    q2 * q1 is the correct order for applying rotation q1 and then rotation q2
    """
    Q2 = np.array([[q2[0], -q2[1], -q2[2], -q2[3]],
                   [q2[1],  q2[0], -q2[3],  q2[2]],
                   [q2[2],  q2[3],  q2[0], -q2[1]],
                   [q2[3], -q2[2],  q2[1],  q2[0]]])
    qM = np.dot(Q2, q1)
    return qM
2c32f0390d01b36258c9bcabc290a47dca592ded
30,585
def expandingPrediction(input_list, multiple=5):
    """
    Repeat each prediction in the input list `multiple` times.

    :param input_list: list of predictions to expand.
    :param multiple: number of times each prediction is repeated.
    :return: the expanded list of predictions.
    """
    expanded_list = []
    for prediction in input_list:
        for i in range(multiple):
            expanded_list.append(prediction)
    return expanded_list
9a502adb15160e656bd727748eb5dae73858d7f8
30,586
def pages_siblings_menu(context, page, url='/'):
    """Get the parent page of the given page and render a nested list of its
    child pages. Good for rendering a secondary menu.

    :param page: the page where to start the menu from.
    :param url: not used anymore.
    """
    lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
    page = get_page_from_string_or_id(page, lang)
    if page:
        siblings = page.get_siblings()
    context.update({'children': siblings, 'page': page})
    return context
723249cd73ec95b947f279a99e88afe2ec51868d
30,587
def aes(img, mask=None, canny_edges=None, canny_sigma=2):
    """Calculate the Average Edge Strength

    Reference:
        Aksoy, M., Forman, C., Straka, M., Çukur, T., Hornegger, J., & Bammer, R. (2012).
        Hybrid prospective and retrospective head motion correction to mitigate
        cross-calibration errors. Magnetic Resonance in Medicine, 67(5), 1237–1251.
        https://doi.org/10.1002/mrm.23101

    Args:
        img (np.array): Image
        mask (np.array, optional): Brain mask. Defaults to None.
        canny_edges (np.array, optional): Edges to use for calculation, calculates if `None`.
            Defaults to None.
        canny_sigma (int, optional): Sigma for canny edge detection filter. Defaults to 2.

    Returns:
        float, np.array, np.array: aes, edges, canny edge mask
    """
    imax = np.quantile(img[mask == 1], 0.99)

    if canny_edges is None:
        canny_edges = np.zeros_like(img)
        for z in range(img.shape[2]):
            canny_edges[:, :, z] = canny(img[:, :, z], sigma=canny_sigma)
        canny_edges *= mask

    img_edges = sobel(img / imax) * canny_edges
    aes = np.mean(img_edges[canny_edges == 1])

    return aes, img_edges, canny_edges
52cdcf45609e7ee35eb7d05a2529d4330b6509b4
30,588
def projection(basis, vectors):
    """
    The vectors live in a k dimensional space S and the columns of the basis are vectors
    of the same space spanning a subspace of S.
    Gives a representation of the projection of vector into the space spanned by basis,
    in terms of the basis.

    :param basis: an n-by-k array, a matrix whose vertical columns are the vectors of the basis
    :param vectors: an m-by-k array, a vector to be represented in the basis
    :return: an m-by-k array
    """
    return matrix_mult(matrix_mult(vectors, basis), basis.T)
107a1db030d0af7af346128fea10e5f7657b1a6a
30,589
import numpy


def grab(sequence, random=numpy.random):
    """
    Return a randomly-selected element from the sequence.
    """
    return sequence[random.randint(len(sequence))]
1760dc08b5971647f55248bd1b1f04d700dac38e
30,591
def csl_url_args_retriever():
    """Returns the style and locale passed as URL args for CSL export."""
    style = resource_requestctx.args.get("style")
    locale = resource_requestctx.args.get("locale")
    return style, locale
96f87dd927f998b9599663432a95c2330b15b2d0
30,592
from random import random  # the code calls random() directly, so import the function


def select_parents(population, m):
    """Randomly select parents for the new population from the existing population
    sorted by fitness function."""
    fitness_population = sorted(
        population, key=lambda child: fitness_function(child, m), reverse=True)
    # ordered_population = []
    total_cost = get_population_cost(fitness_population, m)
    rand_num = random()
    for item in fitness_population:
        item_cost = fitness_function(item, m)
        percentage = item_cost / total_cost
        if percentage > rand_num:
            return item
        else:
            rand_num -= percentage
3f0a2de28da7355ce34f692f7bb0722896903c51
30,594
def rsqrt(x: Tensor):
    """Computes reciprocal of square root of x element-wise.

    Args:
        x: input tensor

    Returns:
        output tensor

    Examples:
        >>> x = tf.constant([2., 0., -2.])
        >>> rsqrt(x)
        <Tensor: shape=(3,), dtype=float32, numpy=array([0.707, inf, nan], dtype=float32)>

    """
    return tf.math.rsqrt(x)
39b4574311eb74ccef18ddb936d1d92fbb0c1fd9
30,595
def averageObjPeg(objpegpts, planet, catalog=None, sceneid='NO_POL'):
    """
    Average peg points.
    """
    logger.info('Combining individual peg points: %s' % sceneid)
    peg = stdproc.orbit.pegManipulator.averagePeg(
        [gp.getPeg() for gp in objpegpts], planet)
    pegheights = [gp.getAverageHeight() for gp in objpegpts]
    pegvelocities = [gp.getProcVelocity() for gp in objpegpts]

    peg.averageheight = float(sum(pegheights)) / len(pegheights)
    peg.averagevelocity = float(sum(pegvelocities)) / len(pegvelocities)

    if catalog is not None:
        isceobj.Catalog.recordInputsAndOutputs(
            catalog, peg,
            "runSetmocomppath.averagePeg.%s" % sceneid,
            logger,
            "runSetmocomppath.averagePeg.%s" % sceneid)
    return peg
92e41d33d3aa21ee6036e3f1a6550d81d793129e
30,596
def _bytes_chr_py2(i):
    """
    Returns a byte string of length 1 whose ordinal value is i in Python 2.

    Do not call directly, use bytes_chr instead.
    """
    return chr(i)
de524d1ec303cc297d7981570ef30aa9ae6840ed
30,597
from typing import Any


def convert(parser: Any) -> c2gtypes.ParserRep:
    """Convert getopt to a dict.

    Args:
        parser (Any): docopt parser

    Returns:
        c2gtypes.ParserRep: dictionary representing parser object
    """
    return {"parser_description": "", "widgets": extract(parser)}
cf6e53bd514bdb114c3bc5d3b7429c6a8f17881d
30,598
def redirect_vurlkey(request, vurlkey, *args, **kwargs):
    """redirect_vurlkey(vurlkey) looks up the Vurl with base58-encoded index
    VURLKEY and issues a redirect to the target URL"""
    v = Vurl.get_with_vurlkey(vurlkey.encode('utf-8'))
    return v.http_response()
99e4be6b43a8b983f9c8efdb60ccf2873ce3caf2
30,599
def rhand(x, y, z, iopt, parmod, exname, inname):
    """
    Calculates the components of the right hand side vector in the geomagnetic field
    line equation (a subsidiary subroutine for the subroutine step)

    :param x,y,z:
    :param iopt:
    :param parmod:
    :param exname: name of the subroutine for the external field.
    :param inname: name of the subroutine for the internal field.

    Last modification: March 31, 2003
    Author: N.A. Tsyganenko

    :return: r1,r2,r3.
    """
    # common /geopack1/ a(15),psi,aa(10),ds3,bb(8)
    global a, psi, aa, ds3, bb

    bxgsm, bygsm, bzgsm = call_external_model(exname, iopt, parmod, psi, x, y, z)
    hxgsm, hygsm, hzgsm = call_internal_model(inname, x, y, z)

    bx = bxgsm + hxgsm
    by = bygsm + hygsm
    bz = bzgsm + hzgsm
    b = ds3 / np.sqrt(bx**2 + by**2 + bz**2)
    r1 = bx * b
    r2 = by * b
    r3 = bz * b

    return r1, r2, r3
008912796a0ac5c61de3b1fa5de90edbf8ed1f61
30,600
def unique_slug_generator_by_email(instance, new_slug=None):
    """
    This is for a Django project and it assumes your instance
    has a model with a slug field and a title character (char) field.
    """
    slug = new_slug if new_slug is not None else slugify(instance.email)

    Klass = instance.__class__
    qs_exists = Klass.objects.filter(email=slug).exists()
    if qs_exists:
        new_slug = "{slug}-{randstr}".format(
            slug=slug, randstr=random_string_generator(size=4)
        )
        return unique_slug_generator_by_email(instance, new_slug=new_slug)
    return slug
a1e1ae8b25e67a9a5f1d93164deb4b769afb4588
30,601
import logging


def register_provider(price_core_min=1):
    """Register Provider"""
    mine(1)
    web3.eth.defaultAccount = accounts[0]
    prices = [price_core_min, price_data_transfer, price_storage, price_cache]
    tx = config.ebb.registerProvider(
        GPG_FINGERPRINT,
        provider_email,
        federation_cloud_id,
        ipfs_address,
        available_core_num,
        prices,
        commitmentBlockNum,
        {"from": accounts[0]},
    )
    provider_registered_bn = tx.block_number
    print(f"Block number when the provider is registered={provider_registered_bn}")
    gpg_fingerprint = remove_zeros_gpg_fingerprint(tx.events["LogProviderInfo"]["gpgFingerprint"])
    assert gpg_fingerprint == GPG_FINGERPRINT
    logging.info(f"gpg_fingerprint={gpg_fingerprint}")

    orc_id = "0000-0001-7642-0442"
    orc_id_as_bytes = str.encode(orc_id)

    assert not config.ebb.isOrcIDVerified(accounts[0]), "orc_id initial value should be false"
    config.ebb.authenticateOrcID(accounts[0], orc_id_as_bytes, {"from": accounts[0]})
    assert config.ebb.isOrcIDVerified(accounts[0]), "isOrcIDVerified is failed"

    # orc_id should only set once for the same user
    with brownie.reverts():
        config.ebb.authenticateOrcID(accounts[0], orc_id_as_bytes, {"from": accounts[0]})

    *_, b = config.ebb.getRequesterInfo(accounts[0])
    assert orc_id == b.decode("utf-8").replace("\x00", ""), "orc_id set false"
    return provider_registered_bn
9385a0291af2f306075bc2775d53cd67b442d985
30,602
from typing import Callable


def get_signature_and_params(func: Callable):
    """Get the parameters and signature from a coroutine.

    func: Callable
        The coroutine from whom the information should be extracted.

    Returns
    -------
    Tuple[List[Union[:class:`str`, :class:`inspect.Parameter`]]]
        Signature and list of parameters of the coroutine.
    """
    if isclass(func):
        func = getattr(func, "__init__")
        if func is object.__init__:
            return [], []

    sig = signature(func).parameters
    params = list(sig)

    if should_pass_cls(func):
        del params[0]

    return sig, params
cc53ba8f8cf54d8cf6167b57bc6ecb626605d333
30,603
def quadratic_formula(polynomial):
    """
    input is single-variable polynomial of degree 2
    returns zeros
    """
    if len(polynomial.term_matrix) == 3:
        if polynomial.term_matrix[2][1] == 1:
            a, b = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0]
            return 0, -b/a
        a, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0]
        return (-c/a)**.5, -(-c/a)**.5
    if len(polynomial.term_matrix) == 2:
        a, b, c = polynomial.term_matrix[1][0], 0, 0
    elif len(polynomial.term_matrix) == 3:
        a, b, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0], 0
    else:
        a, b, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0], polynomial.term_matrix[3][0]
    # quadratic formula: (-b +/- sqrt(b**2 - 4ac)) / (2a)
    ans1 = (-b + (b**2 - 4*a*c)**.5)/(2*a)
    ans2 = (-b - (b**2 - 4*a*c)**.5)/(2*a)
    if ans1 == ans2:
        return ans1
    return ans1, ans2
5501abff2fadcd237e3cb0efc4bca615eef455da
30,604
def handle_domain_deletion_commands(client: Client, demisto_args: dict) -> str:
    """
    Removes domains from the inbound blacklisted list.

    :type client: ``Client``
    :param client: Client to use.

    :type demisto_args: ``dict``
    :param demisto_args: The demisto arguments.

    :return: A message which says that the domains were successfully deleted from the list.
    :rtype: ``str``
    """
    demisto_args = handle_args(demisto_args)
    domain = demisto_args.get('domain')
    if not domain:
        raise DemistoException('A domain must be provided in order to remove it from the inbound blacklisted list.')
    demisto_args['domain'] = ','.join(argToList(domain))
    raw_result = client.inbound_blacklisted_domain_remove_command(demisto_args)
    if raw_result.status_code != 204:
        raise DemistoException(
            f'Failed to remove the Domains from the inbound blacklisted list [{raw_result.status_code}]')
    return 'Domains were successfully removed from the inbound blacklisted list'
54f9174a3b8db9820e612cd3782dac4b9af6554e
30,605
def create_l2_lag_interface(name, phys_ports, lacp_mode="passive", mc_lag=False,
                            fallback_enabled=False, vlan_ids_list=[], desc=None,
                            admin_state="up", **kwargs):
    """
    Perform a POST call to create a Port table entry for L2 LAG interface.

    :param name: Alphanumeric name of LAG Port
    :param phys_ports: List of physical ports to aggregate (e.g. ["1/1/1", "1/1/2", "1/1/3"])
    :param lacp_mode: Should be either "passive" or "active." Defaults to "passive" if not specified.
    :param mc_lag: Boolean to determine if the LAG is multi-chassis. Defaults to False if not specified.
    :param fallback_enabled: Boolean to determine if the LAG uses LACP fallback. Defaults to False
        if not specified.
    :param vlan_ids_list: Optional list of integer VLAN IDs to add as trunk VLANS. Defaults to
        empty list if not specified.
    :param desc: Optional description for the interface. Defaults to nothing if not specified.
    :param admin_state: Optional administratively-configured state of the port. Defaults to "up"
        if not specified
    :param kwargs:
        keyword s: requests.session object with loaded cookie jar
        keyword url: URL in main() function
    :return: True if successful, False otherwise
    """
    if kwargs["url"].endswith("/v1/"):
        return _create_l2_lag_interface_v1(name, phys_ports, lacp_mode, mc_lag, fallback_enabled,
                                           vlan_ids_list, desc, admin_state, **kwargs)
    else:  # Updated else for when version is v10.04
        success = _create_l2_lag_interface(name, phys_ports, lacp_mode, mc_lag, fallback_enabled,
                                           vlan_ids_list, desc, admin_state, **kwargs)
        if mc_lag or fallback_enabled:
            return success and _update_l2_lag_interface(name, mc_lag, fallback_enabled, **kwargs)
        else:
            return success
7dcce04a7c9dd5d533bcf40bdda94c5fc8ff2951
30,606
def get_train_image_matrices(folder_name, num_images=4):
    """Gets image matrices for training images.

    :param folder_name: String with name of training image folder in
        input_data/train_images directory path.
    :param num_images: Integer with number of images.
    :return: Matrices from training images.
    """
    image_matrices = []
    path = './input_data/train_images/' + folder_name + '/'
    for image_num in range(4, 4 + num_images):
        image_name = path + str(image_num) + '.tif'
        image_matrices.append(utils.read_image(image_name=image_name))
    return image_matrices
be75cd1246421b13830931fd6551c94c0bd673f6
30,608
import torch


def to_chainer_device(device):
    """Create a chainer device from a given torch device.

    Args:
        device (torch.device): Device to be converted.

    Returns:
        A ``chainer.device`` object corresponding to the given input.
    """
    if not isinstance(device, torch.device):
        raise TypeError('The argument should be torch device.')
    if device.type == 'cpu':
        return chainer.get_device('@numpy')
    if device.type == 'cuda':
        device_index = 0 if device.index is None else device.index
        return chainer.get_device('@cupy:{}'.format(device_index))
    raise ValueError('{} is not supported.'.format(device.type))
d2d1c9ddf50792225260133f1d434e3166b6338b
30,609
def decode_region(code):
    """ Returns the region name for the given region code.
        For example: decode_region("be") => "Belgium".
    """
    for tag, (language, region, iso639, iso3166) in LANGUAGE_REGION.iteritems():
        if iso3166 == code.upper():
            return region
5a4467088d8824a8647d9c7ed89381b94ddab096
30,610
def neighbourhood_peaks(signal, n=10):
    """Computes the number of peaks from a defined neighbourhood of the signal.

    Reference: Christ, M., Braun, N., Neuffer, J. and Kempa-Liehr A.W. (2018).
    Time Series FeatuRe Extraction on basis of Scalable Hypothesis tests
    (tsfresh -- A Python package). Neurocomputing 307 (2018) 72-77

    Parameters
    ----------
    signal : nd-array
        Input from which the number of neighbourhood peaks is computed
    n : int
        Number of peak's neighbours to the left and to the right

    Returns
    -------
    int
        The number of peaks from a defined neighbourhood of the signal

    """
    signal = np.array(signal)
    subsequence = signal[n:-n]
    # initial iteration
    peaks = ((subsequence > np.roll(signal, 1)[n:-n]) &
             (subsequence > np.roll(signal, -1)[n:-n]))
    for i in range(2, n + 1):
        peaks &= (subsequence > np.roll(signal, i)[n:-n])
        peaks &= (subsequence > np.roll(signal, -i)[n:-n])
    return np.sum(peaks)
b684419844a747633d667abab9b6819f61d13d05
30,612
def check_success(env, policy, act_noise_pct, render=False):
    """Tests whether a given policy solves an environment

    Args:
        env (metaworld.envs.MujocoEnv): Environment to test
        policy (metaworld.policies.policies.Policy): Policy that's supposed to
            succeed in env
        act_noise_pct (float): Decimal value indicating std deviation of the
            noise as a % of action space
        render (bool): Whether to render the env in a GUI

    Returns:
        (bool, int): Success flag, Trajectory length
    """
    action_space_ptp = env.action_space.high - env.action_space.low

    env.reset()
    env.reset_model()
    o = env.reset()
    assert o.shape == env.observation_space.shape

    t = 0
    done = False
    success = False
    while not success and not done:
        a = policy.get_action(o)
        a = np.random.normal(a, act_noise_pct * action_space_ptp)
        try:
            o, r, done, info = env.step(a)
            if render:
                env.render()
            t += 1
            success |= bool(info['success'])
        except ValueError:
            break

    return success, t
260a03bc47c3864894b5d2922a636bd54b8d1253
30,613
import glob


def import_data(file_regex, index_col_val=None, parse_dates=None, date_format=None):
    """
    takes in a regular expression describing the filepath to the data files
    and returns a pandas dataFrame

    Usage1: var_name = import_data.import_data("./hackathon_data/*20*.dat")
    Usage2: var_name = import_data.import_data("./hackathon_data/*20*.dat",
                                               "column to index with",
                                               "column of dates",
                                               "format of dates")
    """
    all_files = glob.glob(file_regex)
    all_files.sort()
    list_ = []
    for file_ in all_files:
        if index_col_val is not None and parse_dates is not None and \
                date_format is not None:
            df = pd.read_csv(file_, parse_dates=[parse_dates],
                             index_col=index_col_val,
                             date_parser=lambda x: parse_date(x, date_format))
        elif index_col_val is not None:
            df = pd.read_csv(file_, index_col=index_col_val)
        elif parse_dates is not None and date_format is not None:
            df = pd.read_csv(file_, parse_dates=[parse_dates],
                             date_parser=lambda x: parse_date(x, date_format))
        else:
            df = pd.read_csv(file_)
        list_.append(df)
    ret = pd.concat(list_)
    ret = ret[ret.index.notnull()]
    ret.on_promotion.replace(('Y', 'N'), (1, 0), inplace=True)
    return ret
411f767bd27cb40d9aaea28feb94da9f29b1f5aa
30,614
def shift_num_right_by(num: int, digits: int) -> int:
    """Shift a number to the right by discarding some digits

    We actually use string conversion here since division can provide
    wrong results due to precision errors for very big numbers. e.g.:
    6150000000000000000000000000000000000000000000000 // 1e27
    6.149999999999999e+21   <--- wrong
    """
    try:
        return int(str(num)[:-digits])
    except ValueError:
        # this can happen if num is 0, in which case the shifting code above will raise
        # https://github.com/rotki/rotki/issues/3310
        # Also log if it happens for any other reason
        if num != 0:
            log.error(f'At shift_num_right_by() got unexpected value {num} for num')
        return 0
ff29f5fbc53c8cfa5fa4172fd4e6e7c0b8b4e27b
30,615
def index_handler(request):
    """
    List latest 6 articles, or post a new article.
    """
    if request.method == 'GET':
        return get_article_list(request)
    elif request.method == 'POST':
        return post_article(request)
b64a81c4bde4d83f99663ffb6384ff6cde8a217c
30,616
import numpy


def back_propogation(weights, aa, zz, y1hot, lam=0.0):
    """Perform a back propogation step

    Args:
      weights (``list`` of numpy.ndarray): weights between each layer
      aa (``list`` of numpy.ndarray): activation of nodes for each layer.
        The last item in the list is the hypothesis.
      zz (``list`` of numpy.ndarray): input into nodes for each layer.
      y1hot (numpy.ndarray): 2-D array of one-hot vectors (1 per row)
      lam (``float``): regularization parameter

    Returns:
      weights_grad (``list`` of numpy.ndarray): d_J/d_weight
    """
    weights_grad = []
    m = y1hot.shape[0]
    n_layers = len(weights) + 1

    di_plus_1 = aa[-1] - y1hot
    i = n_layers - 2
    while i > 0:
        ones_col = numpy.ones(zz[i].shape[0])
        di = (
            di_plus_1.dot(weights[i]) *
            sigmoid_gradient(numpy.c_[ones_col, zz[i]])
        )
        di = di[:, 1:]
        weights_grad.append(di_plus_1.T.dot(aa[i]))
        i -= 1
        di_plus_1 = di.copy()
    weights_grad.append(di.T.dot(aa[0]))

    # we built it backwards
    weights_grad.reverse()

    # normalize by m
    weights_grad = [wg / m for wg in weights_grad]

    # add regularization (skip first columns)
    for i in range(n_layers - 1):
        weights_grad[i][:, 1:] += lam / m * weights[i][:, 1:]

    return weights_grad
2909809699ae3b3fd5ab97b6294391322cf3d8bb
30,621
async def get_reverse_objects_topranked_for_lst(entities):
    """
    get pairs that point to the given entity as the primary property
    primary properties are those with the highest rank per property
    see https://www.wikidata.org/wiki/Help:Ranking
    """

    # some lookups just take too long, so we remove them here
    remEntities = set()
    for entity in ['Q2']:
        if entity in entities:
            entities.remove(entity)
            remEntities.add(entity)

    # short-circuit, if nothing is left
    if not entities:
        return {k: [] for k in remEntities}

    # run the query
    res = await runQuerySingleKey(cacheReverseObjectTop, entities, """
        SELECT ?base ?prop ?parent
        WHERE {
          hint:Query hint:optimizer "None".
          VALUES ?base { %s }
          ?parent ?prop ?base .
          [] wikibase:directClaim ?prop .
        }
    """)

    # add the skipped entities again
    for k in remEntities:
        res[k] = []

    return res
7266b4f29e3c3878abc14c995da7713a8d7121e0
30,622
import numpy as np
from scipy import stats as stat  # sem and t.ppf come from scipy.stats, not the stdlib stat module


def compute_confidence_interval(data, confidence=0.95):
    """
    Function to determine the confidence interval

    :param data: input data
    :param confidence: confidence level
    :return: confidence interval
    """
    a = 1.0 * np.array(data)
    n = len(a)
    se = stat.sem(a)
    h = se * stat.t.ppf((1 + confidence) / 2., n - 1)
    return h
b7f64935cefdb2f60a7ca7fdc720b3ecddf7e89c
30,623
def adjacent_powerset(iterable):
    """
    Returns every combination of elements in an iterable where elements remain ordered and adjacent.
    For example, adjacent_powerset('ABCD') returns
    ['A', 'AB', 'ABC', 'ABCD', 'B', 'BC', 'BCD', 'C', 'CD', 'D']

    Args:
        iterable: an iterable

    Returns:
        a list of element groupings
    """
    return [iterable[a:b] for a in range(len(iterable)) for b in range(a + 1, len(iterable) + 1)]
951418b30d541e1dcdd635937ae609d429e3cd70
30,624
from typing import Iterator
from typing import Counter

from tqdm import tqdm  # the code calls tqdm(...) directly, so import the class


def export_ngrams(
    docs: Iterator[str], nlp: spacy.language.Language, n: str, patterns=False
) -> Counter:
    """
    Extracts n-gram frequencies of a series of documents

    Parameters
    ----------
    docs : Iterator[str]
        An iterator of documents, e.g. abstracts
    nlp : spacy.language.Language
        A spaCy language model, e.g. en_core_web_sm
    patterns : bool, optional
        Further analysis of neighboring tokens, by default False.
        If True, a spaCy matcher will be used to filter most of the stopword
        combinations that might not be of interest. The matcher will also extract
        bigrams made up of three tokens, like "Alzheimer's disease" and
        "human-like AI", while filtering most of the other punctuation.

    Returns
    -------
    Counter
        n-gram frequencies

    Raises
    ------
    ValueError
        In case that the 'patterns' options is used for anything but bigrams
    """
    n_grams = Counter()

    if "-" in n:
        parts = n.split("-")
        if len(parts) != 2:
            raise ValueError(f"Order of n-grams has wrong format: {n}")
        # Potential ValueErrors might be raised here
        start = int(parts[0])
        end = int(parts[1])
        if start > end:
            # Just switch it instead of raising an error
            end, start = start, end
        ns = range(start, end + 1)
    else:
        ns = [int(n)]

    if patterns:
        if not all(1 <= i <= 5 for i in ns):
            raise ValueError("Patterns can only be used for n-grams with n <= 5.")
        matcher = Matcher(nlp.vocab)
        for i in ns:
            matcher.add(f"{i}-grams", ngram_masks[i])
        for doc in tqdm(nlp.pipe(docs)):
            matches = matcher(doc)
            candidates = (
                doc[start:end].text
                for _, start, end in matches
                if (start - 1 >= 0 and doc[start - 1].text not in ("-") or start == 0)
                if (
                    end != len(doc) and doc[end].text not in ("-", "*") or end == len(doc)
                )
            )
            # some n-grams are part of bigger m-grams and might
            # start or end with a '-' because of that
            n_grams.update(
                c for c in candidates
                if not c[0] in ("-", "*", "%") and not c.endswith("-")
            )
    else:
        for doc in tqdm(nlp.pipe(docs)):
            for sent in doc.sents:
                for i in ns:
                    n_words = ngrams(sent.text.split(), n=i)
                    n_grams.update(list(" ".join(words) for words in n_words))

    return n_grams
242d0b3fcb2dffd2d35ae76416dfc7861bdfb916
30,625
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np

# FCNN, ExampleGenerator2D, Solution, Monitor2D and _trial_solution_2input are assumed
# to come from the surrounding neurodiffeq module.


def solve2D_system(
        pde_system, conditions, xy_min=None, xy_max=None,
        single_net=None, nets=None, train_generator=None, shuffle=True, valid_generator=None,
        optimizer=None, criterion=None, additional_loss_term=None, batch_size=16,
        max_epochs=1000, monitor=None, return_internal=False, return_best=False
):
    """Train a neural network to solve a PDE with 2 independent variables.

    :param pde_system: The PDE system to solve.
        If the PDE is :math:`F_i(u_1, u_2, ..., u_n, x, y) = 0`
        where :math:`u_i` is the i-th dependent variable and :math:`x` and :math:`y` are the
        independent variables, then `pde_system` should be a function that maps
        :math:`(u_1, u_2, ..., u_n, x, y)` to a list where the i-th entry is
        :math:`F_i(u_1, u_2, ..., u_n, x, y)`.
    :type pde_system: function
    :param conditions: The initial/boundary conditions.
        The ith entry of the conditions is the condition that :math:`x_i` should satisfy.
    :type conditions: list[`neurodiffeq.pde.DirichletBVP2D` or `neurodiffeq.pde.IBVP1D` or `neurodiffeq.pde.NoCondition`]
    :param xy_min: The lower bound of 2 dimensions.
        If we only care about :math:`x \\geq x_0` and :math:`y \\geq y_0`, then `xy_min` is `(x_0, y_0)`.
        Only needed when train_generator or valid_generator are not specified, defaults to None.
    :type xy_min: tuple[float, float], optional
    :param xy_max: The upper bound of 2 dimensions.
        If we only care about :math:`x \\leq x_1` and :math:`y \\leq y_1`, then `xy_max` is `(x_1, y_1)`.
        Only needed when train_generator or valid_generator are not specified, defaults to None.
    :type xy_max: tuple[float, float], optional
    :param single_net: The single neural network used to approximate the solution.
        Only one of `single_net` and `nets` should be specified, defaults to None.
    :type single_net: `torch.nn.Module`, optional
    :param nets: The neural networks used to approximate the solution, defaults to None.
    :type nets: list[`torch.nn.Module`], optional
    :param train_generator: The example generator to generate 1-D training points, defaults to None.
    :type train_generator: `neurodiffeq.pde.ExampleGenerator2D`, optional
    :param shuffle: Whether to shuffle the training examples every epoch, defaults to True.
    :type shuffle: bool, optional
    :param valid_generator: The example generator to generate 1-D validation points, defaults to None.
    :type valid_generator: `neurodiffeq.pde.ExampleGenerator2D`, optional
    :param optimizer: The optimization method to use for training, defaults to None.
    :type optimizer: `torch.optim.Optimizer`, optional
    :param criterion: The loss function to use for training, defaults to None.
    :type criterion: `torch.nn.modules.loss._Loss`, optional
    :param additional_loss_term: Extra terms to add to the loss function besides the part
        specified by `criterion`. The input of `additional_loss_term` should be the same as `pde_system`.
    :type additional_loss_term: function
    :param batch_size: The size of the mini-batch to use, defaults to 16.
    :type batch_size: int, optional
    :param max_epochs: The maximum number of epochs to train, defaults to 1000.
    :type max_epochs: int, optional
    :param monitor: The monitor to check the status of neural network during training, defaults to None.
    :type monitor: `neurodiffeq.pde.Monitor2D`, optional
    :param return_internal: Whether to return the nets, conditions, training generator,
        validation generator, optimizer and loss function, defaults to False.
    :type return_internal: bool, optional
    :param return_best: Whether to return the nets that achieved the lowest validation loss, defaults to False.
    :type return_best: bool, optional
    :return: The solution of the PDE. The history of training loss and validation loss.
        Optionally, the nets, conditions, training generator, validation generator,
        optimizer and loss function.
        The solution is a function that has the signature `solution(xs, ys, as_type)`.
    :rtype: tuple[`neurodiffeq.pde.Solution`, dict]; or tuple[`neurodiffeq.pde.Solution`, dict, dict]
    """

    ########################################### subroutines ###########################################
    def train(train_generator, net, nets, pde_system, conditions, criterion,
              additional_loss_term, shuffle, optimizer):
        train_examples_x, train_examples_y = train_generator.get_examples()
        train_examples_x, train_examples_y = train_examples_x.reshape((-1, 1)), train_examples_y.reshape((-1, 1))
        n_examples_train = train_generator.size
        idx = np.random.permutation(n_examples_train) if shuffle else np.arange(n_examples_train)

        train_loss_epoch = 0.0

        batch_start, batch_end = 0, batch_size
        while batch_start < n_examples_train:
            if batch_end > n_examples_train:
                batch_end = n_examples_train
            batch_idx = idx[batch_start:batch_end]
            xs, ys = train_examples_x[batch_idx], train_examples_y[batch_idx]

            train_loss_batch = calculate_loss(xs, ys, net, nets, pde_system, conditions,
                                              criterion, additional_loss_term)
            train_loss_epoch += train_loss_batch.item() * (batch_end - batch_start) / n_examples_train

            optimizer.zero_grad()
            train_loss_batch.backward()
            optimizer.step()

            batch_start += batch_size
            batch_end += batch_size

        return train_loss_epoch

    def valid(valid_generator, net, nets, pde_system, conditions, criterion, additional_loss_term):
        valid_examples_x, valid_examples_y = valid_generator.get_examples()
        xs, ys = valid_examples_x.reshape((-1, 1)), valid_examples_y.reshape((-1, 1))
        valid_loss_epoch = calculate_loss(xs, ys, net, nets, pde_system, conditions,
                                          criterion, additional_loss_term)
        valid_loss_epoch = valid_loss_epoch.item()
        return valid_loss_epoch

    def calculate_loss(xs, ys, net, nets, pde_system, conditions, criterion, additional_loss_term):
        us = _trial_solution_2input(net, nets, xs, ys, conditions)
        Fuxys = pde_system(*us, xs, ys)
        loss = sum(
            criterion(Fuxy, torch.zeros_like(xs))
            for Fuxy in Fuxys
        )
        if additional_loss_term is not None:
            loss += additional_loss_term(*us, xs, ys)
        return loss

    ###################################################################################################

    if single_net and nets:
        raise RuntimeError('Only one of net and nets should be specified')

    # defaults to use a single neural network
    if (not single_net) and (not nets):
        # NOTE: the original bound this default network to a local `net` that was never used;
        # the rest of the function expects it in `single_net`.
        single_net = FCNN(n_input_units=2, n_output_units=len(conditions),
                          n_hidden_units=32, n_hidden_layers=1, actv=nn.Tanh)

    if single_net:
        # mark the Conditions so that we know which condition corresponds to which output unit
        for ith, con in enumerate(conditions):
            con.set_impose_on(ith)

    if not train_generator:
        if (xy_min is None) or (xy_max is None):
            raise RuntimeError('Please specify xy_min and xy_max when train_generator is not specified')
        train_generator = ExampleGenerator2D((32, 32), xy_min, xy_max, method='equally-spaced-noisy')
    if not valid_generator:
        if (xy_min is None) or (xy_max is None):
            raise RuntimeError('Please specify xy_min and xy_max when valid_generator is not specified')
        valid_generator = ExampleGenerator2D((32, 32), xy_min, xy_max, method='equally-spaced')

    if (not optimizer) and single_net:  # using a single net
        optimizer = optim.Adam(single_net.parameters(), lr=0.001)
    if (not optimizer) and nets:  # using multiple nets
        all_parameters = []
        for net in nets:
            all_parameters += list(net.parameters())
        optimizer = optim.Adam(all_parameters, lr=0.001)

    if not criterion:
        criterion = nn.MSELoss()

    loss_history = {'train': [], 'valid': []}

    if return_best:
        valid_loss_epoch_min = np.inf
        solution_min = None

    for epoch in range(max_epochs):
        train_loss_epoch = train(train_generator, single_net, nets, pde_system, conditions, criterion,
                                 additional_loss_term, shuffle, optimizer)
        loss_history['train'].append(train_loss_epoch)

        valid_loss_epoch = valid(valid_generator, single_net, nets, pde_system, conditions, criterion,
                                 additional_loss_term)
        loss_history['valid'].append(valid_loss_epoch)

        if monitor and epoch % monitor.check_every == 0:
            monitor.check(single_net, nets, conditions, loss_history)

        if return_best and valid_loss_epoch < valid_loss_epoch_min:
            valid_loss_epoch_min = valid_loss_epoch
            solution_min = Solution(single_net, nets, conditions)

    if return_best:
        solution = solution_min
    else:
        solution = Solution(single_net, nets, conditions)

    if return_internal:
        internal = {
            'single_net': single_net,
            'nets': nets,
            'conditions': conditions,
            'train_generator': train_generator,
            'valid_generator': valid_generator,
            'optimizer': optimizer,
            'criterion': criterion
        }
        return solution, loss_history, internal
    else:
        return solution, loss_history
f9763819a3df3477df88dea395c45d7a357c25c7
30,627
from torch import nn


def model_criterion(preds, labels):
    """
    Function: Model criterion to train the model
    """
    loss = nn.CrossEntropyLoss()
    return loss(preds, labels)
c4005131b30c2e5bab03d13ec00fcf96657b4fbb
30,628
from synapseclient import Synapse

# get_syntabledf is a helper defined elsewhere in the original module.


def get_dbmapping(syn: Synapse, project_id: str) -> dict:
    """Gets database mapping information

    Args:
        syn: Synapse connection
        project_id: Project id where new data lives

    Returns:
        {'synid': database mapping syn id,
         'df': database mapping pd.DataFrame}
    """
    project_ent = syn.get(project_id)
    dbmapping_synid = project_ent.annotations.get("dbMapping", "")[0]
    database_mappingdf = get_syntabledf(
        syn, f'select * from {dbmapping_synid}'
    )
    return {'synid': dbmapping_synid,
            'df': database_mappingdf}
cee2daf40886a68871b400ae06298eff095a8205
30,629
def end_position(variant_obj):
    """Calculate end position for a variant."""
    alt_bases = len(variant_obj['alternative'])
    num_bases = max(len(variant_obj['reference']), alt_bases)
    return variant_obj['position'] + (num_bases - 1)
e49110a1102ea2ca53053858597247799065f8e1
30,630
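A small usage sketch for end_position; the variant dictionaries are made-up examples with only the keys the function reads.

snv = {'position': 100, 'reference': 'A', 'alternative': 'C'}
insertion = {'position': 100, 'reference': 'A', 'alternative': 'ATT'}

print(end_position(snv))        # 100 - a single-base variant ends where it starts
print(end_position(insertion))  # 102 - the longest allele spans three bases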
def cast_to_server(server_params, topic, msg):
    """Invoke a remote method that does not return anything"""
    return _get_impl().cast_to_server(cfg.CONF, server_params, topic, msg)
0fb92932dbe6f23cbc230bd2f23891a514bffd7a
30,631
def get_classification_systems():
    """Retrieve all classification systems available in service."""
    system = db.session.query(LucClassificationSystem).all()
    return ClassificationSystemSchema().dump(system, many=True)
03ca32de57f319144c1d185a2f5260ffab269a15
30,632
def read_annotations(filename, tagset, labeled):
    """Read tsv data and return sentences and [word, tag, sentenceID, filename] list"""
    with open(filename, encoding="utf-8") as f:
        sentence = []
        sentence.append(["[CLS]", -100, -1, -1, None])
        sentences = []
        sentenceID = 0

        for line in f:
            if len(line) > 0:
                if line == '\n':
                    sentenceID += 1
                    sentence.append(["[SEP]", -100, -1, -1, None])
                    sentences.append(sentence)
                    sentence = []
                    sentence.append(["[CLS]", -100, -1, -1, None])
                else:
                    data = []
                    split_line = line.rstrip().split('\t')
                    data.append(split_line[0])
                    data.append(tagset[split_line[1]] if labeled else 0)
                    data.append(sentenceID)
                    data.append(filename)
                    sentence.append(data)

        sentence.append(["[SEP]", -100, -1, -1, None])
        if len(sentence) > 2:
            sentences.append(sentence)

    return sentences
bbb210fe631f1e10432ab6c18146d69933fe7187
30,633
import numpy as np


def find_rmse(data_1, data_2, ax=0):
    """
    Finds RMSE between data_1 and data_2

    Inputs
    ------
    data_1 (np.array)
    data_2 (np.array)
    ax (int)
        The axis (or axes) to mean over

    Outputs
    -------
    (float) RMSE between data_1 and data_2
    """
    return np.sqrt(np.mean((data_1 - data_2)**2, axis=ax))
aed7ee0d6fda234f452056a91eb70495343579ac
30,634
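A quick usage sketch for find_rmse with made-up arrays.

import numpy as np

obs = np.array([1.0, 2.0, 3.0])
model = np.array([1.0, 2.0, 5.0])

print(find_rmse(obs, model))  # sqrt(mean([0, 0, 4])) ~= 1.155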
def validate_tag_update(update):
    """
    Property: ResourceUpdateConstraint.TagUpdateOnProvisionedProduct
    """
    valid_tag_update_values = [
        "ALLOWED",
        "NOT_ALLOWED",
    ]
    if update not in valid_tag_update_values:
        raise ValueError("{} is not a valid tag update value".format(update))
    return update
c2abd7af00be52cf8cfecb5790d88a04d3207253
30,635
import pandas as pd


def bollinger_band(df: pd.DataFrame, window: int = 20, window_dev: int = 2) -> pd.DataFrame:
    """Implementation of bollinger band."""
    df_with_signals = df.copy()
    typical_price = (df["close"] + df["low"] + df["high"]) / 3
    df_with_signals["typical_price"] = typical_price
    std_dev = df_with_signals["typical_price"].rolling(window=window).std(ddof=0)
    df_with_signals["BOLA"] = df_with_signals["typical_price"].rolling(window=window).mean()
    df_with_signals["BOLU"] = df_with_signals["BOLA"] + window_dev * std_dev
    df_with_signals["BOLD"] = df_with_signals["BOLA"] - window_dev * std_dev
    return df_with_signals
69fb61a09512967c92fc997134cad67e7659774f
30,636
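A usage sketch for bollinger_band on a synthetic price frame; only the close/low/high columns the function reads are provided, and the price path is random and purely illustrative.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
close = 100 + rng.normal(0, 1, 60).cumsum()
df = pd.DataFrame({"close": close, "low": close - 0.5, "high": close + 0.5})

bands = bollinger_band(df, window=20, window_dev=2)
print(bands[["BOLD", "BOLA", "BOLU"]].tail())  # the first window-1 rows are NaN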
import numpy as np


def close_corner_contour(contour: np.ndarray, shape: tuple) -> np.ndarray:
    """Check if contours are in the corner, and close them if needed.

    Contours which cover a corner cannot be closed by joining the first and last
    element, because some of the area is missed. This algorithm adds the corner
    point to close the contours.

    Parameters
    ----------
    contour : (n,2) np.ndarray
        List of coordinates describing a contour.
    shape : tuple
        Shape of the source image. Used to check which corners the contour touches.

    Returns
    -------
    contour : (n+1,2) or (n,2) np.array
        Return a contour with a corner point added if needed, otherwise return
        the input contour
    """
    xmin, ymin = contour.min(axis=0)
    xmax, ymax = contour.max(axis=0)

    xdim, ydim = np.array(shape) - 1

    left = (xmin == 0)
    right = (xmax == xdim)
    bottom = (ymin == 0)
    top = (ymax == ydim)

    if bottom and left:
        extra_point = (0, 0)
    elif top and left:
        extra_point = (0, ydim)
    elif top and right:
        extra_point = (xdim, ydim)
    elif bottom and right:
        extra_point = (xdim, 0)
    else:
        # all good
        return contour

    contour = np.vstack([contour, extra_point])
    return contour
62564816c5e00131a5ec59242467cee464d6f5ac
30,637
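A usage sketch for close_corner_contour with a made-up contour that touches both the left and bottom edge of a 10x10 image but does not yet contain the corner point itself.

import numpy as np

contour = np.array([[0, 3], [3, 0], [2, 2]])

closed = close_corner_contour(contour, shape=(10, 10))
print(closed)
# [[0 3]
#  [3 0]
#  [2 2]
#  [0 0]]   <- corner point appended so the polygon covers the corner area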
def simulate_spatial_ratiometric_reading(
    do, temperature, sealed_patch_do=0, sealed_patch_kwargs={}, unsealed_patch_kwargs={}
):
    """
    Simulate a "spatial ratiometric" reading using a sealed DO patch as the ratiometric reference

    Args:
        do: Dissolved Oxygen partial pressure in mmHg in the unsealed patch
        temperature: Temperature in degrees Celsius
        sealed_patch_do: Optional (default=0). Dissolved Oxygen partial pressure in mmHg in the sealed patch
        sealed_patch_kwargs: Optional. Additional args passed to get_optical_reading_normalized for the sealed patch
        unsealed_patch_kwargs: Optional. Additional args passed to get_optical_reading_normalized for the unsealed patch

    Returns:
        A spatial ratiometric result: the ratio between normalized optical readings
        of the unsealed and sealed patches
    """
    unsealed_patch_reading = get_optical_reading_normalized(
        do, temperature, **unsealed_patch_kwargs
    )
    sealed_patch_reading = get_optical_reading_normalized(
        sealed_patch_do, temperature, **sealed_patch_kwargs
    )

    return unsealed_patch_reading / sealed_patch_reading
17bc66583c6d9c8a9c77b6e9e19f3adee2e73617
30,639
from .interactive._iplot_state import iplot_state
from ._state_visualization import plot_state as plot

# INTERACTIVE, HAS_MATPLOTLIB, _MSG, plt and VisualizationError are assumed to be
# defined at module level in the original source.


def plot_state(rho, method='city', filename=None, options=None, mode=None, show=False):
    """Plot a quantum state.

    This function provides several methods to plot a quantum state. There are two
    rendering backends either done in python using matplotlib or using js in a
    jupyter notebook using an externally hosted graphing library. To use the js
    you need to be running in jupyter and have network connectivity to the
    external server where the js library is hosted.

    Args:
        rho (ndarray): statevector or density matrix representation of a quantum state
        method (str): The plotting method to use. Valid choices are:

            - 'city': Plots the cityscape, two 3d bargraphs of the mixed state
              rho of the quantum state. This is the default.
            - 'paulivec': Plot the paulivec representation, a bar graph of the
              mixed state rho over the pauli matrices, of a quantum state
            - 'qsphere': Plot the qsphere representation of the quantum state
            - 'bloch': Plot the bloch vector for each qubit in the quantum state
            - 'wigner': Plot the equal angle slice spin Wigner function of an
              arbitrary quantum state.

        filename (str): If using the `mpl` mode save the output visualization
            as an image file to this path
        options (dict): A dict with options for visualization in `interactive`
            mode. The valid fields are:

            - width (int): graph horizontal size, must be specified with height
              to have an effect
            - height (integer): graph vertical size, must be specified with width
              to have an effect
            - slider (bool): activate slider (only used for the `paulivec` method)

        mode (str): The visualization mode to use, either `mpl` or `interactive`.
            Interactive requires running in jupyter and external network
            connectivity to work. By default this will use `mpl` unless you are
            running in jupyter and you have external connectivity.
        show (bool): If set to true the rendered image will open in a new window
            (mpl only)

    Returns:
        None: If used in interactive mode there is no return
        matplotlib.Figure: If used in mpl mode the matplotlib.Figure of the
            histogram will be returned.

    Raises:
        VisualizationError: If invalid mode is specified
        ImportError: If matplotlib is used but it's not installed or configured
    """
    fig = None
    if not mode:
        if INTERACTIVE:
            iplot_state(rho, method=method, options=options)
        elif HAS_MATPLOTLIB:
            fig = plot(rho, method=method, filename=filename, show=show)
        else:
            raise ImportError(_MSG % "plot_state")
    else:
        if mode == 'interactive':
            iplot_state(rho, method=method, options=options)
        elif mode == 'mpl':
            if HAS_MATPLOTLIB:
                fig = plot(rho, method=method, filename=filename, show=show)
            else:
                raise ImportError(_MSG % "plot_state")
        else:
            raise VisualizationError(
                "Invalid mode: %s, valid choices are 'interactive' or 'mpl'" % mode)
    if HAS_MATPLOTLIB:
        if fig:
            plt.close(fig)
    return fig
3266a41986b8c77a966fd5b76fb55e2b330dd05e
30,641
from datetime import datetime


def tick_format(ticktime):
    """Format the tick date/time"""
    datetime_object = datetime.strptime(ticktime, '%Y-%m-%dT%H:%M:%S.%fZ')
    return datetime_object.strftime("%H:%M:%S UTC %A %d %B")
6fa02f7627bc947646046a47ab7298aad68399d8
30,642
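A one-line usage sketch for tick_format with a made-up ISO-8601 timestamp.

print(tick_format('2021-03-15T14:30:00.000Z'))
# 14:30:00 UTC Monday 15 March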
def add_finite_filter_to_scorer(score_func):
    """Takes a scorer and returns a scorer that ignores NA / infinite elements in y_true.

    sklearn scorers (and others) don't handle arrays with 0 length.
    In that case, return None

    :param score_func: function that maps two arrays to a number.
        E.g. (y_true, y_pred) -> error
    :return: scorer that drops records where y_true is not finite
    """
    def score_func_finite(y_true, y_pred, **kwargs):
        y_true, y_pred = valid_elements_for_evaluation(y_true, y_pred)
        if len(y_true) == 0:  # returns None if there are no elements
            return None
        return score_func(y_true, y_pred, **kwargs)
    return score_func_finite
a6ee3874b12213fa2b5ea385a8343c8ba3e1462b
30,643
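A hedged usage sketch for add_finite_filter_to_scorer; it assumes the module's valid_elements_for_evaluation helper is available, since the wrapper above depends on it, and wraps sklearn's mean_absolute_error purely as an illustration.

import numpy as np
from sklearn.metrics import mean_absolute_error

safe_mae = add_finite_filter_to_scorer(mean_absolute_error)

y_true = np.array([1.0, np.nan, 3.0, np.inf])
y_pred = np.array([1.5, 2.0, 2.0, 4.0])

print(safe_mae(y_true, y_pred))  # MAE over the rows where y_true is finite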
import gc

import numpy as np
import pandas as pd

# kali, lsstlc, maf, bands, nsteps, nwalkers, cad_min, cad_max and dtype are assumed
# to be defined at module level in the original source.


def lsst_fit(lc, grp):
    """Take full mock LC and SDSS cadence to find best_fit params.

    Args:
        lc: Kali LC object, full mock LC.
        grp: HDF5 group storing the MCMC chains.
    """
    best_param = []  # store best-fit params
    ref_ls = []
    task = kali.carma.CARMATask(1, 0, nsteps=nsteps, nwalkers=nwalkers, nthreads=1)

    for cad_idx in range(cad_min, cad_max):

        # for new maf output
        cad = maf['cad'][cad_idx]
        ra = maf['ra'][cad_idx]
        dec = maf['dec'][cad_idx]

        # loop over required bands
        for band in bands:

            # start fitting
            task.clear()
            lc_down = lsstlc(ra, dec, cad[cad['filter'] == band]['expDate'], lc, fix_dt=True, band=band)
            task.fit(lc_down)

            # fitted params and chains to array and pass back
            fit = list(task.bestTau)
            fit.append(band)
            fit.append(ra)
            fit.append(dec)
            best_param.append(fit)
            mcmc_rec = np.rec.array([task.LnPosterior, task.Chain[0], task.Chain[1],
                                     task.rootChain[0], task.rootChain[1]], dtype=dtype)

            # create hdf5 dataset given id as combination of ra, dec and band
            dset = grp.create_dataset('{}_{}_{}'.format(ra, dec, band), dtype=dtype, data=mcmc_rec, shape=())

            # create reference to this dataset and store in para_fit dataframe
            ref_ls.append(dset.ref)

    df_p = pd.DataFrame(best_param, columns=['tau', 'sigma', 'band', 'ra', 'dec'])
    df_p['ref2chain'] = ref_ls

    # flush data into file
    grp.file.flush()
    gc.collect()

    return df_p
44cd48fe3c7d3de50fdab2c007a0f4c947ae3116
30,644
def getStudioModeStatus():
    """
    Indicates if Studio Mode is currently enabled.
    """
    return __createJSON("GetStudioModeStatus", {})
544ffccc459259b52b395aadb94c0439d824f7b4
30,645
from typing import Tuple

import pandas as pd


def calc_long_short_prec(
    pred: pd.Series, label: pd.Series, date_col="datetime", quantile: float = 0.2, dropna=False, is_alpha=False
) -> Tuple[pd.Series, pd.Series]:
    """
    calculate the precision for long and short operation

    :param pred/label: index is **pd.MultiIndex**, index name is **[datetime, instruments]**;
        columns names is **[score]**.

        .. code-block:: python

                                            score
            datetime            instrument
            2020-12-01 09:30:00 SH600068    0.553634
                                SH600195    0.550017
                                SH600276    0.540321
                                SH600584    0.517297
                                SH600715    0.544674

    label :
        label
    date_col :
        date_col

    Returns
    -------
    (pd.Series, pd.Series)
        long precision and short precision in time level
    """
    if is_alpha:
        label = label - label.mean(level=date_col)
    if int(1 / quantile) >= len(label.index.get_level_values(1).unique()):
        raise ValueError("Need more instruments to calculate precision")

    df = pd.DataFrame({"pred": pred, "label": label})
    if dropna:
        df.dropna(inplace=True)

    group = df.groupby(level=date_col)

    N = lambda x: int(len(x) * quantile)
    # find the top/low quantile of prediction and treat them as long and short target
    long = group.apply(lambda x: x.nlargest(N(x), columns="pred").label).reset_index(level=0, drop=True)
    short = group.apply(lambda x: x.nsmallest(N(x), columns="pred").label).reset_index(level=0, drop=True)

    groupll = long.groupby(date_col)
    l_dom = groupll.apply(lambda x: x > 0)
    l_c = groupll.count()

    groups = short.groupby(date_col)
    s_dom = groups.apply(lambda x: x < 0)
    s_c = groups.count()

    return (l_dom.groupby(date_col).sum() / l_c), (s_dom.groupby(date_col).sum() / s_c)
e74c6666922786522f55190d8f4d9125bb86c94d
30,646
# The imports below are an assumption based on how `ps`, `tf` and `assert_util` are used;
# in TensorFlow Probability these helpers come from the internal prefer_static/assert_util modules.
import tensorflow as tf
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import prefer_static as ps


def prepare_tuple_argument(arg, n, arg_name, validate_args=False):
    """Helper which processes `Tensor`s to tuples in standard form."""
    arg_size = ps.size(arg)
    arg_size_ = tf.get_static_value(arg_size)
    assertions = []
    if arg_size_ is not None:
        if arg_size_ not in (1, n):
            raise ValueError('The size of `{}` must be equal to `1` or to the rank '
                             'of the convolution (={}). Saw size = {}'.format(
                                 arg_name, n, arg_size_))
    elif validate_args:
        assertions.append(assert_util.assert_equal(
            ps.logical_or(arg_size == 1, arg_size == n),
            True,
            message=('The size of `{}` must be equal to `1` or to the rank of the '
                     'convolution (={})'.format(arg_name, n))))

    with tf.control_dependencies(assertions):
        arg = ps.broadcast_to(arg, shape=[n])
        arg = ps.unstack(arg, num=n)
    return arg
51f94eb8e4eef0b69df443ca71fdc9def3fd55a1
30,647
import zipfile


def isValidLibreOfficeFile(file_path):
    """
    Return true if given file is valid LibreOffice ods file containing
    manifest.xml, false otherwise.
    """
    try:
        with zipfile.ZipFile(file_path, 'a') as open_document:
            open_document.open(DOCUMENT_MANIFEST_PATH)
            return True
    except KeyError:
        return False
3e36bea3c7f3bd72b91cefba94087ea8afc5116e
30,648
import numpy as np


def mean_absolute_percentage_error(y_true, y_pred, zeros_strategy='mae'):
    """
    Similar to sklearn
    https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_error.html
    with options for behaviour around zeros

    :param y_true: array of true values
    :param y_pred: array of predicted values
    :param zeros_strategy: how to handle near-zero true values, either 'epsilon' or 'mae'
    :return: mean absolute percentage error
    """
    epsilon = np.finfo(np.float64).eps
    if zeros_strategy == 'epsilon':
        ape = np.abs(y_pred - y_true) / np.maximum(np.abs(y_true), epsilon)
    elif zeros_strategy == 'mae':
        ae = np.abs(y_pred - y_true)
        ape = ae / np.maximum(np.abs(y_true), epsilon)
        # When true values are very small, we take MAE
        small_y_mask = y_true < epsilon
        ape = np.where(small_y_mask, ae, ape)
    else:
        raise ValueError(f'Undefined zeros_strategy {zeros_strategy}')
    return np.mean(ape)
5720343835378e50399caafeada31685effde5de
30,649
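A usage sketch contrasting the two zero-handling strategies of the function above; the arrays are made up for illustration.

import numpy as np

y_true = np.array([100.0, 0.0, 50.0])
y_pred = np.array([110.0, 5.0, 45.0])

# 'mae' strategy: the zero-valued target contributes its absolute error (5.0)
# instead of an exploding percentage term
print(mean_absolute_percentage_error(y_true, y_pred, zeros_strategy='mae'))      # ~1.73
# 'epsilon' strategy: the zero is replaced by machine epsilon in the denominator,
# so the result blows up
print(mean_absolute_percentage_error(y_true, y_pred, zeros_strategy='epsilon'))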
import json
from xml.dom.minidom import parseString

# HEADER_CONTENT_TYPE, HEADER_REPRESENTATION_XML and HEADER_REPRESENTATION_JSON are
# module-level constants in the original source.


def __get_pretty_body__(headers, body):
    """
    Return a pretty printed body using the Content-Type header information
    :param headers: Headers for the request/response (dict)
    :param body: Body to pretty print (string)
    :return: Body pretty printed (string)
    """
    if HEADER_CONTENT_TYPE in headers:
        if HEADER_REPRESENTATION_XML == headers[HEADER_CONTENT_TYPE]:
            xml_parsed = parseString(body)
            pretty_xml_as_string = xml_parsed.toprettyxml()
            return pretty_xml_as_string
        else:
            if HEADER_REPRESENTATION_JSON in headers[HEADER_CONTENT_TYPE]:
                parsed = json.loads(body)
                return json.dumps(parsed, sort_keys=True, indent=4)
            else:
                return body
    else:
        return body
4cb173c8c5d8c924b58b0c39f5595e353e514eee
30,650