content: string, length 35 to 762k
sha1: string, length 40
id: int64, range 0 to 3.66M
def compute_lorentz(Phi, omega, sigma): """In a time-harmonic discretization with quantities .. math:: \\begin{align} A &= \\Re(a \\exp(\\text{i} \\omega t)),\\\\ B &= \\Re(b \\exp(\\text{i} \\omega t)), \\end{align} the time-average of :math:`A\\times B` over one period is .. math:: \\overline{A\\times B} = \\frac{1}{2} \\Re(a \\times b^*), see http://www.ece.rutgers.edu/~orfanidi/ewa/ch01.pdf. Since the Lorentz force generated by the current :math:`J` in the magnetic field :math:`B` is .. math:: F_L = J \\times B, its time average is .. math:: \\overline{F_L} = \\frac{1}{2} \\Re(j \\times b^*). With .. math:: J &= \\Re(\\exp(\\text{i} \\omega t) j e_{\\theta}),\\\\ B &= \\Re\\left( \\exp(i \\omega t) \\left( -\\frac{\\text{d}\\phi}{\\text{d}z} e_r + \\frac{1}{r} \\frac{\\text{d}(r\\phi)}{\\text{d}r} e_z \\right) \\right), we have .. math:: \\overline{F_L} &= \\frac{1}{2} \\Re\\left(j \\frac{d\\phi^*}{dz} e_z + \\frac{j}{r} \\frac{d(r\\phi^*)}{dr} e_r\\right)\\\\ &= \\frac{1}{2} \\Re\\left(\\frac{j}{r} \\nabla(r\\phi^*)\\right)\\\\ In the workpiece, we can assume .. math:: j = -\\text{i} \\sigma \\omega \\phi which gives .. math:: \\begin{align*} \\overline{F_L} &= \\frac{\\sigma\\omega}{2r} \\Im\\left( \\phi \\nabla(r \\phi^*) \\right)\\\\ &= \\frac{\\sigma\\omega}{2r} \\left( \\Im(\\phi) \\nabla(r \\Re(\\phi)) -\\Re(\\phi) \\nabla(r \\Im(\\phi)) \\right) \\end{align*} """ mesh = Phi[0].function_space().mesh() r = SpatialCoordinate(mesh)[0] return ( 0.5 * sigma * omega / r * (+Phi[1] * grad(r * Phi[0]) - Phi[0] * grad(r * Phi[1])) )
5b82df614d8245565e3427277ace2e0ba3fd27c5
10,900
def play_db(cursor, query_string, lookup_term):
    """
    Given a query string and a term, retrieve the list of plays
    associated with that term
    """
    play_list = []
    try:
        cursor.execute(query_string, [lookup_term])
        play_res = cursor.fetchall()
    except DatabaseError as err:
        LOG.error(
            "Error retrieving plays for %s: %s", lookup_term, err
        )
        return play_list
    for row in play_res:
        play_list.append(row)
    if not play_list:
        LOG.info("No plays for %s", lookup_term)
    return play_list
35ee0f96e122cddf65dbce7b127a8123b703b8f8
10,901
def find_nocc(two_arr, n):
    """
    Given two sorted arrays of the SAME lengths and a number, find the nth
    smallest number a_n and use two indices to indicate the numbers that are
    no larger than a_n. n can be real. Take the floor.
    """
    l = len(two_arr[0])
    if n >= 2 * l:
        return l, l
    if n == 0:
        return 0, 0
    res, n = n % 1, int(n)
    lo, hi = max(0, n - l - 1), min(l - 1, n - 1)
    while lo <= hi:
        mid = int((lo + hi) / 2)
        # image mid is the right answer
        if mid + 1 < l and n - mid - 2 >= 0:
            if two_arr[0][mid + 1] < two_arr[1][n - mid - 2]:
                lo = mid + 1
                continue
        if n - mid - 1 < l:
            if two_arr[1][n - mid - 1] < two_arr[0][mid]:
                hi = mid
                continue
        break
    if n - mid - 1 >= l or mid + 1 < l and two_arr[0][mid + 1] < two_arr[1][n - mid - 1]:
        return mid + res + 1, n - mid - 1
    else:
        return mid + 1, n - mid - 1 + res
42c8998e24095f03b0d873a0c9ad1f63facab8cb
10,902
import json


def get_dict(str_of_dict: str, order_key='', sort_dict=False) -> list:
    """Function returns the list of dicts:
    :param str_of_dict: string got from DB (e.g. {"genre_id": 10, "genre_name": "name1"},
        {"genre_id": 11, "genre_name": "name12"},...),
    :param order_key: the key by which dictionaries will be sorted
        (required if flag 'sort_dict=True'),
    :param sort_dict: flag for sorting the dictionary (boolean).
    :return: list of dicts (e.g. [{"genre_id": 10, "genre_name": "name1"},
        {"genre_id": 11, "genre_name": "name12"},...])"""
    result_dict = list()
    if str_of_dict:
        result_dict = json.loads('[' + str_of_dict + ']')
        if sort_dict and order_key:
            try:
                result_dict = sorted(result_dict, key=lambda i: i[order_key])
                return result_dict
            except KeyError:
                return result_dict
        return result_dict
    else:
        return result_dict
81d20db2dbe929693994b5b94aa971850ef9c838
10,903
import hashlib import struct def get_richpe_hash(pe): """Computes the RichPE hash given a file path or data. If the RichPE hash is unable to be computed, returns None. Otherwise, returns the computed RichPE hash. If both file_path and data are provided, file_path is used by default. Source : https://github.com/RichHeaderResearch/RichPE """ if pe.RICH_HEADER is None: return None # Get list of @Comp.IDs and counts from Rich header # Elements in rich_fields at even indices are @Comp.IDs # Elements in rich_fields at odd indices are counts rich_fields = pe.RICH_HEADER.values if len(rich_fields) % 2 != 0: return None # The RichPE hash of a file is computed by computing the md5 of specific # metadata within the Rich header and the PE header md5 = hashlib.md5() # Update hash using @Comp.IDs and masked counts from Rich header while len(rich_fields): compid = rich_fields.pop(0) count = rich_fields.pop(0) mask = 2 ** (count.bit_length() // 2 + 1) - 1 count |= mask md5.update(struct.pack("<L", compid)) md5.update(struct.pack("<L", count)) # Update hash using metadata from the PE header md5.update(struct.pack("<L", pe.FILE_HEADER.Machine)) md5.update(struct.pack("<L", pe.FILE_HEADER.Characteristics)) md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.Subsystem)) md5.update(struct.pack("<B", pe.OPTIONAL_HEADER.MajorLinkerVersion)) md5.update(struct.pack("<B", pe.OPTIONAL_HEADER.MinorLinkerVersion)) md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MajorOperatingSystemVersion)) md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MinorOperatingSystemVersion)) md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MajorImageVersion)) md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MinorImageVersion)) md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MajorSubsystemVersion)) md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MinorSubsystemVersion)) return md5.hexdigest()
30e5437f36f76a6225eaba579d55218440ab46b9
10,904
def get_input(label, default=None):
    """Prompt the user for input.

    :param label: The label of the prompt.
    :type label: str
    :param default: The default value.
    :rtype: str | None
    """
    if default:
        _label = "%s [%s]: " % (label, default)
    else:
        _label = "%s: " % label
    print("")
    value = input(_label)
    if not value:
        return default
    return value
11de813f0fcfd16f1198299030656c07392f95c9
10,905
import logging def get_pretrain_data_text(data, batch_size, num_ctxes, shuffle, num_buckets, vocab, tokenizer, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, whole_word_mask, num_parts=1, part_idx=0, num_workers=1): """Get a data iterator from raw text documents. Parameters ---------- batch_size : int The batch size per GPU. num_ctxes : int The number of GPUs. shuffle : bool Whether to shuffle the data. num_buckets : int The number of buckets for the FixedBucketSampler for training. vocab : BERTVocab The vocabulary. tokenizer : BERTTokenizer or BERTSPTokenizer The tokenizer. max_seq_length : int The hard limit of maximum sequence length of sentence pairs. short_seq_prob : float The probability of sampling sequences shorter than the max_seq_length. masked_lm_prob : float The probability of replacing texts with masks/random words/original words. max_predictions_per_seq : int The hard limit of the number of predictions for masked words whole_word_mask : bool Whether to use whole word masking. num_parts : int The number of partitions for the dataset. part_idx : int The index of the partition to read. num_workers : int The number of worker processes for dataset contruction. """ num_files = len(nlp.utils.glob(data)) logging.info('%d files are found.', num_files) assert num_files >= num_parts, \ 'The number of training text files must be no less than the number of ' \ 'workers/partitions (%d). Only %d files at %s are found.'%(num_parts, num_files, data) dataset_params = {'tokenizer': tokenizer, 'max_seq_length': max_seq_length, 'short_seq_prob': short_seq_prob, 'masked_lm_prob': masked_lm_prob, 'max_predictions_per_seq': max_predictions_per_seq, 'vocab':vocab, 'whole_word_mask': whole_word_mask} dataset_fn = SimpleDatasetFn(BERTPretrainDataset, dataset_params) sampler_fn = BERTSamplerFn(batch_size, shuffle, num_ctxes, num_buckets) dataloader_fn = BERTDataLoaderFn(num_ctxes, vocab) split_sampler = nlp.data.SplitSampler(num_files, num_parts=num_parts, part_index=part_idx) dataloader = DatasetLoader(data, split_sampler, dataset_fn, sampler_fn, dataloader_fn, num_dataset_workers=num_workers) return dataloader
986ba7afc87f8ce5b054816de365e1c2793f6876
10,906
def define_app_flags(scenario_num):
    """ Define the TensorFlow application-wide flags

    Returns:
        FLAGS: TensorFlow flags
    """
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_boolean('save_model', False, 'save model to disk')
    tf.app.flags.DEFINE_string('summaries_dir', './logs', 'tensorboard summaries')
    tf.app.flags.DEFINE_string('ckpt_dir', './saved_models/', 'check point dir')
    tf.app.flags.DEFINE_string('scenario_num', scenario_num, 'Scenario number')
    tf.app.flags.DEFINE_string('errors_dir', './errors/', 'Errors dir')
    return FLAGS
de79e076db37f7981633b3b2b38db6b462155709
10,907
def longitude_validator(value):
    """Perform longitude validation.
    """
    valid = -180 < value < 180
    if not valid:
        raise ValidationError(_('longitude not in range of -180 < value < 180'))
    return value
866c45da71d1b4d6b2d5bd60e331caecb365f297
10,908
import os
import json


def getVariables():
    """
    Retrieves the variables.json file.
    """
    if os.path.exists('variables.json'):
        with open('variables.json') as jsonFile:
            variables = json.loads(jsonFile.read())
        return variables
    else:
        variables = {}
        variables['path'] = ''
        return variables
ba0c37c14e92caa9bb83bb078d864541cbeec4ac
10,909
from typing import List import random def stop_random_tasks( cluster: str, task_count: int = None, task_percent: int = None, service: str = None, reason: str = "Chaos Testing", configuration: Configuration = None, secrets: Secrets = None, ) -> List[AWSResponse]: """ Stop a random number of tasks based on given task_count or task_percent You can specify a cluster by its ARN identifier or, if not provided, the default cluster will be picked up. :param cluster: The ECS cluster Name :param task_count: The number of tasks to stop :param task_percent: The percentage of total tasks to stop :param service: The ECS service name :param reason: An explanation of why the service was stopped :param configuration: access values used by actions/probes :param secrets: values that need to be passed on to actions/probes :return: List[Dict[str, Any]] """ if not any([task_count, task_percent]) or all([task_count, task_percent]): raise FailedActivity('Must specify one of "task_count", "task_percent"') client = aws_client("ecs", configuration, secrets) validate(client, cluster, service) tasks = list_running_tasks_in_cluster( cluster=cluster, client=client, service=service ) if task_percent: task_count = int(float(len(tasks) * float(task_percent)) / 100) if len(tasks) < task_count: raise FailedActivity( "Not enough running tasks in {} to satisfy " "stop count {} ({})".format(cluster, task_count, len(tasks)) ) tasks = random.sample(tasks, task_count) results = [] for task in tasks: logger.debug(f"Stopping ECS task: {task}") response = client.stop_task(cluster=cluster, task=task, reason=reason) results.append( { "Task_Id": response["task"]["taskArn"], "Desired_Status": response["task"]["desiredStatus"], } ) return results
8e56f5aee8254deb7d11e6f48477491a8e572a99
10,910
def test_create_batch_multi_record_update_fails(shared_zone_test_context): """ Test recordsets with multiple records cannot be edited in batch (relies on config, skip-prod) """ client = shared_zone_test_context.ok_vinyldns_client ok_zone = shared_zone_test_context.ok_zone # record sets to setup a_update_name = generate_record_name() a_update_fqdn = a_update_name + ".ok." a_update = get_recordset_json(ok_zone, a_update_name, "A", [{"address": "1.1.1.1"}, {"address": "1.1.1.2"}], 200) txt_update_name = generate_record_name() txt_update_fqdn = txt_update_name + ".ok." txt_update = get_recordset_json(ok_zone, txt_update_name, "TXT", [{"text": "hello"}, {"text": "again"}], 200) a_delete_name = generate_record_name() a_delete_fqdn = a_delete_name + ".ok." a_delete = get_recordset_json(ok_zone, a_delete_name, "A", [{"address": "1.1.1.1"}, {"address": "1.1.1.2"}], 200) txt_delete_name = generate_record_name() txt_delete_fqdn = txt_delete_name + ".ok." txt_delete = get_recordset_json(ok_zone, txt_delete_name, "TXT", [{"text": "hello"}, {"text": "again"}], 200) batch_change_input = { "comments": "this is optional", "changes": [ get_change_A_AAAA_json(a_update_fqdn, change_type="DeleteRecordSet"), get_change_A_AAAA_json(a_update_fqdn, address="1.2.3.4"), get_change_A_AAAA_json(a_update_fqdn, address="4.5.6.7"), get_change_TXT_json(txt_update_fqdn, change_type="DeleteRecordSet"), get_change_TXT_json(txt_update_fqdn, text="some-multi-text"), get_change_TXT_json(txt_update_fqdn, text="more-multi-text"), get_change_A_AAAA_json(a_delete_fqdn, change_type="DeleteRecordSet"), get_change_TXT_json(txt_delete_fqdn, change_type="DeleteRecordSet"), # adding an HVD so this will fail if accidentally run against wrong config get_change_A_AAAA_json("high-value-domain") ] } to_delete = [] try: for rs in [a_update, txt_update, a_delete, txt_delete]: create_rs = client.create_recordset(rs, status=202) to_delete.append(client.wait_until_recordset_change_status(create_rs, 'Complete')) response = client.create_batch_change(batch_change_input, status=400) def existing_err(name, type): return 'RecordSet with name {} and type {} cannot be updated in a single '.format(name, type) + \ 'Batch Change because it contains multiple DNS records (2).' def new_err(name, type): return 'Multi-record recordsets are not enabled for this instance of VinylDNS. ' \ 'Cannot create a new record set with multiple records for inputName {} and type {}.'.format(name, type) assert_error(response[0], error_messages=[existing_err(a_update_fqdn, "A")]) assert_error(response[1], error_messages=[existing_err(a_update_fqdn, "A"), new_err(a_update_fqdn, "A")]) assert_error(response[2], error_messages=[existing_err(a_update_fqdn, "A"), new_err(a_update_fqdn, "A")]) assert_error(response[3], error_messages=[existing_err(txt_update_fqdn, "TXT")]) assert_error(response[4], error_messages=[existing_err(txt_update_fqdn, "TXT"), new_err(txt_update_fqdn, "TXT")]) assert_error(response[5], error_messages=[existing_err(txt_update_fqdn, "TXT"), new_err(txt_update_fqdn, "TXT")]) assert_error(response[6], error_messages=[existing_err(a_delete_fqdn, "A")]) assert_error(response[7], error_messages=[existing_err(txt_delete_fqdn, "TXT")]) finally: clear_recordset_list(to_delete, client)
b2fe0cea07af57996058cdb0f9a31cbbf11a88ce
10,911
from collections import OrderedDict


def _build_colormap(data, hue, palette, order):
    """Builds a colormap."""
    if hue is None:
        color_map = {}
    else:
        if palette is None:
            palette = sns.color_palette()
        if order is None:
            order = data[hue].unique()
        color_map = OrderedDict(zip(order, palette))
    return color_map
82294634a1295fc68e5d3afb05fa00d83dfdc6ea
10,912
def f_is_oword(*args):
    """
    f_is_oword(F, arg2) -> bool

    See 'is_oword()'

    @param F (C++: flags_t)
    """
    return _ida_bytes.f_is_oword(*args)
a6d75a65b527ebdd029a5d3e65a756bcbb86561a
10,913
def aggregate_CSV_files(data_path):
    """
    Aggregate the data in CSV files, specified in the config file,
    into a single pandas DataFrame object.
    """
    merge_queue = []
    for path in data_path:
        data_df = pd.read_csv(path, na_values=['.'])
        data_df.index = pd.to_datetime(data_df['DATE'], format='%Y-%m-%d')
        data_df = data_df[data_df.index > c.START_DATE]
        del data_df['DATE']
        merge_queue.append(data_df)
    aggregate_df = pd.concat(merge_queue, sort=True, axis=1)
    aggregate_df.sort_index(inplace=True)
    return aggregate_df
281ca2a5e84e2dfbb2c2269083d0d2be5654fb75
10,914
def dR2(angle: np_float) -> np.ndarray:
    """Derivative of a rotation matrix around the second axis with respect to the rotation angle

    Args:
        angle: Scalar, list or numpy array of angles in radians.

    Returns:
        Numpy array: Rotation matrix or array of rotation matrices.
    """
    zero = _zero(angle)
    cosA, sinA = np.cos(angle), np.sin(angle)
    return _roll_axes(np.array([[-sinA, zero, -cosA],
                                [zero, zero, zero],
                                [cosA, zero, -sinA]]))
5080c78c46505ed9e155fb76ae4a9be3b6e5d685
10,915
import os def build_symm_filter_commands(chainfiles, chromref, outpath, cmd, jobcall): """ :return: """ chromfiles = collect_full_paths(chromref, '*.tsv') assert chromfiles, 'No chromosome files found at location: {}'.format(chromref) assm_chrom = dict() for chrf in chromfiles: assm = os.path.basename(chrf).split('_')[0] sizes = read_chromsizes(chrf) assm_chrom[assm] = list(sizes.keys()) params = [] for chf in chainfiles: fn = os.path.basename(chf) target, query = fn.split('.', 1)[0].split('_to_') chroms = assm_chrom[query] for c in chroms: outname = '{}_to_{}.{}.symmap.tsv.gz'.format(target, query, c) outfull = os.path.join(outpath, outname) tmp = cmd.format(**{'chrom': c}) params.append([chf, outfull, tmp, jobcall]) if len(chainfiles) > 0: assert params, 'No parameters created for chain symmetry filtering' return params
b76b978d805802dc46666ad69efe5e7c89bea6b6
10,916
def clear_predecessor(n):
    """ Sets n's predecessor to None

    :param n: node on which to call clear_predecessor
    :return: string of response
    """
    def clear(node):
        node.predecessor = None

    n.event_queue.put(clear)
    resp_header = {"status": STATUS_OK}
    return utils.create_request(resp_header, {})
e5c071572799c8df6b629d0bb1cbde4d106a4e95
10,917
import os


def resource_file():
    """
    Create an empty resource file
    :return:
    """
    def _resource_file(dirname, filename):
        filepath = os.path.join(dirname, filename)
        open(filepath, 'a').close()
        return filepath

    return _resource_file
044b8561bed7660e74b02f359bceb55604431cb6
10,918
def get_local_variable_influence(model, form_data):
    """ """
    row = format_data_to_row(form_data)
    model_obj = read_model(model.path, model.file_type)
    df = load_dataset_sample(model.dataset, nrows=50)
    df = df[model.dataset.model_columns]
    explainer = load_model_explainer_from_obj(model_obj, df)
    prediction = list()
    prediction.append(model_obj.predict(row)[0])
    if hasattr(model_obj, 'predict_proba'):
        prediction.append(model_obj.predict_proba(row)[0])
    base_value = explainer.explainer.expected_value
    variable_influence = compute_local_influence(explainer, row)
    return variable_influence, prediction, base_value
403c2e89937a7b8bfbeb1ce44d49147fe9c35ddc
10,919
def submit_experiment(body, **kwargs):
    """Submit an experiment

    :param body: experiment payload
    :type body: dict | bytes

    :rtype: StatusSerializer
    """
    serializer = ExperimentSerializer.from_dict(body)
    check_experiment_permission(serializer, kwargs["token_info"])
    stub = get_experiments_services_stub()
    response = stub.Submit(job_pb2.Experiment(**body))
    if response.status != 200:
        return ErrorSerializer(status=response.status, title="Api Error",
                               detail=response.message), response.status
    return StatusSerializer.from_dict(util.deserialize_protobuf(response))
21b91876f1d9ffa4b55c296a2e1dc9a2c66e1026
10,920
def obj_assert_check(cls):
    """
    The body of the assert check for an accessor
    We allow all versions of add/delete/modify to use the same accessors
    """
    if cls in ["of_flow_modify", "of_flow_modify_strict",
               "of_flow_delete", "of_flow_delete_strict",
               "of_flow_add"]:
        return "IS_FLOW_MOD_SUBTYPE(obj->object_id)"
    else:
        return "obj->object_id == %s" % cls.upper()
4ebddebdd87c0bdb28e7687ec2b0da623507f89e
10,921
from typing import List
import hashlib


def ripemd160(data: List[int]) -> List[int]:
    """
    :param data:
    :return:
    """
    try:
        bytes_data = bytes(data)
    except TypeError:
        raise NativeContractException
    digest = hashlib.new("ripemd160", bytes_data).digest()
    padded = 12 * [0] + list(digest)
    return list(bytearray(bytes(padded)))
bfa29479b6d2633c0075462f558f21562fc96a04
10,922
def has_duplicates(s: list) -> bool:
    """Returns True if any element appears more than once in a sequence."""
    d = dict()
    for char in s:
        if char in d:
            return True
        d[char] = 1
    return False
f702e53cded0c18a0e1b7cffb58bccbff3386bce
10,923
def get_from_chain(J, domain, nof_coefficients, ncap=10000, disc_type='sp_quad', interval_type='lin',
                   mapping_type='lan_bath', permute=None, residual=True, low_memory=True, stable=False,
                   get_trafo=False, force_sp=False, mp_dps=30, sort_by=None, **kwargs):
    """
    Returns star coefficients, constructed from chain coefficients via diagonalization
    see chain.get and convert_chain_to_star for an explanation of the arguments.
    Sort_by sorts the couplings and energies (if passed and not None),
    see utils.sorting.sort_star_coefficients for details on the parameters.
    :returns: gamma (couplings), xi (energies), info dict from both the conversion and
              the chain mapping; if get_trafo is set True, the dict only contains the
              latest transformation (from chain to star here)
    """
    c0, omega, t, info = get_chain(J, domain, nof_coefficients, ncap=ncap, disc_type=disc_type,
                                   interval_type=interval_type, mapping_type=mapping_type,
                                   permute=permute, residual=residual, low_memory=low_memory,
                                   stable=stable, get_trafo=False, **kwargs)
    gamma, xi, trafo_info = convert_chain_to_star(c0, omega, t, force_sp=force_sp, mp_dps=mp_dps,
                                                  get_trafo=get_trafo)
    gamma, xi = sort_star_coefficients(gamma, xi, sort_by)
    # dict.update returns None, so merge the info dicts before returning them
    info.update(trafo_info)
    return gamma, xi, info
d5cd09a088d4946015eb9556b0fed3ca5be55187
10,924
from typing import Type def factory(kernel_type, cuda_type=None, gpu_mode=None, *args, **kwargs): """Return an instance of a kernel corresponding to the requested kernel_type""" if cuda_type is None: cuda_type = default.dtype if gpu_mode is None: gpu_mode = default.gpu_mode # turn enum string to enum object if isinstance(kernel_type, str): try: for c in [' ', '-']: # chars to be replaced for normalization kernel_type = kernel_type.replace(c, '_') kernel_type = Type[kernel_type.upper()] except: raise TypeError('kernel_type ' + kernel_type + ' could not be found') if not isinstance(kernel_type, Type): raise TypeError('kernel_type must be an instance of KernelType Enum') if kernel_type in [Type.UNDEFINED, Type.NO_KERNEL]: return None res = None hash = AbstractKernel.hash(kernel_type, cuda_type, gpu_mode, *args, **kwargs) if hash not in instance_map: res = kernel_type.value(gpu_mode=gpu_mode, cuda_type=cuda_type, *args, **kwargs) # instantiate instance_map[hash] = res else: res = instance_map[hash] assert res is not None return res
2d9bf5fb0fd45e367d31b76656dfc611912f7202
10,925
from typing import Dict
from typing import Union


def init_scaler(
    scaler_parameters: Dict,
    fit_data: np.ndarray,
) -> Union[MinMaxScaler, StandardScaler, RobustScaler]:
    """Initialize and return scaler.

    Args:
        scaler_parameters: Parameters of scaler.
        fit_data: Data to be fit.

    Returns:
        Selected scaler.
    """
    scaler_type = scaler_parameters["scaler_type"]
    if scaler_type == "RobustScaler":
        scaler = RobustScaler()
    elif scaler_type == "StandardScaler":
        scaler = StandardScaler()
    else:
        scaler = MinMaxScaler()
    scaler.fit(fit_data)
    return scaler
18f15e8e6bebb32ad659636f46ee2e5f54ccc69d
10,926
def get_dynamic_resource(previous_length: str):
    """Get the resource usage of the nodes.

    Returns:
        Dict mapping node name to node resource usage.
    """
    name_to_node_usage = redis_controller.get_resource_usage(
        previous_length=int(previous_length)
    )
    return name_to_node_usage
05efd928f66b8237e39bd04df2482d8b24259700
10,927
def _margo_bin(exe=""):
    """Returns the path of the margo executable."""
    return gs.home_path("bin", exe or INSTALL_EXE)
a540357e84411ec84820163966440d75ae142d8b
10,928
def csl_density(basis, mini_cell, plane):
    """
    returns the CSL density of a given plane and its d_spacing.
    """
    plane = np.array(plane)
    c = csl_vec(basis, mini_cell)
    h = np.dot(c.T, plane)
    h = smallest_integer(h)[0]
    h = common_divisor(h)[0]
    g = np.linalg.inv(np.dot(c.T, c))
    h_norm = np.sqrt(np.dot(h.T, np.dot(g, h)))
    density = 1 / (h_norm * np.linalg.det(c))
    return abs(density), 1 / h_norm
852ba976f1bfc9b1fa30ba660f8b660e023bed94
10,929
def mw_Av():
    """Build the A_V attenuation by the MW towards M31."""
    curve = SF11ExtinctionCurve()
    ratio = curve['Landolt V']  # A_V / E(B-V) from T6 of SF2011
    return ratio * 0.07
ff53a5c302945ab6020a3734950bc8449c522faa
10,930
import os


def load_model(model_uri):
    """
    Load an H2O model from a local file (if ``run_id`` is ``None``) or a run.
    This function expects there is an H2O instance initialised with ``h2o.init``.

    :param model_uri: The location, in URI format, of the MLflow model. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.

    :return: An `H2OEstimator model object
             <http://docs.h2o.ai/h2o/latest-stable/h2o-py/docs/intro.html#models>`_.
    """
    local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
    flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
    # Flavor configurations for models saved in MLflow version <= 0.8.0 may not contain a
    # `data` key; in this case, we assume the model artifact path to be `model.h2o`
    h2o_model_file_path = os.path.join(local_model_path, flavor_conf.get("data", "model.h2o"))
    return _load_model(path=h2o_model_file_path)
1363bd19fa3744de84a0b0f6b451ad8eba742808
10,931
def load_data(filenames):
    """Load a single file or sequence of files using skimage.io"""
    filenames = [filenames, ] if isinstance(filenames, str) else filenames
    loadfunc = tifffile.imread if all(f.lower().endswith("tif") for f in filenames) else skio.imread
    if len(filenames) > 1:
        return np.array([loadfunc(f) for f in filenames], dtype=float)
    elif len(filenames) == 1:
        return loadfunc(filenames[0]).astype(float)
    else:
        raise Exception("load_data received an empty list")
be9c451c5aa3469a2bcaceb2fb6ab8ab09195794
10,932
def GetInverseMatrix(matrix):
    """
    :param matrix: the matrix which will get its inverse matrix
    :return: the inverse matrix (two dimensions only)
    """
    matrix[0, 0], matrix[1, 1] = -matrix[1, 1], -matrix[0, 0]
    matrix = matrix / -(matrix[0, 0] * matrix[1, 1] - matrix[0, 1] * matrix[1, 0])
    return matrix
c4fdba364cc6b73a3b72a40f980a0fa402a1968f
10,933
import re def petsc_memory_stats(log): """Return the memory stats section of PETSc's -log_view output as a dictionary.""" # first search for the 'Memory usage' header, then match anything that follows # after the first line starting with --- up until the first line starting with ===== # re.DOTALL makes . match newlines as well try: memory_profile = re.finditer('Memory usage is given in bytes:.*?\n---[^\n].*?\n(.*?)\n===========', log, re.DOTALL).next().group(1) except StopIteration: # no memory stats section found (did you run with -log_view ?) return None stats = {} for line in memory_profile.split('\n'): try: (object, profile) = re.finditer('(\s.*?)([0-9]+.*)', line).next().groups() except StopIteration: continue profile = profile.split() stats[object.strip()] = [int(x) for x in profile[0:3]] + [float(profile[3]),] return stats
c7756190ae2a4c25f5cf7a16764ace06da95b0f6
10,934
import torch


def track2result(bboxes, labels, ids, num_classes):
    """Convert tracking results to a list of numpy arrays.

    Args:
        bboxes (torch.Tensor | np.ndarray): shape (n, 5)
        labels (torch.Tensor | np.ndarray): shape (n, )
        ids (torch.Tensor | np.ndarray): shape (n, )
        num_classes (int): class number, including background class

    Returns:
        list(ndarray): tracking results of each class.
    """
    valid_inds = ids > -1
    bboxes = bboxes[valid_inds]
    labels = labels[valid_inds]
    ids = ids[valid_inds]
    if bboxes.shape[0] == 0:
        return [np.zeros((0, 6), dtype=np.float32) for i in range(num_classes)]
    else:
        if isinstance(bboxes, torch.Tensor):
            bboxes = bboxes.cpu().numpy()
            labels = labels.cpu().numpy()
            ids = ids.cpu().numpy()
        return [
            np.concatenate((ids[labels == i, None], bboxes[labels == i, :]), axis=1)
            for i in range(num_classes)
        ]
ae2dda3abd32d8b6c3dd0fc0c8c5f65268a8e747
10,935
def build_result_dataframe(gh, pred, df):
    """
    Construct a dataframe that contains the prediction.

    :param gh: the geohash6 code of the prediction
    :param pred: numpy array of prediction
    :param df: the dataframe used for prediction
    :returns: prediction dataframe
    :rtype: pandas.core.frame.DataFrame
    """
    # generate a sequence of timestamps
    start_time = df.timestamp.values.max() + np.timedelta64(15, 'm')
    timestamps = pd.date_range(start_time, periods=len(pred), freq='15T')

    # calculate the 'day' column of the dataframe
    dtdelta = (timestamps.date - df.timestamp.max().date())
    dtdelta = list(map(lambda x: x.days, dtdelta))
    days = dtdelta + df.day.max()

    # calculate time of day
    tod = list(map(lambda x: x.strftime('%H:%M'), timestamps.time))

    # construct the result dictionary
    res = {'geohash6': [gh] * len(pred),
           'day': days,
           'timestamp': tod,
           'demand': pred}

    return pd.DataFrame(res)
0b1523aa42c7a31aa286522ee81ec93690dfbf0c
10,936
from typing import Sequence from pathlib import Path import sys from contextlib import suppress def find_module(module_name: str, search_paths: Sequence[str | Path] | None = None) -> Path: # noqa: WPS231 """Find a module in a given list of paths or in `sys.path`. Parameters: module_name: The module name. search_paths: The paths to search into. Raises: ModuleNotFoundError: When the module cannot be found. Returns: The module file path. """ # optimization: pre-compute Paths to relieve CPU when joining paths search = [path if isinstance(path, Path) else Path(path) for path in search_paths or sys.path] parts = module_name.split(".") # always search a .pth file first using the first part for path in search: top_pth = Path(f"{parts[0]}.pth") abs_top_pth = path / top_pth if abs_top_pth.exists(): with suppress(UnhandledPthFileError): location = _handle_pth_file(abs_top_pth) if location.suffix: location = location.parent search = [location.parent] # TODO: possible optimization # always break if exists? break # resume regular search filepaths = [ # TODO: handle .py[cod] and .so files? Path(*parts, "__init__.py"), Path(*parts[:-1], f"{parts[-1]}.py"), Path(*parts[:-1], f"{parts[-1]}.pth"), Path(*parts), # namespace packages, try last ] for path in search: # noqa: WPS440 for choice in filepaths: abs_path = path / choice # optimization: just check if the file exists, # not if it's an actual file if abs_path.exists(): if abs_path.name.endswith(".pth"): try: return _handle_pth_file(abs_path) except UnhandledPthFileError as error: raise ModuleNotFoundError(module_name) from error return abs_path raise ModuleNotFoundError(module_name)
4d066fa6e528b1d3ae36d789b8cc9df7a38bf5f0
10,937
def day_1_puzzle_1_solution() -> int:
    """Use this function to return the total fuel requirements for all of the modules.

    This function is used for reading the text file of puzzle data and returning
    the total amount of fuel that is required for the modules.

    :return: the total fuel requirement.
    """
    return sum([calculate_fuel(int(mass)) for mass in get_puzzle_input()])
625497ea7e7619b1e84abc9eb8dfdfd1076af392
10,938
def is_description_style(style):
    """ True if this is a style used for Relationships paragraph text """
    return is_style(style, 'Normal') or is_style(style, 'Note')
0e96d9977f7d18e8253a87e3af59f31e8326f4ae
10,939
def inject_content_head_last(html, content):
    """
    Insert the text content at the end of the head section.
    :type html: str
    :type content: str
    :rtype: str
    """
    head_end_pos = html.find("</head")  # find the position of the closing </head> tag
    if head_end_pos == -1:
        # if there is no </head>, do not insert anything
        return html
    return html[:head_end_pos] + content + html[head_end_pos:]
61792831f859a966e8cfa01ca56a6b9be10ede4d
10,940
from typing import Union def download(ticker: str, start: Union[pd.Timestamp, str] = None, end: Union[pd.Timestamp, str] = None, frequency: str = "day") -> pd.DataFrame: """ Download market data from yahoo finance using the yfinance library from ticker `ticker` from `start` to `end` at a specific frequency (day, hour or minute). :param str ticker: Ticker, e.g. "AAPL" or "GOOG". :param pd.Timestamp,str start: Starting date for fetching the data as a pd.Timestamp or a "YYYY-MM-DD HH:MM:SS" str. If None, the oldest possible date is used by yfinance. `start` is **always** truncated to max 730 days from today for `frequency="1h"`and to max 30 days for `frequency="1m"`. Default is None. :param pd.Timestamp,str end: End date for fetching the data as a pd.Timestamp or a "YYYY-MM-DD HH:MM:SS" str. If None, today is used ( `pd.Timestamp.today().floor("D")` ). Default is None. :param str frequency: Frequency at which the data is sampled, can be daily ("day", "daily", "d", "1d"), hourly ("hour", "hourly", "h", "1h") or every minute ("minute", "m", "1m"). Default is "day". :return: market data as a pd.DataFrame with columns "Open", "High", "Low", "Close", "Adj Close", "Volume". """ today = pd.Timestamp.today().floor('D') if end is None: end = today elif isinstance(end, str): end = pd.Timestamp(end) day_set = {"day", "daily", "d", "1d"} hour_set = {"hour", "hourly", "h", "1h"} minute_set = {"minute", "m", "1m"} if frequency.lower() in day_set: df = yf.download(ticker, start=start, end=end, interval="1d") elif frequency.lower() in hour_set.union(minute_set): if frequency.lower in hour_set: frequency = "1h" # Range is limited to 730 days max (including today so 729) limit = pd.Timedelta(days=729) # Dummy limit for the download batchlimit = pd.Timedelta(days=1000) else: frequency = "1m" # Range is limited to 30 days max (including today) limit = pd.Timedelta(days=29) # Limit of 7 days for the download of minute data batchlimit = pd.Timedelta(days=7) # Check the start point if start is None: start = today - limit start = max(end - limit, today - limit) # Download by batches (effective only for minute data) local_start = start local_end = min(local_start + batchlimit, end) df = yf.download(ticker, start=local_start, end=local_end, interval=frequency) while local_end < end: local_start = local_end local_end = min(local_start + batchlimit, end) df = pd.concat((df, yf.download(ticker, start=local_start, end=local_end, interval=frequency))) else: raise ValueError(f"Wrong `frequency` argument ({frequency}). " f"Should be in {day_set}, {hour_set} or {minute_set}.") if df is None: raise EmptyDataError elif not isinstance(df, pd.DataFrame): raise EmptyDataError else: if len(df) == 0: raise EmptyDataError if df.columns.nlevels == 2: df = df.swaplevel(axis=1) df.sort_index(axis=1, inplace=True) return df
733d5ac8244ca6fdbcb73a11585067c90dd7210b
10,941
import types import os import re def sortUrlList(urlList): """Return ordered url list (localFile, DAP, HTTP, FTP).""" #localList = [url for url in urlList if os.path.exists(url)] #dodsList = [url for url in urlList if sciflo.utils.isDODS(url)] #httpList = [url for url in urlList if not sciflo.utils.isDODS(url) and url.startswith('http')] #ftpList = [url for url in urlList if url.startswith('ftp')] #localList.extend(dodsList); localList.extend(httpList); localList.extend(ftpList) fileUrlList = [] localList = [] dodsList = [] httpList = [] ftpList = [] allList = [] for url in urlList: if isinstance(url, types.StringTypes) and '.xfr' in url: continue if os.path.exists(url): localList.insert(0,url) elif url.startswith('file://'): fileUrlList.insert(0, url) elif url.startswith('http') and re.search(r'(dods|opendap)',url,re.IGNORECASE): dodsList.insert(0,url) elif url.startswith('http'): if '.ecs.nasa.gov' in url: httpList.insert(0,url) else: httpList.append(url) else: ftpList.append(url) localList.sort(); localList.reverse() #allList.extend(dodsList); allList.extend(ftpList); allList.extend(httpList) #allList.extend(localList); allList.extend(fileUrlList) allList.extend(ftpList); allList.extend(httpList); allList.extend(dodsList) allList.extend(localList); allList.extend(fileUrlList) return allList
839a4e451c2052eaf3b2c5df6499399cbd693f4b
10,942
import sys import os from datetime import datetime def main(args): """ Main method """ # await/async requires python >= 3.5 if sys.version_info.major < 3 and sys.version_info.minor < 5: print("Error, language features require the latest python version.") print("Please install python 3.8 or greater") return 1 # Force tieried compilation off. It will effect both collection and replay os.environ["COMPlus_TieredCompilation"] = "0" coreclr_args = setup_args(args) success = True if coreclr_args.mode == "collect": # Start a new SuperPMI Collection. begin_time = datetime.datetime.now() print("SuperPMI Collect") print("------------------------------------------------------------") print("Start time: {}".format(begin_time.strftime("%H:%M:%S"))) collection = SuperPMICollect(coreclr_args) success = collection.collect() print("Finished SuperPMI collect") if coreclr_args.output_mch_path != None: print("mch path: {}".format(coreclr_args.output_mch_path)) end_time = datetime.datetime.now() print("Finish time: {}".format(end_time.strftime("%H:%M:%S"))) elif coreclr_args.mode == "replay": # Start a new SuperPMI Replay begin_time = datetime.datetime.now() print("SuperPMI Replay") print("------------------------------------------------------------") print("Start time: {}".format(begin_time.strftime("%H:%M:%S"))) mch_file = coreclr_args.mch_file jit_path = coreclr_args.jit_path print("") print("MCH Path: {}".format(mch_file)) print("JIT Path: {}".format(jit_path)) replay = SuperPMIReplay(coreclr_args, mch_file, jit_path) success = replay.replay() print("Finished SuperPMI replay") end_time = datetime.datetime.now() print("Finish time: {}".format(end_time.strftime("%H:%M:%S"))) elif coreclr_args.mode == "asmdiffs": # Start a new SuperPMI Replay with AsmDiffs begin_time = datetime.datetime.now() print("SuperPMI ASM diffs") print("------------------------------------------------------------") print("Start time: {}".format(begin_time.strftime("%H:%M:%S"))) mch_file = coreclr_args.mch_file base_jit_path = coreclr_args.base_jit_path diff_jit_path = coreclr_args.diff_jit_path print("") print("MCH Path: {}".format(mch_file)) print("Base JIT Path: {}".format(base_jit_path)) print("Diff JIT Path: {}".format(diff_jit_path)) asm_diffs = SuperPMIReplayAsmDiffs(coreclr_args, mch_file, base_jit_path, diff_jit_path) success = asm_diffs.replay_with_asm_diffs(coreclr_args.previous_temp_location) print("Finished SuperPMI replay") end_time = datetime.datetime.now() print("Finish time: {}".format(end_time.strftime("%H:%M:%S"))) elif coreclr_args.mode == "upload": begin_time = datetime.datetime.now() print("SuperPMI upload") print("------------------------------------------------------------") print("Start time: {}".format(begin_time.strftime("%H:%M:%S"))) upload_mch(coreclr_args) print("Finished SuperPMI upload") end_time = datetime.datetime.now() print("Finish time: {}".format(end_time.strftime("%H:%M:%S"))) elif coreclr_args.mode == "list-collections": index = download_index(coreclr_args) index_count = len(index) print("SuperPMI list-collections") print("") print("{} different collections".format(index_count)) print("") for item in index: print(item) print("") else: raise NotImplementedError(coreclr_args.mode) return 0 if success else 1
3dc81fdb1e69ae1eadf5ca0be3a577d6cc37b866
10,943
def _ps_run_one_reset_kwargs(G, reset_kwargs: tuple, eval: bool):
    """
    Sample one rollout with given init state and domain parameters, passed as a tuple for simplicity at the other end.
    This function is used when a minimum number of rollouts was given.
    """
    if len(reset_kwargs) != 2:
        raise pyrado.ShapeErr(given=reset_kwargs, expected_match=(2,))
    if not isinstance(reset_kwargs[0], np.ndarray):
        raise pyrado.TypeErr(given=reset_kwargs[0], expected_type=np.ndarray)
    if not isinstance(reset_kwargs[1], dict):
        raise pyrado.TypeErr(given=reset_kwargs[1], expected_type=dict)
    return rollout(
        G.env, G.agent, eval=eval, reset_kwargs=dict(init_state=reset_kwargs[0], domain_param=reset_kwargs[1])
    )
95e23ad682d6afc3014bfa7932b00a955cc5bd3d
10,944
from exopy_pulses.testing.context import TestContext from typing import OrderedDict def test_compiling_a_sequence_not_compiling2(workspace, root, monkeypatch, exopy_qtbot, dialog_sleep): """Test compiling a sequence that can be evaluated but not compiled. """ def __raise(*args, **kwargs): return False, {}, {'test': False} monkeypatch.setattr(TestContext, 'compile_and_transfer_sequence', __raise) workbench = workspace.workbench ui = workbench.get_plugin('enaml.workbench.ui') ui.show_window() exopy_qtbot.wait(10 + dialog_sleep) root.external_vars = OrderedDict({'a': 1.5}) pulse1 = Pulse(def_1='1.0', def_2='{a}') pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0') pulse3 = Pulse(def_1='{4_start} + 0.5', def_2='{4_start}+{4_duration}-0.5') pulse4 = Pulse(def_1='2.0', def_2='0.5', def_mode='Start/Duration') pulse5 = Pulse(def_1='3.0', def_2='0.5', def_mode='Start/Duration') sequence2 = BaseSequence(time_constrained=True, def_1='{3_stop} + 0.5', def_2='6') sequence2.add_child_item(0, pulse3) sequence1 = BaseSequence() add_children(sequence1, (pulse2, sequence2, pulse4)) add_children(root, (pulse1, sequence1, pulse5)) workspace.state.sequence = root dial = CompileDialog(workspace=workspace) dial.show() wait_for_window_displayed(exopy_qtbot, dial) comp_widget = dial.central_widget().widgets()[0] comp_widget.widgets()[-1].clicked = True def assert_exec(): assert comp_widget.elapsed_time assert comp_widget.errors assert comp_widget.widgets()[-2].background == parse_color('red') exopy_qtbot.wait_until(assert_exec)
eec3ee453346a75398da230e59013bfaa47f8b23
10,945
import warnings


def deprecated(message, exception=PendingDeprecationWarning):
    """Throw a warning when a function/method will be soon deprecated

    Supports passing a ``message`` and an ``exception`` class
    (uses ``PendingDeprecationWarning`` by default). This is useful if you
    want to alternatively pass a ``DeprecationWarning`` exception for already
    deprecated functions/methods.

    Example::

        >>> import warnings
        >>> from functools import wraps
        >>> message = "this function will be deprecated in the near future"
        >>> @deprecated(message)
        ... def foo(n):
        ...     return n+n
        >>> with warnings.catch_warnings(record=True) as w:
        ...     warnings.simplefilter("always")
        ...     foo(4)
        ...     assert len(w) == 1
        ...     assert issubclass(w[-1].category, PendingDeprecationWarning)
        ...     assert message == str(w[-1].message)
        ...     assert foo.__name__ == 'foo'
        8
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            warnings.warn(message, exception, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper
    return decorator
86ccfeb53048d130a7fe35a0609dc5e95440da23
10,946
import subprocess


def check(config, content, filename):
    """
    Run flake8 with the given ``config`` against the passed file.
    Returns a ``list`` of :py:class:`flake.Violation`.
    """
    with environment(config, content, filename) as env:
        out = subprocess.check_output(['flake8',
                                       '--exit-zero',
                                       '--config', env.config_filename,
                                       '--format', FLAKE8_REPORT_FORMAT,
                                       env.filename],
                                      universal_newlines=True)
    return parse(out)
2d75348b489cef9fc60e3d76ce94200e7542a0d2
10,947
def x_dot(y):
    """x_dot(y)

    Describes the differential equation for position as given in CW 12.
    """
    return y
7fa01584b09c6e83e28ddf63b300323fdcb7fa0b
10,948
def get_comp_depends(comp_info, comps):
    """ Get comp depends from comp index """
    depends = []
    for comp in comps:
        if comp in comp_info:
            depends += comp_info[comp]["dependencies"]
    if depends:
        depends += get_comp_depends(comp_info, depends)
    return list(set(depends))
79a8b51e329cf9be414391508cc0ecbe76ff0707
10,949
def get_naiveb_model(x_train: pd.DataFrame, y_train: pd.Series) -> GaussianNB:
    """
    Trains and returns a naive Bayes model
    Data must all be on the same scale in order to use naive Bayes
    """
    gnb = GaussianNB(priors=None)
    gnb.fit(x_train, y_train)
    return gnb
f1b93acf80ee88f1eb0be7a61aa0d9ac94248966
10,950
def updateDF(df, fields, id_patient):
    """
    fields is a dictionary of column names and values.
    The function updates the row of id_patient with the values in fields.
    """
    for key in fields:
        df.loc[df["id_patient"] == id_patient, key] = fields[key][0]
    return df
5ced64eca8d8736836f82dacd1750cb8ac612989
10,951
def gcd(num1: int, num2: int) -> int:
    """Computes the greatest common divisor of integers a and b using
    Euclid's Algorithm.
    """
    while num2 != 0:
        num1, num2 = num2, num1 % num2
    return num1
c53ff5be770570278f497d7ce2a2146a3ac3d9da
10,952
import tempfile import json import os def application(request): """ To use this application, the user must send a POST request with base64 or form encoded encoded HTML content and the wkhtmltopdf Options in request data, with keys 'base64_html' and 'options'. The application will return a response with the PDF file. """ if request.method != 'POST': return hasHeader = False hasFooter = False images = [] request_is_json = request.content_type.endswith('json') with tempfile.NamedTemporaryFile(suffix='.html') as footer_file: with tempfile.NamedTemporaryFile(suffix='.html') as header_file: with tempfile.NamedTemporaryFile(suffix='.html') as source_file: if request_is_json: # If a JSON payload is there, all data is in the payload payload = json.loads(request.data) source_file.write(payload['contents'].decode('base64')) if payload.has_key('header'): header_file.write(payload['header'].decode('base64')) hasHeader = True if payload.has_key('footer'): footer_file.write(payload['footer'].decode('base64')) hasFooter = True if payload.has_key('images'): for image in payload['images']: if image.has_key('path') and image.has_key('contents'): path = "/tmp/" +image['path'] if os.path.isdir(os.path.dirname(path))==False: os.makedirs(os.path.dirname(path)) f = open(path, "w") f.write(image['contents'].decode('base64')) f.close() images.append(path) options = payload.get('options', {}) elif request.files: # First check if any files were uploaded source_file.write(request.files['file'].read()) # Load any options that may have been provided in options options = json.loads(request.form.get('options', '{}')) source_file.flush() header_file.flush() footer_file.flush() # Evaluate argument to run with subprocess args = ['wkhtmltopdf'] if hasHeader: args.append('--header-html "file://%s"' % header_file.name) if hasFooter: args.append('--footer-html "file://%s"' % footer_file.name) # Add Global Options if options: for option, value in options.items(): args.append('--%s' % option) if value: args.append('"%s"' % value) # Add source file name and output file name file_name = source_file.name args += [file_name, file_name + ".pdf"] # Execute the command using executor execute(' '.join(args)) for image in images: os.remove(image) return Response( wrap_file(request.environ, open(file_name + '.pdf')), mimetype='application/pdf', )
69fac274a111d9e9c9dff446dacc0cf2520468c9
10,953
import json


async def check_user_name(request):
    """Check if a user exists with provided username."""
    log_request(request)
    conn = await create_connection()
    response = await users_query.users_search_duplicate(
        conn, request.args.get("username")
    )
    conn.close()
    return json({"exists": bool(response)})
72ff533a02e6377b78bfbfc631e87acc5fe59779
10,954
def azip_longest(*aiterables, fillvalue=None):
    """async version of izip_longest with parallel iteration"""
    return _azip(*aiterables, fillvalue=fillvalue, stop_any=False)
22f4ef6b4f1294ccca71a59337913a64e89a9e62
10,955
def drop_table(name, con):
    """
    drop table from database

    Parameters
    ----------
    name : string, name of SQL table
    con : sqlalchemy.engine.Engine or sqlite3.Connection

    Returns
    -------
    True

    Examples
    --------
    >>> import pandas as pd
    >>> from sqlalchemy import create_engine
    >>> from tidyframe import drop_table
    >>>
    >>> engine = create_engine("sqlite:///raw_table.db")
    >>> df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
    >>> df.to_sql("raw_table", engine)
    >>> drop_table("raw_table", engine)
    True
    """
    table = load_table_schema(name, con)
    table.drop()
    return True
c86ad4e71c24bdfaba924171a21e74096ab8c11e
10,956
def class_info_interface(**class_name): """ Set Class_Name, Class_Index, and DNN Model \nclass_name (kwargs) : Input Class Name with list type, if want to set class number, add tuple parameters like 'class_info_interface(class_name = [list], class_number = [list])' \nclass_number : Default the number of class_name """ global window window = Tk() window.title("Auto Labeling Input Class Name") global entry_num global entry_name global entry_model # 1. DNN Model Interface ttk.Label(window, text = "DNN Model : ").grid(row = 0, column = 0, padx = 10, pady = 10) entry_model = ttk.Entry(window) entry_model.grid(row = 0, column = 1, padx = 10, pady = 10) # 2. Class name Interface ttk.Label(window, text = "Class name : ").grid(row = 1, column = 0, padx = 10, pady = 10) entry_name = ttk.Entry(window) entry_name.grid(row = 1, column = 1, padx = 10, pady = 10) # 3. Class number Interface ttk.Label(window, text = "Class number : ").grid(row = 2, column = 0, padx = 10, pady = 10) entry_num = ttk.Entry(window) entry_num.grid(row = 2, column = 1, padx = 10, pady = 10) ttk.Button(window, text="OK", command=get_class_info).grid(row = 2, column = 2, padx = 10, pady = 10) # 4. User Name Guide Interface if not class_name or class_name.__len__() is 0: # isEmpty == True ttk.Label(window, text = "Username \n\n" + "professor_seo \n" + "jaeseok \n" + "hun \n").grid(row = 3, column = 1, padx = 10, pady = 10) ttk.Label(window, text = "| Class Name\n\n" + "| 0\n| 1\n| 2\n").grid(row = 3, column = 2, padx = 10, pady = 10) elif len(class_name) is not 0: # tuple variable class_name_list = list() for key, value in class_name.items(): print(key, value) class_name_list.append(value) # Class Name [0] name_value = str() index_value = str() for i, name in enumerate(class_name_list[0]): name_value = name_value + name + ' \n' index_value = index_value + '| ' + str(i) + ' \n' ttk.Label(window, text = "Username \n\n" + name_value).grid(row = 3, column = 1, padx = 10, pady = 10) # Class Index [1] if len(class_name) == 2: index_value = str() for index in class_name_list[1]: index_value = index_value + '|' + \ str(index) + ' \n' ttk.Label(window, text = "| Class Name\n\n" + index_value).grid(row = 3, column = 2, padx = 10, pady = 10) print("list") else: raise ValueError("Not Supported value. See function docstring") window.mainloop() return user_name, user_num, dnn_model
a9da1515192cf67bfe326ab90ff7c12a32106304
10,957
def uint8(value):
    """
    Create an SPL ``uint8`` value.

    Returns:
        Expression: Expression representing the value.
    """
    return streamsx.spl.op.Expression('UINT8', int(value))
7e8562b4ec82bbb932c92a9af4cfd06224b6596d
10,958
import re def print_table(log_results, platform_width = 0, build_failures_width = 0, test_failures_width = 0, successful_width = 0, space_char = " ", list_separator = DEFAULT_LIST_SEPARATOR): """Print out a table in the requested format (text or markdown).""" # Print table header output_lines = list() headers = [ re.sub(r'\b \b', space_char, PLATFORM_HEADER.ljust(platform_width)), re.sub(r'\b \b', space_char,BUILD_FAILURES_HEADER.ljust(build_failures_width)), re.sub(r'\b \b', space_char,TEST_FAILURES_HEADER.ljust(test_failures_width)) ] + ( [re.sub(r'\b \b', space_char,SUCCESSFUL_TESTS_HEADER.ljust(successful_width))] if FLAGS.include_successful else [] ) # Print header line. output_lines.append(("|" + " %s |" * len(headers)) % tuple(headers)) # Print a |-------|-------|---------| line. output_lines.append(("|" + "-%s-|" * len(headers)) % tuple([ re.sub("[^|]","-", header) for header in headers ])) # Iterate through platforms and print out table lines. for platform in sorted(log_results.keys()): if log_results[platform]["build_failures"] or log_results[platform]["test_failures"] or FLAGS.include_successful: columns = [ re.sub(r'\b \b', space_char, platform.ljust(platform_width)), format_result(log_results[platform]["build_failures"], justify=build_failures_width, list_separator=list_separator), format_result(log_results[platform]["test_failures"], justify=test_failures_width, list_separator=list_separator), ] + ( [format_result(log_results[platform]["successful"], justify=successful_width, list_separator=list_separator)] if FLAGS.include_successful else [] ) output_lines.append(("|" + " %s |" * len(headers)) % tuple(columns)) return output_lines
e12ada2d86f3dcecef6292b5c052094599abda4b
10,959
def is_valid(filepath, digest, hashAlgo='md5'):
    """Verify the integrity of a file against a hash value."""
    assert(isinstance(digest, str))
    res = calculate(filepath, hashAlgo)
    LOG.debug('Calculated digest: ' + res)
    LOG.debug(' Original digest: ' + digest)
    return res is not None and res == digest
bb83d8b8a3a0bed7e061009a04b30c2eb361abd7
10,960
def align_reconstruction_to_pdr(reconstruction, data): """ leveling and scaling the reconstructions to pdr """ if reconstruction.alignment.aligned: return reconstruction if not data.pdr_shots_exist(): return reconstruction pdr_shots_dict = data.load_pdr_shots() X, Xp = [], [] onplane, verticals = [], [] for shot_id in reconstruction.shots.keys(): X.append(reconstruction.shots[shot_id].pose.get_origin()) Xp.append(pdr_shots_dict[shot_id][0:3]) R = reconstruction.shots[shot_id].pose.get_rotation_matrix() onplane.append(R[0,:]) onplane.append(R[2,:]) verticals.append(R[1,:]) X = np.array(X) Xp = np.array(Xp) # Estimate ground plane. p = multiview.fit_plane(X - X.mean(axis=0), onplane, verticals) Rplane = multiview.plane_horizontalling_rotation(p) X = Rplane.dot(X.T).T # Estimate 2d similarity to align to pdr predictions T = tf.affine_matrix_from_points(X.T[:2], Xp.T[:2], shear=False) s = np.linalg.det(T[:2, :2]) ** 0.5 A = np.eye(3) A[:2, :2] = T[:2, :2] / s A = A.dot(Rplane) b = np.array([ T[0, 2], T[1, 2], Xp[:, 2].mean() - s * X[:, 2].mean() # vertical alignment ]) # Align points. for point in reconstruction.points.values(): p = s * A.dot(point.coordinates) + b point.coordinates = p.tolist() # Align cameras. for shot in reconstruction.shots.values(): R = shot.pose.get_rotation_matrix() t = np.array(shot.pose.translation) Rp = R.dot(A.T) tp = -Rp.dot(b) + s * t try: shot.pose.set_rotation_matrix(Rp) shot.pose.translation = list(tp) except: logger.debug("unable to transform reconstruction!") return reconstruction
c3f5afd859a1275863cce6e2ccf3cd9523b94186
10,961
def checkLengthSmaller(op, graph, frm, to):
    """
    Confirm resulting video has fewer frames than source.
    :param op:
    :param graph:
    :param frm:
    :param to:
    :return:
    @type op: Operation
    @type graph: ImageGraph
    @type frm: str
    @type to: str
    """
    edge = graph.get_edge(frm, to)
    durationChangeTuple = getValue(edge, 'metadatadiff.video.nb_frames')
    if durationChangeTuple is None or \
            (durationChangeTuple[0] == 'change' and int(durationChangeTuple[1]) < int(durationChangeTuple[2])):
        return (Severity.ERROR, "Length of video is not shorter")
a2101371e4f8af0ebaab1ece5d7cc31f4a277aca
10,962
import logging


def enable_log(fmt='[%(asctime)s] [%(process)5s] %(levelname)s %(module)s %(name)s %(message)s',
               enable_color=True, filename=None):
    """
    Clears all log handlers, and adds color handler and/or file handlers

    :param fmt: logging format string
    :param enable_color: True to enable
    :param filename: log file location
    :return: Logger object
    """
    lgr = logging.getLogger()
    lgr.handlers.clear()

    # if there's no special requirements for logging
    # we still want the formatting.
    if not enable_color and \
            filename is None and \
            filename != '':
        loghandler = logging.StreamHandler()
        logfmt = logging.Formatter(fmt)
        loghandler.setFormatter(logfmt)
        lgr.addHandler(loghandler)
        return True

    if enable_color:
        loghandler = logging.StreamHandler()
        logfmt = ColorLogFormatter(fmt)
        loghandler.setFormatter(logfmt)
        lgr.addHandler(loghandler)

    if filename is not None and filename != '':
        logfilename = abspath(filename)
        fhandler = logging.FileHandler(logfilename)
        logfmt = logging.Formatter(fmt)
        fhandler.setFormatter(logfmt)
        lgr.addHandler(fhandler)

    return True
3e018012e7cff555d86e93396485c9644dfb32ae
10,963
def build_con_and_ds(dataset: str):
    """
    Builds test connector and test datasource for testing with API key

    Leave this function in if ever want to run tests without skipping
    due to there being no Bearer tokens

    How to use:
    Replace build_ds function with this one in test_aircall file
    Be sure to also replace the endpoints inside the aircall connector file
    """
    con = AircallConnector(name='mah_test', bearer_auth_id='abc123efg')
    ds = AircallDataSource(name='mah_ds', domain='test_domain', dataset=dataset, limit=1,)
    return con, ds
7fa19e15e0a38c22f575d6509e3156a874b8ea60
10,964
def _get_search_str_regex_main_body(join_with, last_date):
    """Returns something like: (t1[0-5]\d\d\d\d|t160[0-2]\d\d|t16030\d|t16031[0-3])"""
    todo_date = _get_todo_date(last_date + timedelta(1))
    # yrs = _make_last_digit_all_values_less_last_digit(todo_date[:3])
    # search_substrs = [yrs[-1]]  # Only go back to the previous year
    search_substrs = []
    for i in range(2, 7):
        regexed_date_i = _make_given_digit_all_values_less_than_current_val_regex(todo_date, i)
        if regexed_date_i is not None:
            search_substrs.append(regexed_date_i)
    # search_substrs.append(todo_date)
    search_str = join_with.join(search_substrs)
    search_str = "(%s)" % search_str
    return search_str
85a65df09ad5e35f71500ff30345c83f745564ea
10,965
def is_palindrome_recursive(text, left=None, right=None):
    """Recursively check whether text is a palindrome, ignoring non-letter characters.
    Each call does O(1) work; the recursion walks O(n) character pairs in total.
    text = str
    left = int
    right = int"""
    if len(text) == 0:
        return True
    given = get_letters(text)
    if left is None and right is None:
        left = 0
        right = len(given) - 1
    if given[left] != given[right]:
        return False
    elif left >= right:
        return True
    else:
        return is_palindrome_recursive(given, left + 1, right - 1)
d7bf4ab6e7f43d6418cde3485f94dc2d83e40180
10,966
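get_letters is not shown in this entry; assuming it keeps only alphabetic characters and lowercases them, a hypothetical stand-in plus a few example calls behave like this:

# Hypothetical get_letters helper (the original is not included in this entry):
# keep alphabetic characters only and lowercase them.
def get_letters(text):
    return [ch.lower() for ch in text if ch.isalpha()]

# With that helper in place, the recursive check above gives:
assert is_palindrome_recursive("Race car") is True
assert is_palindrome_recursive("RaceTrack") is False
assert is_palindrome_recursive("") is True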
import operator import numpy def flip(m, axis=None): """Reverses the order of elements in an array along the given axis. The shape of the array is preserved, but the elements are reordered. Parameters ---------- m : array_like Input array. axis : None or int or tuple of ints, optional Axis or axes along which to flip over. The default, axis=None, will flip over all of the axes of the input array. If axis is negative it counts from the last to the first axis. If axis is a tuple of ints, flipping is performed on all of the axes specified in the tuple. Returns ------- out : ndarray A view of m with the entries of axis reversed. Since a view is returned, this operation is done in constant time. Note ---- flip(m, 0) is equivalent to flipud(m). flip(m, 1) is equivalent to fliplr(m). flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all positions. flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at position 0 and position 1. See Also -------- flipud : Flips array in the up/down direction. fliplr : Flips array in the left/right direction. Examples -------- >>> import nlcpy as vp >>> A = vp.arange(8).reshape((2,2,2)) >>> A array([[[0, 1], [2, 3]], <BLANKLINE> [[4, 5], [6, 7]]]) >>> vp.flip(A, 0) array([[[4, 5], [6, 7]], <BLANKLINE> [[0, 1], [2, 3]]]) >>> vp.flip(A, 1) array([[[2, 3], [0, 1]], <BLANKLINE> [[6, 7], [4, 5]]]) >>> vp.flip(A) array([[[7, 6], [5, 4]], <BLANKLINE> [[3, 2], [1, 0]]]) >>> vp.flip(A, (0, 2)) array([[[5, 4], [7, 6]], <BLANKLINE> [[1, 0], [3, 2]]]) >>> A = vp.random.randn(3, 4, 5) >>> vp.all(vp.flip(A, 2) == A[:, :, ::-1, ...]) array(True) """ m = nlcpy.asanyarray(m) if axis is None: indexer = (slice(None, None, -1),) * m.ndim else: if type(axis) is nlcpy.ndarray: axis = axis.get() if type(axis) not in (tuple, list): try: axis = [operator.index(axis)] except TypeError: pass _axis = [] for ax in axis: if type(ax) is nlcpy.ndarray: ax = ax.get() if type(ax) is numpy.ndarray: if ax.size > 1: raise TypeError( 'only size-1 arrays can be converted to Python scalars') else: ax = ax.item() _axis.append(ax + m.ndim if ax < 0 else ax) axis = _axis if len(axis) != len(set(axis)): raise ValueError('repeated axis') indexer = [slice(None) for i in range(m.ndim)] for ax in axis: if ax >= m.ndim or ax < 0: raise AxisError( 'axis {0} is out of bounds for array of dimension {1}' .format(ax, m.ndim)) indexer[ax] = slice(None, None, -1) indexer = tuple(indexer) return m[indexer]
495b75a548d94bc9dbc9827678b08282efb104d8
10,967
def radius_of_gyration(pos): """ Radius of gyration of a group of positions. Does not account for periodic boundaries. """ com = np.mean(pos, axis = 0) delta = pos - com rgv = np.sqrt(np.sum(delta**2, axis = 0) / len(pos)) return np.linalg.norm(rgv)
a12450cf63768bf9a238bef11a3360ce49e3092f
10,968
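A quick worked check of radius_of_gyration above: the four corners of a unit square all lie sqrt(0.5) from their center of mass, so the radius of gyration is sqrt(0.5) ≈ 0.707.

import numpy as np

# Four points at the corners of a unit square in the z=0 plane; their center of
# mass is (0.5, 0.5, 0), and every point sits sqrt(0.5) away from it.
pos = np.array([[0.0, 0.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [1.0, 1.0, 0.0]])
print(radius_of_gyration(pos))   # ~0.7071 = sqrt(0.5)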
def get_metadata_for_list(commit_range, git_dir=None, count=None, series=None, allow_overwrite=False): """Reads out patch series metadata from the commits This does a 'git log' on the relevant commits and pulls out the tags we are interested in. Args: commit_range (str): Range of commits to count (e.g. 'HEAD..base') git_dir (str): Path to git repositiory (None to use default) count (int): Number of commits to list, or None for no limit series (Series): Object to add information into. By default a new series is started. allow_overwrite (bool): Allow tags to overwrite an existing tag Returns: Series: Object containing information about the commits. """ if not series: series = Series() series.allow_overwrite = allow_overwrite stdout = get_list(commit_range, git_dir, count) pst = PatchStream(series, is_log=True) for line in stdout.splitlines(): pst.process_line(line) pst.finalise() return series
0134a836f28bf97e5196c63f80f5b07d372cc5d4
10,969
def side_seperator(lsep,rsep): """ To have a custom side lined formatter. A side-lined formatter is: `[DATE] SEP "L_SEP" EVENT "R_SEP" LOG` `loggy.side_seperator(lsep="||",rsep="||") # Default vals` """ fmt['ls']=lsep fmt['rs']=rsep return fmt
803519e93cef7342e9f951090823fc536f37839f
10,970
def _semi_implicit_euler(ode_fun, jac_fun, y_olds, t_old, f_old,dt, args, solver_parameters, J00, I): """ Calculate solution at t_old+dt using the semi-implicit Euler method. Based on Section IV.9.25 of Ref II. """ y_older, y_old = y_olds je_tot = 0 if(f_old is None): f_yj = ode_fun(*(y_old, t_old)+args) fe_tot = 1 else: f_yj = f_old fe_tot = 0 b = dt*f_yj A = I-dt*J00 if(solver_parameters['initialGuess']): # TODO: Using explicit Euler as a predictor doesn't seem to be # effective (maybe because with extrapolation we are taking too big # steps for the predictor be close to the solution). # x0, f_yj, fe_tot_,je_tot=_explicit_euler(ode_fun, jac_fun, # y_olds, t_old, f_yj, dt, args, solver_parameters) # fe_tot += fe_tot_ x0 = y_old else: x0 = None dy = linear_solve(A, b, iterative=solver_parameters['iterative'], tol=solver_parameters['min_tol'], x0=x0) y_new = y_old + dy return (y_new, f_yj, fe_tot, je_tot)
5db2c6b520ce401b986a2bb7468cd5cdac7413a7
10,971
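A minimal scalar sketch of the same linearly implicit step used above, (I - dt*J)*dy = dt*f(y), on the stiff test equation y' = -50*y; the ode_fun/jac_fun machinery, the linear_solve helper, and the solver_parameters dict are replaced by plain NumPy here:

import numpy as np

# Test problem y' = -50*y, y(0) = 1, whose exact solution is exp(-50*t).
lam = -50.0
f = lambda y, t: lam * y
J = np.array([[lam]])            # Jacobian of f
I = np.eye(1)

y = np.array([1.0])
t, dt = 0.0, 0.01
for _ in range(100):             # integrate to t = 1
    b = dt * f(y, t)
    A = I - dt * J
    dy = np.linalg.solve(A, b)   # (I - dt*J) dy = dt*f  ->  one semi-implicit Euler step
    y = y + dy
    t += dt

print(y[0], np.exp(lam * 1.0))   # both tiny; the step stays stable even at dt*|lam| = 0.5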
from typing import OrderedDict def make_sequential(layer_configs, input): """Makes sequential layers automatically. Arguments: layer_configs: An OrderedDict that contains the configurations of a sequence of layers. The key is the layer_name while the value is a dict contains hyper-parameters needed to instantiate the corresponding layer. The key of the inner dict is the name of the hyper-parameter and the value is the value of the corresponding hyper-parameter. Note that the key "layer_type" indicates the type of the layer. input: A tensor that mimics the batch input of the model. The first dim is the batch size. All other dims should be exactly the same as the real input shape in the later training. Returns: A sequence of layers organized by nn.Sequential. """ layers = OrderedDict() for layer_name in layer_configs: arguments = deepcopy(layer_configs[layer_name]) layer_type = arguments.pop("layer_type") input_shape = [int(j) for j in input.data.size()] arguments["input_shape"] = input_shape layers.update({layer_name: make_layer(layer_type, **arguments)}) input = layers[layer_name](input) return nn.Sequential(layers)
662c8787115c7d6d3e499a89aa7e8c301b9e5e4b
10,972
import random def Pol_Dyn_ExploreWithNUTS(resultsList,totalSimDays=1000,numDaysRemain=1000,\ totalBudget=1000,numBudgetRemain=1000,policyParamList=[0],startDay=0): """ Grab intermediate and end node distribtuions via NUTS. Identify intermediate node sample variances. Pick an intermediate node, weighed towards picking those with higher sample variances. Pick an outlet from this intermediate node's column in the transition matrix A, again by a weighting (where 0% nodes have a non-zero probability of being selected). [log((p/1-p) + eps)?] policyParamList = [number days to plan for, sensitivity, specificity, M, Madapt, delta] (Only enter the number of days to plan for in the main simulation code, as the other parameters will be pulled from the respective input areas) """ #Initialize our output, a list with the above mentioned outputs sampleSchedule = [] # How many days to plan for? numDaysToSched = min(policyParamList[0],numDaysRemain) usedBudgetSoFar = 0 firstTestDay = totalSimDays - numDaysRemain if numDaysRemain == totalSimDays: # Our initial schedule should just be a distrubed exploration currNode = resultsList[0][0] for currDay in range(numDaysToSched): numToTest = int(np.floor((numBudgetRemain-usedBudgetSoFar) / (numDaysRemain-currDay))) +\ min((numBudgetRemain-usedBudgetSoFar) % (numDaysRemain-currDay),1) # How many samples to conduct in the next day for testInd in range(numToTest): # Iterate through our end nodes if currNode > resultsList[len(resultsList)-1][0]: currNode = resultsList[0][0] sampleSchedule.append([firstTestDay+currDay,currNode]) currNode += 1 else: sampleSchedule.append([firstTestDay+currDay,currNode]) currNode += 1 usedBudgetSoFar += 1 else: # Generate NUTS sample using current results and use it to generate a new schedule ydata = [] nSamp = [] for rw in resultsList: ydata.append(rw[2]) nSamp.append(rw[1]) A = simHelpers.GenerateTransitionMatrix(resultsList) sens, spec, M, Madapt, delta = policyParamList[1:] NUTSsamples = simEst.GenerateNUTSsamples(ydata,nSamp,A,sens,spec,M,Madapt,delta) # Store sample variances for intermediate nodes NUTSintVars = [] for intNode in range(A.shape[1]): currVar = np.var(sps.expit(NUTSsamples[:,intNode])) NUTSintVars.append(currVar) # Normalize sum of all variances to 1 NUTSintVars = NUTSintVars/np.sum(NUTSintVars) # Now pick from these samples to generate projections for currDay in range(numDaysToSched): numToTest = int(np.floor((numBudgetRemain-usedBudgetSoFar) / (numDaysRemain-currDay))) +\ min((numBudgetRemain-usedBudgetSoFar) % (numDaysRemain-currDay),1) # How many samples to conduct in the next day for testInd in range(numToTest): # Pick an intermediate node to "target", with more emphasis on higher sample variances rUnif = random.uniform(0,1) for intInd in range(A.shape[1]): if rUnif < np.sum(NUTSintVars[0:(intInd+1)]): targIntInd = intInd break # Go through the same process with the column of A # pertaining to this target intermediate node AtargCol = [row[targIntInd] for row in A] # Add a small epsilon, for 0 values, and normalize AtargCol = np.add(AtargCol,1e-3) AtargCol = AtargCol/np.sum(AtargCol) rUnif = random.uniform(0,1) for intEnd in range(A.shape[0]): if rUnif < np.sum(AtargCol[0:(intEnd+1)]): currInd = intEnd break currNode = resultsList[currInd][0] sampleSchedule.append([firstTestDay+currDay,currNode]) usedBudgetSoFar += 1 # Need to sort this list before passing it through sampleSchedule.sort(key=lambda x: x[0]) return sampleSchedule
a96638a7f2816a42069cfa714822e728ee7e325f
10,973
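The variance-weighted node selection above (cumulative sums compared against a uniform draw) is equivalent to NumPy's weighted choice; a small sketch with made-up sample variances:

import numpy as np

# Variance-weighted pick of an intermediate node, equivalent to the manual
# cumulative-sum loop over rUnif above (the variances here are made up).
rng = np.random.default_rng(0)
sample_vars = np.array([0.02, 0.10, 0.05, 0.30])   # per-node posterior sample variances
weights = sample_vars / sample_vars.sum()           # normalize to a probability vector

picks = rng.choice(len(weights), size=10000, p=weights)
print(np.bincount(picks) / picks.size)              # ~[0.04, 0.21, 0.11, 0.64]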
import numpy def calc_cos_t(hb_ratio, d, theta_s_i, theta_v_i, relative_azimuth): """Calculate t cossine. Args: hb_ratio (int): h/b. d (numpy array): d. theta_s_i (numpy array): theta_s_i. theta_v_i (numpy array): theta_v_i. relative_azimuth (numpy array): relative_azimuth. Returns: cos_t : numpy.array. """ return hb_ratio * numpy.sqrt(d*d + numpy.power(numpy.tan(theta_s_i)*numpy.tan(theta_v_i)*numpy.sin(relative_azimuth), 2)) / (sec(theta_s_i) + sec(theta_v_i))
5ac37f2aa8994b75bb0c71d9f54616ff041a5ff6
10,974
from typing import Callable def guild_only() -> Callable: """A decorator that limits the usage of a slash command to guild contexts. The command won't be able to be used in private message channels. Example --------- .. code-block:: python3 from discord import guild_only @bot.slash_command() @guild_only() async def test(ctx): await ctx.respond('You\'re in a guild.') """ def inner(command: Callable): if isinstance(command, ApplicationCommand): command.guild_only = True else: command.__guild_only__ = True return command return inner
d8ca993dd0ea71791458edd3c3bcec0551262552
10,975
import re def truncate(text, words=25): """Remove tags and truncate text to the specified number of words.""" return " ".join(re.sub("(?s)<.*?>", " ", text).split()[:words])
18d994a52dc5549aabb7cc8f33d5755be5392208
10,976
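Example use of truncate above (relies on the re import shown in the entry):

print(truncate("<p>The <b>quick</b> brown fox jumps over the lazy dog</p>", words=5))
# -> "The quick brown fox jumps"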
from datetime import datetime def _run_query_create_log(query, client, destination_table=None): """ Runs BigQuery queryjob :param query: Query to run as a string :param client: BigQuery client object :return: QueryJob object """ # Job config job_config = bigquery.QueryJobConfig() if destination_table is not None: job_config.destination = destination_table else: timestamp_name = datetime.now().strftime("query_%Y%m%d%H%M%S") project = "cmap-big-table" dataset = "cmap_query" dest_tbl = ".".join([project, dataset, timestamp_name]) job_config.destination = dest_tbl job_config.create_disposition = "CREATE_IF_NEEDED" return client.query(query, job_config=job_config)
265361c150f654bc8826cda096d85b4ae2911317
10,977
def read_disparity_gt(filename: str) -> np.ndarray: """ reads the disparity files used for training/testing. :param filename: name of the file. :return: data points. """ points = [] with open(filename, 'r') as file: for line in file: line = line.split(' ') frame = int(line[0]) x_rgb = int(line[1]) y = int(line[2]) x_ir = int(line[3]) points.append([frame, x_rgb, y, x_ir]) return np.array(points, dtype=np.int32)
bad5ad6698d58e5173709cf866fb027367daa8b1
10,978
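Example use of read_disparity_gt above with a small, hypothetical ground-truth file (one "frame x_rgb y x_ir" record per line):

import os
import tempfile
import numpy as np

# Write a tiny made-up ground-truth file, then parse it back.
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("0 120 240 95\n1 130 250 101\n")
    path = f.name

points = read_disparity_gt(path)
print(points.shape, points.dtype)   # (2, 4) int32
print(points[0])                    # [  0 120 240  95]
os.remove(path)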
def purchase_index(request): """displays users purchase history""" login_id = request.user.id context = {'histories': Purchase_history.objects.all().filter(acc_id=login_id).order_by('-date')} # get users purchase history return render(request, 'profile/histories/purchase_history.html', context)
d353e839ff08adfeebaa28a708d36df4d21a7ea8
10,979
import array def solve_EEC(self): """Compute the parameters dict for the equivalent electrical circuit cf "Advanced Electrical Drives, analysis, modeling, control" Rik de doncker, Duco W.J. Pulle, Andre Veltman, Springer edition <--- ---> -----R-----wsLqIq---- -----R-----wsLdId---- | | | | | | | BEMF | | | | ---------Id---------- ---------Iq---------- ---> ---> Ud Uq Parameters ---------- self : EEC_PMSM an EEC_PMSM object Return ------ out_dict : dict Dict containing all magnetic quantities that have been calculated in EEC """ felec = self.freq0 ws = 2 * pi * felec out_dict = dict() if "Ud" in self.parameters: # Voltage driven # Prepare linear system XR = array( [ [self.parameters["R20"], -ws * self.parameters["Lq"]], [ws * self.parameters["Ld"], self.parameters["R20"]], ] ) XE = array([0, ws * self.parameters["phi"]]) XU = array([self.parameters["Ud"], self.parameters["Uq"]]) # Solve system XI = solve(XR, XU - XE) out_dict["Id"] = XI[0] out_dict["Iq"] = XI[1] out_dict["Ud"] = self.parameters["Ud"] out_dict["Uq"] = self.parameters["Uq"] else: # Current Driven Ud = ( self.parameters["R20"] * self.parameters["Id"] - ws * self.parameters["Phiq"] ) Uq = ( self.parameters["R20"] * self.parameters["Iq"] + ws * self.parameters["Phid"] ) out_dict["Ud"] = Ud out_dict["Uq"] = Uq out_dict["Id"] = self.parameters["Id"] out_dict["Iq"] = self.parameters["Iq"] return out_dict
ad862028447acd038e76ba95960b089985bffe9b
10,980
def is_active(seat):
    """Return True if the seat is occupied ('#'); return False if it is empty
    or not present in seat_map.
    """
    active = seat_map.get(seat, ".")
    return active == "#"
098c4ccf9d4e9bbadb853d77a100eabd4e5142bf
10,981
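is_active relies on a module-level seat_map that is not included in this entry; assuming it maps seat coordinates to '#' (occupied) or '.' (empty), usage looks like:

# Hypothetical seat_map, e.g. from an Advent of Code style seating grid.
seat_map = {(0, 0): "#", (0, 1): ".", (0, 2): "#"}

print(is_active((0, 0)))   # True  - occupied
print(is_active((0, 1)))   # False - empty
print(is_active((9, 9)))   # False - missing seats default to "."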
from typing import List import tqdm def calibrate_intensity_to_powder(peak_intensity: dict, powder_peak_intensity: dict, powder_peak_label: List[str], image_numbers: List[int], powder_start: int = 1): """Calibrate peak intensity values to intensity measurements taken from a 'random' powder sample.""" corrected_peak_intensity = dict() first_iteration = True for image_number in tqdm(image_numbers): corrected_peak_intensity[image_number] = dict() for label in powder_peak_label: powder_average = np.average(powder_peak_intensity[powder_start][label]) powder_error = np.std(powder_peak_intensity[powder_start][label], ddof=1) corrected_peak_intensity[image_number][label] = [] corrected_peak_intensity[image_number][label] = peak_intensity[image_number][label] / powder_average if first_iteration: print(f"Normalised {label} intensities by a value of {powder_average} +/- {powder_error} from average powder intensity.") else: continue first_iteration = False return corrected_peak_intensity
8019eec6c63152ee25bacc9dcf8fa723407f8107
10,982
import json def examine(path): """ Look for forbidden tasks in a job-output.json file path """ data = json.load(open(path)) to_fix = False for playbook in data: if playbook['trusted']: continue for play in playbook['plays']: for task in play['tasks']: for hostname, host in task['hosts'].items(): if hostname != 'localhost': continue if host['action'] in ['command', 'shell']: print("Found disallowed task:") print(" Playbook: %s" % playbook['playbook']) print(" Role: %s" % task.get('role', {}).get('name')) print(" Task: %s" % task.get('task', {}).get('name')) to_fix = True return to_fix
e441fc58bbfc4547bbdff451d6d06ba952e5a1ba
10,983
import subprocess import sys def determine_disjuct_modules_alternative(src_rep): """ Potentially get rid of determine_added_modules and get_modules_lst() """ findimports_output = subprocess.check_output(['findimports', src_rep]) findimports_output = findimports_output.decode('utf-8').splitlines() custom_modules_lst = [] for i, elem in enumerate(findimports_output): if ':' in elem: continue elem = elem.rstrip('\n').split('.',1)[0].strip() #print(f" element : {elem}") custom_modules_lst.append(elem) custom_modules_lst = set(custom_modules_lst) #beautify this disjunct_modules = [] for i, elem in enumerate(custom_modules_lst): if elem in sys.modules: continue else: disjunct_modules.append(elem) return disjunct_modules
be3e0f1e4edf84bdeb8ea5b2a0117d9853581884
10,984
def config_ask(default_message=True, config_args=config_variables):
    """Formats user command line input for configuration details"""
    if default_message is True:
        print("Enter configuration parameters for the following variables... ")
    else:
        print(default_message)
    config_dictionary = dict()
    for v in config_args:
        config_dictionary.update({v: input("{}: ".format(v))})
    return config_dictionary
277d26ae67baf14ee6b16547bb72c029ab0bc610
10,985
import sys def parseAndRun(args): """interface used by Main program and py.test (arelle_test.py) """ try: hasWebServer = True except ImportError: hasWebServer = False cntlr = CntlrCmdLine() # need controller for plug ins to be loaded usage = "usage: %prog [options]" parser = OptionParser(usage, version="Arelle(r) {0} ({1}bit)".format(Version.__version__, cntlr.systemWordSize), conflict_handler="resolve") # allow reloading plug-in options without errors parser.add_option("-f", "--file", dest="entrypointFile", help=_("FILENAME is an entry point, which may be " "an XBRL instance, schema, linkbase file, " "inline XBRL instance, testcase file, " "testcase index file. FILENAME may be " "a local file or a URI to a web located file. " "For multiple instance filings may be | separated file names or JSON list " "of file/parameter dicts [{\"file\":\"filepath\"}, {\"file\":\"file2path\"} ...].")) parser.add_option("--username", dest="username", help=_("user name if needed (with password) for web file retrieval")) parser.add_option("--password", dest="password", help=_("password if needed (with user name) for web retrieval")) # special option for web interfaces to suppress closing an opened modelXbrl parser.add_option("--keepOpen", dest="keepOpen", action="store_true", help=SUPPRESS_HELP) parser.add_option("-i", "--import", dest="importFiles", help=_("FILENAME is a list of files to import to the DTS, such as " "additional formula or label linkbases. " "Multiple file names are separated by a '|' character. ")) parser.add_option("-d", "--diff", dest="diffFile", help=_("FILENAME is a second entry point when " "comparing (diffing) two DTSes producing a versioning report.")) parser.add_option("-r", "--report", dest="versReportFile", help=_("FILENAME is the filename to save as the versioning report.")) parser.add_option("-v", "--validate", action="store_true", dest="validate", help=_("Validate the file according to the entry " "file type. If an XBRL file, it is validated " "according to XBRL validation 2.1, calculation linkbase validation " "if either --calcDecimals or --calcPrecision are specified, and " "SEC EDGAR Filing Manual (if --efm selected) or Global Filer Manual " "disclosure system validation (if --gfm=XXX selected). " "If a test suite or testcase, the test case variations " "are individually so validated. " "If formulae are present they will be validated and run unless --formula=none is specified. " )) parser.add_option("--calcDecimals", action="store_true", dest="calcDecimals", help=_("Specify calculation linkbase validation inferring decimals.")) parser.add_option("--calcdecimals", action="store_true", dest="calcDecimals", help=SUPPRESS_HELP) parser.add_option("--calcPrecision", action="store_true", dest="calcPrecision", help=_("Specify calculation linkbase validation inferring precision.")) parser.add_option("--calcprecision", action="store_true", dest="calcPrecision", help=SUPPRESS_HELP) parser.add_option("--calcDeduplicate", action="store_true", dest="calcDeduplicate", help=_("Specify de-duplication of consistent facts when performing calculation validation, chooses most accurate fact.")) parser.add_option("--calcdeduplicate", action="store_true", dest="calcDeduplicate", help=SUPPRESS_HELP) parser.add_option("--efm", action="store_true", dest="validateEFM", help=_("Select Edgar Filer Manual (U.S. 
SEC) disclosure system validation (strict).")) parser.add_option("--gfm", action="store", dest="disclosureSystemName", help=SUPPRESS_HELP) parser.add_option("--disclosureSystem", action="store", dest="disclosureSystemName", help=_("Specify a disclosure system name and" " select disclosure system validation. " "Enter --disclosureSystem=help for list of names or help-verbose for list of names and descriptions. ")) parser.add_option("--disclosuresystem", action="store", dest="disclosureSystemName", help=SUPPRESS_HELP) parser.add_option("--hmrc", action="store_true", dest="validateHMRC", help=_("Select U.K. HMRC disclosure system validation.")) parser.add_option("--utr", action="store_true", dest="utrValidate", help=_("Select validation with respect to Unit Type Registry.")) parser.add_option("--utrUrl", action="store", dest="utrUrl", help=_("Override disclosure systems Unit Type Registry location (URL or file path).")) parser.add_option("--utrurl", action="store", dest="utrUrl", help=SUPPRESS_HELP) parser.add_option("--infoset", action="store_true", dest="infosetValidate", help=_("Select validation with respect testcase infosets.")) parser.add_option("--labelLang", action="store", dest="labelLang", help=_("Language for labels in following file options (override system settings)")) parser.add_option("--labellang", action="store", dest="labelLang", help=SUPPRESS_HELP) parser.add_option("--labelRole", action="store", dest="labelRole", help=_("Label role for labels in following file options (instead of standard label)")) parser.add_option("--labelrole", action="store", dest="labelRole", help=SUPPRESS_HELP) parser.add_option("--DTS", "--csvDTS", action="store", dest="DTSFile", help=_("Write DTS tree into FILE (may be .csv or .html)")) parser.add_option("--facts", "--csvFacts", action="store", dest="factsFile", help=_("Write fact list into FILE")) parser.add_option("--factListCols", action="store", dest="factListCols", help=_("Columns for fact list file")) parser.add_option("--factTable", "--csvFactTable", action="store", dest="factTableFile", help=_("Write fact table into FILE")) parser.add_option("--concepts", "--csvConcepts", action="store", dest="conceptsFile", help=_("Write concepts into FILE")) parser.add_option("--pre", "--csvPre", action="store", dest="preFile", help=_("Write presentation linkbase into FILE")) parser.add_option("--table", "--csvTable", action="store", dest="tableFile", help=_("Write table linkbase into FILE")) parser.add_option("--cal", "--csvCal", action="store", dest="calFile", help=_("Write calculation linkbase into FILE")) parser.add_option("--dim", "--csvDim", action="store", dest="dimFile", help=_("Write dimensions (of definition) linkbase into FILE")) parser.add_option("--anch", action="store", dest="anchFile", help=_("Write anchoring relationships (of definition) linkbase into FILE")) parser.add_option("--formulae", "--htmlFormulae", action="store", dest="formulaeFile", help=_("Write formulae linkbase into FILE")) parser.add_option("--viewArcrole", action="store", dest="viewArcrole", help=_("Write linkbase relationships for viewArcrole into viewFile")) parser.add_option("--viewarcrole", action="store", dest="viewArcrole", help=SUPPRESS_HELP) parser.add_option("--viewFile", action="store", dest="viewFile", help=_("Write linkbase relationships for viewArcrole into viewFile")) parser.add_option("--viewfile", action="store", dest="viewFile", help=SUPPRESS_HELP) parser.add_option("--roleTypes", action="store", dest="roleTypesFile", help=_("Write defined role types into 
FILE")) parser.add_option("--roletypes", action="store", dest="roleTypesFile", help=SUPPRESS_HELP) parser.add_option("--arcroleTypes", action="store", dest="arcroleTypesFile", help=_("Write defined arcrole types into FILE")) parser.add_option("--arcroletypes", action="store", dest="arcroleTypesFile", help=SUPPRESS_HELP) parser.add_option("--testReport", "--csvTestReport", action="store", dest="testReport", help=_("Write test report of validation (of test cases) into FILE")) parser.add_option("--testreport", "--csvtestreport", action="store", dest="testReport", help=SUPPRESS_HELP) parser.add_option("--testReportCols", action="store", dest="testReportCols", help=_("Columns for test report file")) parser.add_option("--testreportcols", action="store", dest="testReportCols", help=SUPPRESS_HELP) parser.add_option("--rssReport", action="store", dest="rssReport", help=_("Write RSS report into FILE")) parser.add_option("--rssreport", action="store", dest="rssReport", help=SUPPRESS_HELP) parser.add_option("--rssReportCols", action="store", dest="rssReportCols", help=_("Columns for RSS report file")) parser.add_option("--rssreportcols", action="store", dest="rssReportCols", help=SUPPRESS_HELP) parser.add_option("--skipDTS", action="store_true", dest="skipDTS", help=_("Skip DTS activities (loading, discovery, validation), useful when an instance needs only to be parsed.")) parser.add_option("--skipdts", action="store_true", dest="skipDTS", help=SUPPRESS_HELP) parser.add_option("--skipLoading", action="store", dest="skipLoading", help=_("Skip loading discovered or schemaLocated files matching pattern (unix-style file name patterns separated by '|'), useful when not all linkbases are needed.")) parser.add_option("--skiploading", action="store", dest="skipLoading", help=SUPPRESS_HELP) parser.add_option("--logFile", action="store", dest="logFile", help=_("Write log messages into file, otherwise they go to standard output. " "If file ends in .xml it is xml-formatted, otherwise it is text. ")) parser.add_option("--logfile", action="store", dest="logFile", help=SUPPRESS_HELP) parser.add_option("--logFormat", action="store", dest="logFormat", help=_("Logging format for messages capture, otherwise default is \"[%(messageCode)s] %(message)s - %(file)s\".")) parser.add_option("--logformat", action="store", dest="logFormat", help=SUPPRESS_HELP) parser.add_option("--logLevel", action="store", dest="logLevel", help=_("Minimum level for messages capture, otherwise the message is ignored. " "Current order of levels are debug, info, info-semantic, warning, warning-semantic, warning, assertion-satisfied, inconsistency, error-semantic, assertion-not-satisfied, and error. ")) parser.add_option("--loglevel", action="store", dest="logLevel", help=SUPPRESS_HELP) parser.add_option("--logLevelFilter", action="store", dest="logLevelFilter", help=_("Regular expression filter for logLevel. " "(E.g., to not match *-semantic levels, logLevelFilter=(?!^.*-semantic$)(.+). 
")) parser.add_option("--loglevelfilter", action="store", dest="logLevelFilter", help=SUPPRESS_HELP) parser.add_option("--logCodeFilter", action="store", dest="logCodeFilter", help=_("Regular expression filter for log message code.")) parser.add_option("--logcodefilter", action="store", dest="logCodeFilter", help=SUPPRESS_HELP) parser.add_option("--logTextMaxLength", action="store", dest="logTextMaxLength", type="int", help=_("Log file text field max length override.")) parser.add_option("--logtextmaxlength", action="store", dest="logTextMaxLength", type="int", help=SUPPRESS_HELP) parser.add_option("--logRefObjectProperties", action="store_true", dest="logRefObjectProperties", help=_("Log reference object properties (default)."), default=True) parser.add_option("--logrefobjectproperties", action="store_true", dest="logRefObjectProperties", help=SUPPRESS_HELP) parser.add_option("--logNoRefObjectProperties", action="store_false", dest="logRefObjectProperties", help=_("Do not log reference object properties.")) parser.add_option("--lognorefobjectproperties", action="store_false", dest="logRefObjectProperties", help=SUPPRESS_HELP) parser.add_option("--statusPipe", action="store", dest="statusPipe", help=SUPPRESS_HELP) parser.add_option("--monitorParentProcess", action="store", dest="monitorParentProcess", help=SUPPRESS_HELP) parser.add_option("--outputAttribution", action="store", dest="outputAttribution", help=SUPPRESS_HELP) parser.add_option("--outputattribution", action="store", dest="outputAttribution", help=SUPPRESS_HELP) parser.add_option("--showOptions", action="store_true", dest="showOptions", help=SUPPRESS_HELP) parser.add_option("--parameters", action="store", dest="parameters", help=_("Specify parameters for formula and validation (name=value[,name=value]).")) parser.add_option("--parameterSeparator", action="store", dest="parameterSeparator", help=_("Specify parameters separator string (if other than comma).")) parser.add_option("--parameterseparator", action="store", dest="parameterSeparator", help=SUPPRESS_HELP) parser.add_option("--formula", choices=("validate", "run", "none"), dest="formulaAction", help=_("Specify formula action: " "validate - validate only, without running, " "run - validate and run, or " "none - prevent formula validation or running when also specifying -v or --validate. 
" "if this option is not specified, -v or --validate will validate and run formulas if present")) parser.add_option("--formulaParamExprResult", action="store_true", dest="formulaParamExprResult", help=_("Specify formula tracing.")) parser.add_option("--formulaparamexprresult", action="store_true", dest="formulaParamExprResult", help=SUPPRESS_HELP) parser.add_option("--formulaParamInputValue", action="store_true", dest="formulaParamInputValue", help=_("Specify formula tracing.")) parser.add_option("--formulaparaminputvalue", action="store_true", dest="formulaParamInputValue", help=SUPPRESS_HELP) parser.add_option("--formulaCallExprSource", action="store_true", dest="formulaCallExprSource", help=_("Specify formula tracing.")) parser.add_option("--formulacallexprsource", action="store_true", dest="formulaCallExprSource", help=SUPPRESS_HELP) parser.add_option("--formulaCallExprCode", action="store_true", dest="formulaCallExprCode", help=_("Specify formula tracing.")) parser.add_option("--formulacallexprcode", action="store_true", dest="formulaCallExprCode", help=SUPPRESS_HELP) parser.add_option("--formulaCallExprEval", action="store_true", dest="formulaCallExprEval", help=_("Specify formula tracing.")) parser.add_option("--formulacallexpreval", action="store_true", dest="formulaCallExprEval", help=SUPPRESS_HELP) parser.add_option("--formulaCallExprResult", action="store_true", dest="formulaCallExprResult", help=_("Specify formula tracing.")) parser.add_option("--formulacallexprtesult", action="store_true", dest="formulaCallExprResult", help=SUPPRESS_HELP) parser.add_option("--formulaVarSetExprEval", action="store_true", dest="formulaVarSetExprEval", help=_("Specify formula tracing.")) parser.add_option("--formulavarsetexpreval", action="store_true", dest="formulaVarSetExprEval", help=SUPPRESS_HELP) parser.add_option("--formulaVarSetExprResult", action="store_true", dest="formulaVarSetExprResult", help=_("Specify formula tracing.")) parser.add_option("--formulavarsetexprresult", action="store_true", dest="formulaVarSetExprResult", help=SUPPRESS_HELP) parser.add_option("--formulaVarSetTiming", action="store_true", dest="timeVariableSetEvaluation", help=_("Specify showing times of variable set evaluation.")) parser.add_option("--formulavarsettiming", action="store_true", dest="timeVariableSetEvaluation", help=SUPPRESS_HELP) parser.add_option("--formulaAsserResultCounts", action="store_true", dest="formulaAsserResultCounts", help=_("Specify formula tracing.")) parser.add_option("--formulaasserresultcounts", action="store_true", dest="formulaAsserResultCounts", help=SUPPRESS_HELP) parser.add_option("--formulaSatisfiedAsser", action="store_true", dest="formulaSatisfiedAsser", help=_("Specify formula tracing.")) parser.add_option("--formulasatisfiedasser", action="store_true", dest="formulaSatisfiedAsser", help=SUPPRESS_HELP) parser.add_option("--formulaUnsatisfiedAsser", action="store_true", dest="formulaUnsatisfiedAsser", help=_("Specify formula tracing.")) parser.add_option("--formulaunsatisfiedasser", action="store_true", dest="formulaUnsatisfiedAsser", help=SUPPRESS_HELP) parser.add_option("--formulaUnsatisfiedAsserError", action="store_true", dest="formulaUnsatisfiedAsserError", help=_("Specify formula tracing.")) parser.add_option("--formulaunsatisfiedassererror", action="store_true", dest="formulaUnsatisfiedAsserError", help=SUPPRESS_HELP) parser.add_option("--formulaFormulaRules", action="store_true", dest="formulaFormulaRules", help=_("Specify formula tracing.")) 
parser.add_option("--formulaformularules", action="store_true", dest="formulaFormulaRules", help=SUPPRESS_HELP) parser.add_option("--formulaVarsOrder", action="store_true", dest="formulaVarsOrder", help=_("Specify formula tracing.")) parser.add_option("--formulavarsorder", action="store_true", dest="formulaVarsOrder", help=SUPPRESS_HELP) parser.add_option("--formulaVarExpressionSource", action="store_true", dest="formulaVarExpressionSource", help=_("Specify formula tracing.")) parser.add_option("--formulavarexpressionsource", action="store_true", dest="formulaVarExpressionSource", help=SUPPRESS_HELP) parser.add_option("--formulaVarExpressionCode", action="store_true", dest="formulaVarExpressionCode", help=_("Specify formula tracing.")) parser.add_option("--formulavarexpressioncode", action="store_true", dest="formulaVarExpressionCode", help=SUPPRESS_HELP) parser.add_option("--formulaVarExpressionEvaluation", action="store_true", dest="formulaVarExpressionEvaluation", help=_("Specify formula tracing.")) parser.add_option("--formulavarexpressionevaluation", action="store_true", dest="formulaVarExpressionEvaluation", help=SUPPRESS_HELP) parser.add_option("--formulaVarExpressionResult", action="store_true", dest="formulaVarExpressionResult", help=_("Specify formula tracing.")) parser.add_option("--formulavarexpressionresult", action="store_true", dest="formulaVarExpressionResult", help=SUPPRESS_HELP) parser.add_option("--formulaVarFilterWinnowing", action="store_true", dest="formulaVarFilterWinnowing", help=_("Specify formula tracing.")) parser.add_option("--formulavarfilterwinnowing", action="store_true", dest="formulaVarFilterWinnowing", help=SUPPRESS_HELP) parser.add_option("--formulaVarFiltersResult", action="store_true", dest="formulaVarFiltersResult", help=_("Specify formula tracing.")) parser.add_option("--formulavarfiltersresult", action="store_true", dest="formulaVarFiltersResult", help=SUPPRESS_HELP) parser.add_option("--testcaseResultsCaptureWarnings", action="store_true", dest="testcaseResultsCaptureWarnings", help=_("For testcase variations capture warning results, default is inconsistency or warning if there is any warning expected result. ")) parser.add_option("--testcaseresultscapturewarnings", action="store_true", dest="testcaseResultsCaptureWarnings", help=SUPPRESS_HELP) parser.add_option("--formulaRunIDs", action="store", dest="formulaRunIDs", help=_("Specify formula/assertion IDs to run, separated by a '|' character.")) parser.add_option("--formularunids", action="store", dest="formulaRunIDs", help=SUPPRESS_HELP) parser.add_option("--formulaCompileOnly", action="store_true", dest="formulaCompileOnly", help=_("Specify formula are to be compiled but not executed.")) parser.add_option("--formulacompileonly", action="store_true", dest="formulaCompileOnly", help=SUPPRESS_HELP) parser.add_option("--uiLang", action="store", dest="uiLang", help=_("Language for user interface (override system settings, such as program messages). Does not save setting.")) parser.add_option("--uilang", action="store", dest="uiLang", help=SUPPRESS_HELP) parser.add_option("--proxy", action="store", dest="proxy", help=_("Modify and re-save proxy settings configuration. " "Enter 'system' to use system proxy setting, 'none' to use no proxy, " "'http://[user[:password]@]host[:port]' " " (e.g., http://192.168.1.253, http://example.com:8080, http://joe:[email protected]:8080), " " or 'show' to show current setting, ." 
)) parser.add_option("--internetConnectivity", choices=("online", "offline"), dest="internetConnectivity", help=_("Specify internet connectivity: online or offline")) parser.add_option("--internetconnectivity", action="store", dest="internetConnectivity", help=SUPPRESS_HELP) parser.add_option("--internetTimeout", type="int", dest="internetTimeout", help=_("Specify internet connection timeout in seconds (0 means unlimited).")) parser.add_option("--internettimeout", type="int", action="store", dest="internetTimeout", help=SUPPRESS_HELP) parser.add_option("--internetRecheck", choices=("weekly", "daily", "never"), dest="internetRecheck", help=_("Specify rechecking cache files (weekly is default)")) parser.add_option("--internetrecheck", choices=("weekly", "daily", "never"), action="store", dest="internetRecheck", help=SUPPRESS_HELP) parser.add_option("--internetLogDownloads", action="store_true", dest="internetLogDownloads", help=_("Log info message for downloads to web cache.")) parser.add_option("--internetlogdownloads", action="store_true", dest="internetLogDownloads", help=SUPPRESS_HELP) parser.add_option("--noCertificateCheck", action="store_true", dest="noCertificateCheck", help=_("Specify no checking of internet secure connection certificate")) parser.add_option("--nocertificatecheck", action="store_true", dest="noCertificateCheck", help=SUPPRESS_HELP) parser.add_option("--xdgConfigHome", action="store", dest="xdgConfigHome", help=_("Specify non-standard location for configuration and cache files (overrides environment parameter XDG_CONFIG_HOME).")) parser.add_option("--plugins", action="store", dest="plugins", help=_("Specify plug-in configuration for this invocation. " "Enter 'show' to confirm plug-in configuration. " "Commands show, and module urls are '|' separated: " "url specifies a plug-in by its url or filename, " "relative URLs are relative to installation plug-in directory, " " (e.g., 'http://arelle.org/files/hello_web.py', 'C:\Program Files\Arelle\examples\plugin\hello_dolly.py' to load, " "or ../examples/plugin/hello_dolly.py for relative use of examples directory) " "Local python files do not require .py suffix, e.g., hello_dolly without .py is sufficient, " "Packaged plug-in urls are their directory's url (e.g., --plugins EdgarRenderer or --plugins xbrlDB). " )) parser.add_option("--packages", action="store", dest="packages", help=_("Specify taxonomy packages configuration. " "Enter 'show' to show current packages configuration. " "Commands show, and module urls are '|' separated: " "url specifies a package by its url or filename, please use full paths. " "(Package settings from GUI are no longer shared with cmd line operation. " "Cmd line package settings are not persistent.) " )) parser.add_option("--package", action="store", dest="packages", help=SUPPRESS_HELP) parser.add_option("--packageManifestName", action="store", dest="packageManifestName", help=_("Provide non-standard archive manifest file name pattern (e.g., *taxonomyPackage.xml). " "Uses unix file name pattern matching. " "Multiple manifest files are supported in archive (such as oasis catalogs). " "(Replaces search for either .taxonomyPackage.xml or catalog.xml). 
" )) parser.add_option("--abortOnMajorError", action="store_true", dest="abortOnMajorError", help=_("Abort process on major error, such as when load is unable to find an entry or discovered file.")) parser.add_option("--showEnvironment", action="store_true", dest="showEnvironment", help=_("Show Arelle's config and cache directory and host OS environment parameters.")) parser.add_option("--showenvironment", action="store_true", dest="showEnvironment", help=SUPPRESS_HELP) parser.add_option("--collectProfileStats", action="store_true", dest="collectProfileStats", help=_("Collect profile statistics, such as timing of validation activities and formulae.")) if hasWebServer: parser.add_option("--webserver", action="store", dest="webserver", help=_("start web server on host:port[:server] for REST and web access, e.g., --webserver locahost:8080, " "or specify nondefault a server name, such as cherrypy, --webserver locahost:8080:cherrypy. " "(It is possible to specify options to be defaults for the web server, such as disclosureSystem and validations, but not including file names.) ")) pluginOptionsIndex = len(parser.option_list) # install any dynamic plugins so their command line options can be parsed if present for i, arg in enumerate(args): if arg.startswith('--plugin'): # allow singular or plural (option must simply be non-ambiguous if len(arg) > 9 and arg[9] == '=': preloadPlugins = arg[10:] elif i < len(args) - 1: preloadPlugins = args[i+1] else: preloadPlugins = "" for pluginCmd in preloadPlugins.split('|'): cmd = pluginCmd.strip() if cmd not in ("show", "temp") and len(cmd) > 0 and cmd[0] not in ('-', '~', '+'): moduleInfo = PluginManager.addPluginModule(cmd) if moduleInfo: cntlr.preloadedPlugins[cmd] = moduleInfo PluginManager.reset() break # add plug-in options for optionsExtender in pluginClassMethods("CntlrCmdLine.Options"): optionsExtender(parser) pluginLastOptionIndex = len(parser.option_list) parser.add_option("-a", "--about", action="store_true", dest="about", help=_("Show product version, copyright, and license.")) if not args and cntlr.isGAE: args = ["--webserver=::gae"] elif cntlr.isCGI: args = ["--webserver=::cgi"] elif cntlr.isMSW: # if called from java on Windows any empty-string arguments are lost, see: # http://bugs.java.com/view_bug.do?bug_id=6518827 # insert needed arguments sourceArgs = args args = [] namedOptions = set() optionsWithArg = set() for option in parser.option_list: names = str(option).split('/') namedOptions.update(names) if option.action == "store": optionsWithArg.update(names) priorArg = None for arg in sourceArgs: if priorArg in optionsWithArg and arg in namedOptions: # probable java/MSFT interface bug 6518827 args.append('') # add empty string argument # remove quoting if arguments quoted according to http://bugs.java.com/view_bug.do?bug_id=6518827 if r'\"' in arg: # e.g., [{\"foo\":\"bar\"}] -> [{"foo":"bar"}] arg = arg.replace(r'\"', '"') args.append(arg) priorArg = arg (options, leftoverArgs) = parser.parse_args(args) if options.about: print(_("\narelle(r) {0} ({1}bit)\n\n" "An open source XBRL platform\n" "(c) 2010-{2} Mark V Systems Limited\n" "All rights reserved\nhttp://www.arelle.org\[email protected]\n\n" "Licensed under the Apache License, Version 2.0 (the \"License\"); " "you may not \nuse this file except in compliance with the License. 
" "You may obtain a copy \nof the License at " "'http://www.apache.org/licenses/LICENSE-2.0'\n\n" "Unless required by applicable law or agreed to in writing, software \n" "distributed under the License is distributed on an \"AS IS\" BASIS, \n" "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \n" "See the License for the specific language governing permissions and \n" "limitations under the License." "\n\nIncludes:" "\n Python(r) {4[0]}.{4[1]}.{4[2]} (c) 2001-2013 Python Software Foundation" "\n PyParsing (c) 2003-2013 Paul T. McGuire" "\n lxml {5[0]}.{5[1]}.{5[2]} (c) 2004 Infrae, ElementTree (c) 1999-2004 by Fredrik Lundh" "{3}" "\n May include installable plug-in modules with author-specific license terms" ).format(Version.__version__, cntlr.systemWordSize, Version.copyrightLatestYear, _("\n Bottle (c) 2011-2013 Marcel Hellkamp") if hasWebServer else "", sys.version_info, etree.LXML_VERSION)) elif options.disclosureSystemName in ("help", "help-verbose"): text = _("Disclosure system choices: \n{0}").format(' \n'.join(cntlr.modelManager.disclosureSystem.dirlist(options.disclosureSystemName))) try: print(text) except UnicodeEncodeError: print(text.encode("ascii", "replace").decode("ascii")) elif len(leftoverArgs) != 0 and (not hasWebServer or options.webserver is None): parser.error(_("unrecognized arguments: {}").format(', '.join(leftoverArgs))) elif (options.entrypointFile is None and ((not options.proxy) and (not options.plugins) and (not any(pluginOption for pluginOption in parser.option_list[pluginOptionsIndex:pluginLastOptionIndex])) and (not hasWebServer or options.webserver is None))): parser.error(_("incorrect arguments, please try\n python CntlrCmdLine.py --help")) elif hasWebServer and options.webserver: # webserver incompatible with file operations if any((options.entrypointFile, options.importFiles, options.diffFile, options.versReportFile, options.factsFile, options.factListCols, options.factTableFile, options.conceptsFile, options.preFile, options.tableFile, options.calFile, options.dimFile, options.anchFile, options.formulaeFile, options.viewArcrole, options.viewFile, options.roleTypesFile, options.arcroleTypesFile )): parser.error(_("incorrect arguments with --webserver, please try\n python CntlrCmdLine.py --help")) else: # note that web server logging does not strip time stamp, use logFormat if that is desired cntlr.startLogging(logFileName='logToBuffer', logTextMaxLength=options.logTextMaxLength, logRefObjectProperties=options.logRefObjectProperties) from arelle import CntlrWebMain app = CntlrWebMain.startWebserver(cntlr, options) if options.webserver == '::wsgi': return app else: # parse and run the FILENAME cntlr.startLogging(logFileName=(options.logFile or "logToPrint"), logFormat=(options.logFormat or "[%(messageCode)s] %(message)s - %(file)s"), logLevel=(options.logLevel or "DEBUG"), logToBuffer=getattr(options, "logToBuffer", False), logTextMaxLength=options.logTextMaxLength, # e.g., used by EdgarRenderer to require buffered logging logRefObjectProperties=options.logRefObjectProperties) cntlr.run(options) return cntlr
d7cf011ee59aea93659594411051e71ae4dd76e0
10,986
def build_A(N): """ Build A based on the defined problem. Args: N -- (int) as defined above Returns: NumPy ndarray - A """ A = np.hstack( (np.eye(N), np.negative(np.eye(N))) ) A = np.vstack( (A, np.negative(np.hstack( (np.eye(N), np.eye(N)) ))) ) A = np.vstack( (A, np.hstack( (np.ones(N), np.zeros(N)) )) ) return A
eecb541e44cc177e594f38d9a7c1930f2d4f0c40
10,987
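A worked example of build_A above for N = 2 (the optimization problem itself is defined earlier in the source and not shown here); the result stacks an identity/negative-identity block, its negated mirror, and a final summing row into a (2N+1) x 2N matrix:

import numpy as np

A = build_A(2)
print(A.shape)   # (5, 4)
print(A)
# [[ 1.  0. -1.  0.]
#  [ 0.  1.  0. -1.]
#  [-1.  0. -1.  0.]
#  [ 0. -1.  0. -1.]
#  [ 1.  1.  0.  0.]]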
def gms_change_est2(T_cont, T_pert, q_cont, precip, level, lat, lev_sfc=925., gamma=1.):
    """
    Gross moist stability change estimate.

    Near surface MSE difference between ITCZ and local latitude, neglecting
    geopotential term and applying a thermodynamic scaling for the moisture
    term, and multiplying the ITCZ terms by cos(lat) and a fixed fraction gamma
    to account for deviation of upper level MSE from the near surface ITCZ value.
    """
    # ITCZ defined as latitude with maximum zonal mean precip.
    itcz_ind = np.argmax(precip.mean(axis=-1))
    # Zonal-mean temperature change at the near-surface level lev_sfc.
    T_pert = np.squeeze(T_pert[np.where(level == lev_sfc)].mean(axis=-1))
    T_cont = np.squeeze(T_cont[np.where(level == lev_sfc)].mean(axis=-1))
    dT = T_pert - T_cont
    dT_itcz = T_pert[itcz_ind] - T_cont[itcz_ind]
    q_cont = np.squeeze(q_cont[np.where(level == lev_sfc)].mean(axis=-1))
    # GMS change: difference between the (cos^2(lat)*gamma scaled) ITCZ term and
    # the local near-surface MSE perturbation, normalized by c_p.
    alpha = 0.07
    return (np.cos(np.deg2rad(lat))**2*gamma*
            (c_p + L_v*alpha*q_cont[itcz_ind])*dT_itcz -
            (c_p + L_v*alpha*q_cont)*dT)/c_p
991721a2dae52269dec276fa384d568b1d58672f
10,988
def solid_polygon_info_(base_sides, printed=False): """Get information about a solid polygon from its side count.""" # Example: A rectangular solid (Each base has four sides) is made up of # 12 edges, 8 vertices, 6 faces, and 12 triangles. edges = base_sides * 3 vertices = base_sides * 2 faces = base_sides + 2 triangles = (base_sides - 2) * 2 + vertices if printed: print(f"Edges: {edges}\nVertices: {vertices}\nFaces: {faces}\nTriangles: {triangles}") else: return {"edges": edges, "vertices": vertices, "faces": faces, "triangles": triangles}
a16bae9b82fd7a89332d5403359c2aa1eddf6cb4
10,989
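Two worked calls of solid_polygon_info_ above: a triangular prism (base_sides=3) and the rectangular solid from the docstring (base_sides=4):

print(solid_polygon_info_(3))
# {'edges': 9, 'vertices': 6, 'faces': 5, 'triangles': 8}

solid_polygon_info_(4, printed=True)
# Edges: 12
# Vertices: 8
# Faces: 6
# Triangles: 12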
def read(id=None):
    """
    This function responds to a request for /api/people
    with the complete list of people

    :return: list of people, or a single Person when an id (last name) is given
    """
    # Create the list of people from our data
    with client() as mcl:
        # Database
        ppldb = mcl.ppldb
        # collection (kind of like a table)
        pplclxn = ppldb.people
        log.debug(pplclxn)
        if id is None:
            ppl = [Person(p) for p in pplclxn.find()]
            log.debug(ppl)
        else:
            p = pplclxn.find_one({'lname': id})
            return Person(p)
    return ppl
    # return [PEOPLE[key] for key in sorted(PEOPLE.keys())]
92490bf44d4929709c95833f32c92236fe265fd0
10,990
def load_prism_theme(): """Loads a PrismJS theme from settings.""" theme = get_theme() if theme: script = ( f"""<link href="{PRISM_PREFIX}{PRISM_VERSION}/themes/prism-{theme}""" """.min.css" rel="stylesheet">""" ) return mark_safe(script) return ""
565e9fdb7b201bf6c34b3b2d198aa18f22070145
10,991
def get_root_name(depth): """ Returns the Rootname. """ return Alphabet.get_null_character() * depth
1514bcd0ef9c6a2a4051772d8eeee34f3f7197a7
10,992
import hashlib def md5(fname): """ Cacualte the MD5 hash of the file given as input. Returns the hash value of the input file. """ hash_md5 = hashlib.md5() with open(fname, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_md5.update(chunk) return hash_md5.hexdigest()
0c238810f1682f86e8a31982135c37017df4d6fd
10,993
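Example use of md5 above, checking the chunked file hash against hashlib's one-shot digest on a temporary file:

import hashlib
import os
import tempfile

with tempfile.NamedTemporaryFile("wb", delete=False) as f:
    f.write(b"hello world\n")
    path = f.name

print(md5(path))
assert md5(path) == hashlib.md5(b"hello world\n").hexdigest()  # same digest either way
os.remove(path)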
def date2num(date_axis, units, calendar): """ A wrapper from ``netCDF4.date2num`` able to handle "years since" and "months since" units. If time units are not "years since" or "months since" calls usual ``netcdftime.date2num``. :param numpy.array date_axis: The date axis following units :param str units: The proper time units :param str calendar: The NetCDF calendar attribute :returns: The corresponding numerical time axis :rtype: *array* """ # date_axis is the date time axis incremented following units (i.e., by years, months, etc). if not units.split(' ')[0] in ['years', 'months']: # If units are not 'years' or 'months since', call usual netcdftime.date2num: return nc.date2num(date_axis, units=units, calendar=calendar) else: # Return to time reference with 'days since' units_as_days = 'days ' + ' '.join(units.split(' ')[1:]) # Convert date axis as number of days since time reference days_axis = nc.date2num(date_axis, units=units_as_days, calendar=calendar) # Convert the time reference 'units_as_days' as datetime object start_date = nc.num2date(0.0, units=units_as_days, calendar=calendar) # Create years axis from input date axis years = np.array([date.year for date in np.atleast_1d(np.array(date_axis))]) if units.split(' ')[0] == 'years': # If units are 'years since' # Define the number of maximum and minimum years to build a date axis covering # the whole 'num_axis' period max_years = np.max(years - start_date.year + 1) min_years = np.min(years - start_date.year - 1) # Create a date axis with one year that spans the entire period by year years_axis = np.array([add_year(start_date, yid) for yid in np.arange(min_years, max_years + 2)]) # Convert years axis as number of days since time reference cdftime = netcdftime.utime(units_as_days, calendar=calendar) years_axis_as_days = cdftime.date2num(years_axis) # Find closest index for years_axis_as_days in days_axis closest_index = np.searchsorted(years_axis_as_days, days_axis) # Compute the difference between closest value of year axis and start date, in number of days num = days_axis - years_axis_as_days[closest_index] # Number of days of the corresponding closest year den = np.diff(years_axis_as_days)[closest_index] return min_years + closest_index + num / den elif units.split(' ')[0] == 'months': # If units are 'months since' # Define the number of maximum and minimum months to build a date axis covering # the whole 'num_axis' period max_months = np.max(12 * (years - start_date.year + 12)) min_months = np.min(12 * (years - start_date.year - 12)) # Create a date axis with one month that spans the entire period by month months_axis = np.array([add_month(start_date, mid) for mid in np.arange(min_months, max_months)]) # Convert months axis as number of days since time reference cdftime = netcdftime.utime(units_as_days, calendar=calendar) months_axis_as_days = cdftime.date2num(months_axis) # Find closest index for months_axis_as_days in days_axis closest_index = np.searchsorted(months_axis_as_days, days_axis) # Compute the difference between closest value of months axis and start date, in number of days num = days_axis - months_axis_as_days[closest_index] # Number of days of the corresponding closest month den = np.diff(months_axis_as_days)[closest_index] return min_months + closest_index + num / den
b435697098c58d1045f7e31eefb23cac201bfe0c
10,994
import gettext def _(txt): """ Custom gettext translation function that uses the CurlyTx domain """ t = gettext.dgettext("CurlyTx", txt) if t == txt: #print "[CurlyTx] fallback to default translation for", txt t = gettext.gettext(txt) return t
839c36184eabde641a40d7b7ad55d4695574dafb
10,995
import html def output_node(ctx, difference, path, indentstr, indentnum): """Returns a tuple (parent, continuation) where - parent is a PartialString representing the body of the node, including its comments, visuals, unified_diff and headers for its children - but not the bodies of the children - continuation is either None or (only in html-dir mode) a function which when called with a single integer arg, the maximum size to print, will print any remaining "split" pages for unified_diff up to the given size. """ indent = tuple(indentstr * (indentnum + x) for x in range(3)) t, cont = PartialString.cont() comments = u"" if difference.comments: comments = u'{0[1]}<div class="comment">\n{1}{0[1]}</div>\n'.format( indent, "".join( u"{0[2]}{1}<br/>\n".format(indent, html.escape(x)) for x in difference.comments ), ) visuals = u"" for visual in difference.visuals: visuals += output_visual(visual, path, indentstr, indentnum + 1) udiff = u"" ud_cont = None if difference.unified_diff: ud_cont = HTMLSideBySidePresenter().output_unified_diff( ctx, difference.unified_diff, difference.has_internal_linenos ) udiff = next(ud_cont) if isinstance(udiff, PartialString): ud_cont = ud_cont.send udiff = udiff.pformatl(PartialString.of(ud_cont)) else: for _ in ud_cont: pass # exhaust the iterator, avoids GeneratorExit ud_cont = None # PartialString for this node body = PartialString.numl(u"{0}{1}{2}{-1}", 3, cont).pformatl( comments, visuals, udiff ) if len(path) == 1: # root node, frame it body = output_node_frame(difference, path, indentstr, indentnum, body) t = cont(t, body) # Add holes for child nodes for d in difference.details: child = output_node_frame( d, path + [d], indentstr, indentnum + 1, PartialString.of(d) ) child = PartialString.numl( u"""{0[1]}<div class="difference"> {1}{0[1]}</div> {-1}""", 2, cont, ).pformatl(indent, child) t = cont(t, child) # there might be extra holes for the unified diff continuation assert len(t.holes) >= len(difference.details) + 1 return cont(t, u""), ud_cont
dbe4c5f806457d4308954fb9e13bf01419b4e1a1
10,996
def split_tree_into_feature_groups(tree: TreeObsForRailEnv.Node, max_tree_depth: int) -> ( np.ndarray, np.ndarray, np.ndarray): """ This function splits the tree into three difference arrays of values """ data, distance, agent_data = _split_node_into_feature_groups(tree) for direction in TreeObsForRailEnv.tree_explored_actions_char: sub_data, sub_distance, sub_agent_data = _split_subtree_into_feature_groups(tree.childs[direction], 1, max_tree_depth) data = np.concatenate((data, sub_data)) distance = np.concatenate((distance, sub_distance)) agent_data = np.concatenate((agent_data, sub_agent_data)) return data, distance, agent_data
87352b0d500d178b32d4697ae49736133c7fd6a1
10,997
def _generate_training_batch(ground_truth_data, representation_function, batch_size, num_points, random_state): """Sample a set of training samples based on a batch of ground-truth data. Args: ground_truth_data: GroundTruthData to be sampled from. representation_function: Function that takes observations as input and outputs a dim_representation sized representation for each observation. batch_size: Number of points to be used to compute the training_sample. num_points: Number of points to be sampled for training set. random_state: Numpy random state used for randomness. Returns: points: (num_points, dim_representation)-sized numpy array with training set features. labels: (num_points)-sized numpy array with training set labels. """ points = None # Dimensionality depends on the representation function. labels = np.zeros(num_points, dtype=np.int64) for i in range(num_points): labels[i], feature_vector = _generate_training_sample( ground_truth_data, representation_function, batch_size, random_state) if points is None: points = np.zeros((num_points, feature_vector.shape[0])) points[i, :] = feature_vector return points, labels
944ed5845385089063f0e1558a9a9aedb4aa6d26
10,998
def get_mnist_loaders(data_dir, b_sz, shuffle=True): """Helper function that deserializes MNIST data and returns the relevant data loaders. params: data_dir: string - root directory where the data will be saved b_sz: integer - the batch size shuffle: boolean - whether to shuffle the training set or not """ train_loader = DataLoader( MNIST(data_dir, train=True, transform=ToTensor(), download=True), shuffle=shuffle, batch_size=b_sz) test_loader = DataLoader( MNIST(data_dir, train=False, transform=ToTensor(), download=True), shuffle=False, batch_size=b_sz) return train_loader, test_loader
7149dbe78ceb321c0afea52c20ae927ce154a8f6
10,999
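Example use of get_mnist_loaders above (assumes the DataLoader, MNIST, and ToTensor names come from torch.utils.data and torchvision as implied by the entry; the first call downloads the MNIST files into ./data):

# Yields (image, label) batches of shape [64, 1, 28, 28] and [64].
train_loader, test_loader = get_mnist_loaders("./data", b_sz=64)

images, labels = next(iter(train_loader))
print(images.shape, labels.shape)                            # torch.Size([64, 1, 28, 28]) torch.Size([64])
print(len(train_loader.dataset), len(test_loader.dataset))   # 60000 10000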