Columns: content (string, lengths 35 to 762k) · sha1 (string, length 40) · id (int64, 0 to 3.66M)
import aerospike


def geo_to_string(value):
    """
    Convert geo objects to strings, because they don't support equality.
    """
    if isinstance(value, list):
        return [geo_to_string(x) for x in value]
    if isinstance(value, dict):
        result = {}
        for dict_key, dict_value in value.items():
            result[dict_key] = geo_to_string(dict_value)
        return result
    if isinstance(value, aerospike.GeoJSON):
        return str(value)
    return value
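A small usage sketch (assuming the aerospike client package is installed and geo_to_string above is in scope); the record contents here are made up for illustration:

import aerospike

# A hypothetical record mixing plain values and a GeoJSON point.
record = {
    "name": "cafe",
    "location": aerospike.GeoJSON({"type": "Point", "coordinates": [103.9, 1.3]}),
    "tags": ["coffee", "wifi"],
}

printable = geo_to_string(record)
# The GeoJSON value is now a plain string, so the whole dict supports
# equality comparisons (e.g. in test assertions).
assert isinstance(printable["location"], str)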
9566a980128767ea4b2d651c88d715673e7ef005
3,652,662
from django import http
from django.template import RequestContext, loader


def page_not_found(request, template_name='404.html'):
    """
    Default 404 handler.

    Templates: `404.html`
    Context:
        request_path
            The path of the requested URL (e.g., '/app/pages/bad_page/')
    """
    # You need to create a 404.html template.
    t = loader.get_template(template_name)
    return http.HttpResponseNotFound(
        t.render(RequestContext(request, {'request_path': request.path})))
de0348f3c3bf963f1614d13ffc32bb79d30437b0
3,652,664
import csv


def load_data(filename):
    """
    Load shopping data from a CSV file `filename` and convert into a list of
    evidence lists and a list of labels. Return a tuple (evidence, labels).

    evidence should be a list of lists, where each list contains the
    following values, in order:
        - Administrative, an integer
        - Administrative_Duration, a floating point number
        - Informational, an integer
        - Informational_Duration, a floating point number
        - ProductRelated, an integer
        - ProductRelated_Duration, a floating point number
        - BounceRates, a floating point number
        - ExitRates, a floating point number
        - PageValues, a floating point number
        - SpecialDay, a floating point number
        - Month, an index from 0 (January) to 11 (December)
        - OperatingSystems, an integer
        - Browser, an integer
        - Region, an integer
        - TrafficType, an integer
        - VisitorType, an integer 0 (not returning) or 1 (returning)
        - Weekend, an integer 0 (if false) or 1 (if true)

    labels should be the corresponding list of labels, where each label
    is 1 if Revenue is true, and 0 otherwise.
    """
    with open(filename) as f:  # use the `filename` argument, not a hard-coded path
        reader = csv.reader(f)
        next(reader)  # skip the header row

        months = ["Jan", "Feb", "Mar", "Apr", "May", "June",
                  "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]

        data = []
        for row in reader:
            data.append({
                "evidence": [int(row[0]), float(row[1]), int(row[2]), float(row[3]),
                             int(row[4]), float(row[5]), float(row[6]), float(row[7]),
                             float(row[8]), float(row[9]), months.index(row[10]),
                             int(row[11]), int(row[12]), int(row[13]), int(row[14]),
                             0 if row[15] == "New_Visitor" else 1,
                             0 if row[16] == "FALSE" else 1],
                "label": 0 if row[17] == "FALSE" else 1
            })

    evidence = [row["evidence"] for row in data]
    labels = [row["label"] for row in data]
    return (evidence, labels)
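A usage sketch of the loader; the classifier choice and the scikit-learn dependency are illustrative assumptions, not part of the function above:

from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

evidence, labels = load_data("shopping.csv")
X_train, X_test, y_train, y_test = train_test_split(evidence, labels, test_size=0.4)

model = KNeighborsClassifier(n_neighbors=1).fit(X_train, y_train)
print("accuracy:", model.score(X_test, y_test))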
eb2465d0ebfb7398a3742d8fb79463d3d7b076f0
3,652,665
from unittest.mock import create_autospec  # older codebases import this from the external `mock` package


def instance_mock(cls, request, name=None, spec_set=True, **kwargs):
    """
    Return a mock for an instance of *cls* that draws its spec from the class
    and does not allow new attributes to be set on the instance. If *name* is
    missing or |None|, the name of the returned |Mock| instance is set to
    *request.fixturename*. Additional keyword arguments are passed through to
    the Mock() call that creates the mock.
    """
    if name is None:
        name = request.fixturename
    return create_autospec(cls, _name=name, spec_set=spec_set, instance=True, **kwargs)
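A sketch of the intended pytest usage; the Document class here is a made-up stand-in, and instance_mock above is assumed to be importable by the test module:

import pytest


class Document:
    """Stand-in class used only to illustrate the fixture pattern."""
    def save(self, path):
        ...


@pytest.fixture
def document_(request):
    # The mock is named after the fixture ('document_') and rejects
    # attributes that the real Document class does not define.
    return instance_mock(Document, request)


def test_saves_document(document_):
    document_.save("out.docx")
    document_.save.assert_called_once_with("out.docx")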
ccc60e2f90f63a131059714b3ddb213246807a0b
3,652,666
import functools import torch def auto_fp16(apply_to=None, out_fp32=False): """Decorator to enable fp16 training automatically. This decorator is useful when you write custom modules and want to support mixed precision training. If inputs arguments are fp32 tensors, they will be converted to fp16 automatically. Arguments other than fp32 tensors are ignored. Args: apply_to (Iterable, optional): The argument names to be converted. `None` indicates all arguments. out_fp32 (bool): Whether to convert the output back to fp32. Example: >>> import torch.nn as nn >>> class MyModule1(nn.Module): >>> >>> # Convert x and y to fp16 >>> @auto_fp16() >>> def forward(self, x, y): >>> pass >>> import torch.nn as nn >>> class MyModule2(nn.Module): >>> >>> # convert pred to fp16 >>> @auto_fp16(apply_to=('pred', )) >>> def do_something(self, pred, others): >>> pass """ def auto_fp16_wrapper(old_func): @functools.wraps(old_func) def new_func(*args, **kwargs): # check if the module has set the attribute `fp16_enabled`, if not, # just fallback to the original method. if not isinstance(args[0], torch.nn.Module): raise TypeError('@auto_fp16 can only be used to decorate the ' 'method of nn.Module') if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): return old_func(*args, **kwargs) # get the arg spec of the decorated method args_info = getfullargspec(old_func) # get the argument names to be casted args_to_cast = args_info.args if apply_to is None else apply_to # convert the args that need to be processed new_args = [] # NOTE: default args are not taken into consideration if args: arg_names = args_info.args[:len(args)] for i, arg_name in enumerate(arg_names): if arg_name in args_to_cast: new_args.append( cast_tensor_type(args[i], torch.float, torch.half)) else: new_args.append(args[i]) # convert the kwargs that need to be processed new_kwargs = {} if kwargs: for arg_name, arg_value in kwargs.items(): if arg_name in args_to_cast: new_kwargs[arg_name] = cast_tensor_type( arg_value, torch.float, torch.half) else: new_kwargs[arg_name] = arg_value # apply converted arguments to the decorated method output = old_func(*new_args, **new_kwargs) # cast the results back to fp32 if necessary if out_fp32: output = cast_tensor_type(output, torch.half, torch.float) return output return new_func return auto_fp16_wrapper
1b3292ce6382f82b210d07ec48557f3c06ed7259
3,652,667
def get_loci_score(state, loci_w, data_w, species_w, better_loci, species_counts, total_individuals, total_species, individuals): """ Scoring function with user-specified weights. :param state: :param loci_w: the included proportion of loci from the original data set (higher is better). :param data_w: 1 - the proportion of missing data for the selected loci (higher is better). :param species_w: the average proportion of species represented per locus (higher is better). :param better_loci: :param species_counts: :param total_individuals: :param total_species: :param individuals: :return: """ num_loci = sum(state) species_loci_counts = {species: 0 for species in species_counts} individual_count = 0 missing_counts = {individual: 0 for individual in individuals} total_loci = len(better_loci) for i in range(total_loci): if state[i] == 0: continue found_species = set() found_individuals = set() lines = better_loci[i].split("\n") for line in lines: if line == "": continue (individual, sequence) = line[1:].split() found_individuals.add(individual) individual_count += 1 species = individual.split("_")[-1] found_species.add(species) for species in found_species: species_loci_counts[species] += 1 # Keep track of the amount of missing data for each individual. for individual in individuals: if individual not in found_individuals: missing_counts[individual] += 1 num_missing = num_loci * total_individuals - individual_count score_comps = [loci_w * float(num_loci) / float(total_loci), data_w * (1 - float(num_missing) / float(num_loci * total_individuals)), species_w * float(sum([species_loci_counts[species] for species in species_loci_counts])) / (float(num_loci) * float(total_species))] return score_comps, missing_counts
e5e12bf2f9f76e994289a33b52d4cdc3d641ec8e
3,652,668
def make_per_cell_fastqs( reads, outdir, channel_id, output_format, cell_barcode_pattern, good_barcodes_filename): """Write the filtered cell barcodes in reads from barcodes_with_significant_umi_file fastq.gzs to outdir Parameters ---------- reads : str read records from fasta path greater than or equal to min_umi_per_cell outdir: str write the per cell barcode fastq.gzs to outdir channel_id: str prefix to fastq output_format: str format of output files, can be either fastq or fastq.gz cell_barcode_pattern: regex pattern cell barcode pattern to detect in the record name barcodes_with_significant_umi_file: list list of containing barcodes that have significant umi counts Returns ------- Write the filtered cell barcodes in reads from barcodes_with_significant_umi_file fastq.gzs to outdir """ if channel_id is None: channel_id = "" good_barcodes = read_barcodes_file(good_barcodes_filename) fastqs = [] record_count = 0 for record in screed.open(reads): record_count += 1 if record_count == 0: return fastqs good_cell_barcode_records = get_good_cell_barcode_records( reads, good_barcodes, cell_barcode_pattern) for cell_barcode, records in good_cell_barcode_records.items(): if channel_id == "": filename = "{}/{}.{}".format( outdir, cell_barcode, output_format) else: filename = "{}/{}_{}.{}".format( outdir, channel_id, cell_barcode, output_format) write_fastq(records, filename) fastqs.append(filename) return fastqs
16f45364e8a081addf7b126c3d5af4fb00de4bdc
3,652,669
def plot_roc_curve(data, cls_name, title='ROC curve'): """ :param data: list [(fpr, tpr), (), ...] :param cls_name: tuple of names for each class :param title: plot title :return: """ def cal_auc(tpr, fpr): return np.trapz(tpr, fpr) def plot_single_curve(fpr, tpr, cls_ind): auc = cal_auc(tpr, fpr) plt.plot(fpr, tpr, label="%s ROC curve (area = %.2f)" % (cls_name[cls_ind], auc)) return auc assert isinstance(data, list) if len(cls_name) == 2: assert len(data) == 1 else: assert len(data) == len(cls_name) fig = plt.figure() args = [(fpr, tpr, i) for i, (fpr, tpr) in enumerate(data)] if len(cls_name) > 2: auc = np.mean(list(map(lambda x: plot_single_curve(*x), args))) else: fpr, tpr = data[0] auc = cal_auc(tpr, fpr) plt.plot(fpr, tpr, label="%s vs. %s ROC curve (area = %.2f)" % (cls_name[1], cls_name[0], auc)) ax = plt.gca() ax.plot([0, 1], [0, 1], ls="--", c=".3") plt.title(title + ' (mean area = %.4f)' % auc) plt.ylabel('True positive rate') plt.xlabel('False positive rate') plt.legend() return fig, auc
5b2a56c3f193954173431341185b3bdc53c33c7a
3,652,670
import time def train(elastic_coordinator, train_step, state): """ This is the main elastic data parallel loop. It starts from an initial 'state'. Each iteration calls 'train_step' and returns a new state. 'train_step' has the following interface: state, worker_stats = train_step(state) When 'train_step' exhausts all the data, a StopIteration exception should be thrown. """ assert isinstance(state, torchelastic.State) failure_count = 0 rank = 0 checkpoint_util = CheckpointUtil(elastic_coordinator) while not elastic_coordinator.should_stop_training(): # See: https://github.com/pytorch/elastic/issues/7 if failure_count >= MAX_FAILURES: e = RuntimeError( "Exceeded max number of recoverable failures: {}".format(failure_count) ) elastic_coordinator.on_error(e) raise e start_time = time.time() snapshot = state.capture_snapshot() try: store, rank, world_size = elastic_coordinator.rendezvous_barrier() elastic_coordinator.init_process_group() # load checkpoint if necessary state = checkpoint_util.load_checkpoint(state, rank) state_sync_start_time = time.time() state.sync(world_size, rank) publish_metric( "torchelastic", "state_sync.duration.ms", get_elapsed_time_ms(state_sync_start_time), ) checkpoint_util.set_checkpoint_loaded() elastic_coordinator.barrier() log.info("Rank {0} synced state with other nodes".format(rank)) except StopException: log.info("Rank {0} received stopped signal. Exiting training.".format(rank)) break except RuntimeError as e: # See: https://github.com/pytorch/elastic/issues/7 elastic_coordinator.on_error(e) state.apply_snapshot(snapshot) failure_count += 1 continue except (NonRetryableException, Exception) as e: elastic_coordinator.on_error(e) raise finally: publish_metric( "torch_elastic", "outer_train_loop.duration.ms", get_elapsed_time_ms(start_time), ) # Note that the loop might not even start if the rendezvous was closed # due to one of the trainer processes completing earlier. while not elastic_coordinator.should_stop_training(): start_time = time.time() snapshot = state.capture_snapshot() try: train_step_start_time = time.time() state, worker_stats = train_step(state) publish_metric( "torchelastic", "train_step.duration.ms", get_elapsed_time_ms(train_step_start_time), ) elastic_coordinator.monitor_progress(state, worker_stats) checkpoint_util.save_checkpoint(state, rank) if elastic_coordinator.should_rendezvous(state): log.info("Rank {0} will re-rendezvous".format(rank)) # Executor told us, for whatever reason, to re-rendezvous. # This can occur if another node encounters an error, # if a new node becomes available to train, # or potentially even if it's time to checkpoint. break elastic_coordinator.report_progress(state) except StopIteration: log.info("Rank {0} finished all the iterations".format(rank)) # Current trainer process completed processing assigned subset of # examples. Other trainer processes need to stop as well. # This sends an explicit signal on training completion. elastic_coordinator.signal_training_done() break except RuntimeError as e: # See: https://github.com/pytorch/elastic/issues/7 elastic_coordinator.on_error(e) state.apply_snapshot(snapshot) failure_count += 1 break except Exception as e: elastic_coordinator.on_error(e) raise finally: publish_metric( "torchelastic", "inner_train_loop.duration.ms", get_elapsed_time_ms(start_time), ) if elastic_coordinator.should_stop_training(): return state else: # This is an error condition and should not happen. raise Exception( "Exiting without training complete. 
rank: {0}," " should_stop_training: {1}".format( rank, elastic_coordinator.should_stop_training() ) )
ea7886bba7db96ff85e1687b3b3e24cbc8f8af9d
3,652,671
import numpy as np
import torch


def softmax_mask(w: torch.Tensor, dim=-1, mask: torch.BoolTensor = None
                 ) -> torch.Tensor:
    """
    Allows having -np.inf in w to mask out, or give explicit bool mask

    :param w:
    :param dim:
    :param mask:
    :return:
    """
    if mask is None:
        mask = w != -np.inf
    minval = torch.min(w[~mask])  # to avoid affecting torch.max
    w1 = w.clone()
    w1[~mask] = minval
    # to prevent over/underflow
    w1 = w1 - torch.max(w1, dim=dim, keepdim=True)[0]
    w1 = torch.exp(w1)
    p = w1 / torch.sum(w1 * mask.float(), dim=dim, keepdim=True)
    p[~mask] = 0.
    return p
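A quick usage sketch (assumes only torch/numpy and the function above); the printed values are simply the softmax of [1, 2, 3] with the masked entry forced to zero:

import numpy as np
import torch

# Scores for 4 items; the last one is masked out with -inf.
w = torch.tensor([1.0, 2.0, 3.0, -np.inf])
p = softmax_mask(w)
print(p)        # approx tensor([0.0900, 0.2447, 0.6652, 0.0000])
print(p.sum())  # approx 1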
6cf295b308040d3ad4019ab8292b37a679fb6e27
3,652,673
from typing import Union

from jack.readers.multiple_choice.shared import MultipleChoiceSingleSupportInputModule
from jack.readers.multiple_choice.shared import SimpleMCOutputModule
from jack.readers.natural_language_inference.modular_nli_model import ModularNLIModel

# SharedResources, create_shared_resources and TFReader are provided elsewhere
# in the jack package and are assumed to be in scope here.


def modular_nli_reader(resources_or_conf: Union[dict, SharedResources] = None):
    """Creates a Modular NLI reader instance. Model defined in config."""
    shared_resources = create_shared_resources(resources_or_conf)
    input_module = MultipleChoiceSingleSupportInputModule(shared_resources)
    model_module = ModularNLIModel(shared_resources)
    output_module = SimpleMCOutputModule(shared_resources)
    return TFReader(shared_resources, input_module, model_module, output_module)
03032966355fcb3405cbc2f311908d7be1f2485d
3,652,674
def move_box_and_gtt(n_targets=3, time_limit=DEFAULT_TIME_LIMIT, control_timestep=DEFAULT_CONTROL_TIMESTEP): """Loads `move_box_or_gtt` task.""" return _make_predicate_task( n_boxes=1, n_targets=n_targets, include_gtt_predicates=True, include_move_box_predicates=True, max_num_predicates=2, control_timestep=control_timestep, time_limit=time_limit)
4ae2377d0449b93d3dc0ff34e18010c7bff004d8
3,652,675
import json def get_body(data_dict, database_id, media_status, media_type): """ 获取json数据 :param media_type: :param media_status: :param data_dict: :param database_id: :return: """ status = "" music_status = "" if media_status == MediaStatus.WISH.value: status = "想看" music_status = "想听" elif media_status == MediaStatus.DO.value: status = "在看" music_status = "在听" elif media_status == MediaStatus.COLLECT.value: status = "看过" music_status = "听过" else: status = "" music_status = "" log_detail.info(f"【RUN】- {media_type}数据信息整理为json格式") rating = data_dict[MediaInfo.RATING_F.value] # rating = float(rat) if rat == "" else 0 if media_type == MediaType.MUSIC.value: body = { "parent": { "type": "database_id", "database_id": f"{database_id}" }, "properties": { "音乐": { "title": [{ "type": "text", "text": { "content": data_dict[MediaInfo.TITLE.value] } }] }, "封面": { "files": [{ "type": "external", "name": data_dict[MediaInfo.IMG.value][-13:], "external": { "url": data_dict[MediaInfo.IMG.value] } }] }, "表演者": { "rich_text": [{ "type": "text", "text": { "content": data_dict[MediaInfo.PERFORMER.value] } }] }, "发行时间": { "select": { "name": data_dict[MediaInfo.RELEASE_DATE.value][0:4] } }, "标记状态": { "select": { "name": f"{music_status}" } }, "豆瓣链接": { "url": f"{data_dict[MediaInfo.URL.value]}" } } } # 评分 if data_dict[MediaInfo.RATING_F.value]: rating_f = float(data_dict[MediaInfo.RATING_F.value]) tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value, property_params=rating_f) body["properties"]["评分"] = tmp_dict # 评分人数 if data_dict[MediaInfo.ASSESS.value]: tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value, property_params=data_dict[MediaInfo.ASSESS.value]) body["properties"]["评分人数"] = tmp_dict return body elif media_type == MediaType.MOVIE.value: # 导演 编剧 主演 text_director = ' / '.join(data_dict[MediaInfo.DIRECTOR.value]) text_screenwriter = ' / '.join(data_dict[MediaInfo.SCREENWRITER.value]) text_starring = ' / '.join(data_dict[MediaInfo.STARRING.value]) str_type = get_multi_select_body(data_dict[MediaInfo.MOVIE_TYPE.value]) json_type = json.loads(str_type) str_c_or_r = get_multi_select_body(data_dict[MediaInfo.C_OR_R.value]) json_c_or_r = json.loads(str_c_or_r) body = { "parent": { "type": "database_id", "database_id": f"{database_id}" }, "properties": { "名字": { "title": [{ "type": "text", "text": { "content": data_dict[MediaInfo.TITLE.value] } }] }, "导演": { "rich_text": [{ "type": "text", "text": { "content": text_director } }] }, "编剧": { "rich_text": [{ "type": "text", "text": { "content": text_screenwriter } }] }, "主演": { "rich_text": [{ "type": "text", "text": { "content": text_starring } }] }, "类型": { "multi_select": json_type }, "国家地区": { "multi_select": json_c_or_r }, "IMDb": { "url": f"https://www.imdb.com/title/{data_dict[MediaInfo.IMDB.value]}" }, "标记状态": { "select": { "name": f"{status}" } }, "分类": { "select": { "name": f"{data_dict[MediaInfo.CATEGORIES.value]}" } }, "简介": { "rich_text": [{ "type": "text", "text": { "content": data_dict[MediaInfo.RELATED.value] } }] }, "封面": { "files": [{ "type": "external", "name": data_dict[MediaInfo.IMG.value][-15:], "external": { "url": data_dict[MediaInfo.IMG.value] } }] }, "豆瓣链接": { "url": f"{data_dict[MediaInfo.URL.value]}" } } } # 评分 if data_dict[MediaInfo.RATING_F.value]: rating_f = float(data_dict[MediaInfo.RATING_F.value]) tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value, property_params=rating_f) body["properties"]["评分"] = tmp_dict # 评分人数 if 
data_dict[MediaInfo.ASSESS.value]: tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value, property_params=data_dict[MediaInfo.ASSESS.value]) body["properties"]["评分人数"] = tmp_dict return body elif media_type == MediaType.BOOK.value: body = { "parent": { "type": "database_id", "database_id": f"{database_id}" }, "properties": { "书名": { "title": [{ "type": "text", "text": { "content": data_dict[MediaInfo.TITLE.value] } }] }, "封面": { "files": [{ "type": "external", "name": data_dict[MediaInfo.IMG.value][-13:], "external": { "url": data_dict[MediaInfo.IMG.value] } }] }, "作者": { "rich_text": [{ "type": "text", "text": { "content": data_dict[MediaInfo.AUTHOR.value] } }] }, "出版年份": { "select": { "name": data_dict[MediaInfo.PUB_DATE.value][0:4] } }, "标记状态": { "select": { "name": f"{status}" } }, "豆瓣链接": { "url": f"{data_dict[MediaInfo.URL.value]}" } } } # ISBN if data_dict[MediaInfo.ISBN.value]: tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.URL.value, property_params=data_dict[MediaInfo.ISBN.value]) body["properties"]["ISBN"] = tmp_dict # 价格 if data_dict[MediaInfo.PRICE.value]: tmp_float = float(data_dict[MediaInfo.PRICE.value]) tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value, property_params=tmp_float) body["properties"]["价格"] = tmp_dict # 评分 if data_dict[MediaInfo.RATING_F.value]: rating_f = float(data_dict[MediaInfo.RATING_F.value]) tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value, property_params=rating_f) body["properties"]["评分"] = tmp_dict # 评分人数 if data_dict[MediaInfo.ASSESS.value]: tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value, property_params=data_dict[MediaInfo.ASSESS.value]) body["properties"]["评分人数"] = tmp_dict # 页数 if data_dict[MediaInfo.PAGES.value]: pages_num = int(data_dict[MediaInfo.PAGES.value]) tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value, property_params=pages_num) body["properties"]["页数"] = tmp_dict # 出版社 if data_dict[MediaInfo.PUBLISHER.value]: tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.SELECT.value, property_params=data_dict[MediaInfo.PUBLISHER.value]) body["properties"]["出版社"] = tmp_dict return body
4d09b4000c47dc1dc56aa73ca7b176d40f360e97
3,652,676
def _find_full_periods(events, quantity, capacity):
    """Find the full periods."""
    full_periods = []
    used = 0
    full_start = None
    for event_date in sorted(events):
        used += events[event_date]['quantity']
        if not full_start and used + quantity > capacity:
            full_start = event_date
        elif full_start and used + quantity <= capacity:
            full_periods.append((full_start, event_date))
            full_start = None
    return full_periods
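A worked example under assumed semantics (each event adds its quantity to the current usage; a negative quantity releases capacity); the dates are made up:

# Capacity 10, and we want to fit a reservation of quantity 4.
events = {
    '2024-01-01': {'quantity': 5},   # used: 5 -> 5 + 4 <= 10, still room
    '2024-01-02': {'quantity': 3},   # used: 8 -> 8 + 4 > 10, full period starts
    '2024-01-03': {'quantity': -6},  # used: 2 -> 2 + 4 <= 10, full period ends
}
print(_find_full_periods(events, quantity=4, capacity=10))
# [('2024-01-02', '2024-01-03')]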
16c36ce8cc5a91031117534e66c67605e13e3bd4
3,652,677
import random


def crop_image(img, target_size, center):
    """ crop_image """
    height, width = img.shape[:2]
    size = target_size
    if center:
        # integer division so the slice indices are ints
        w_start = (width - size) // 2
        h_start = (height - size) // 2
    else:
        w_start = random.randint(0, width - size)
        h_start = random.randint(0, height - size)
    w_end = w_start + size
    h_end = h_start + size
    img = img[h_start:h_end, w_start:w_end, :]
    return img
c61d4410155501e3869f2e243af22d7fc13c10ee
3,652,678
def _histogram( data=None, bins="freedman-diaconis", p=None, density=False, kind="step", line_kwargs={}, patch_kwargs={}, **kwargs, ): """ Make a plot of a histogram of a data set. Parameters ---------- data : array_like 1D array of data to make a histogram out of bins : int, array_like, or str, default 'freedman-diaconis' If int or array_like, setting for `bins` kwarg to be passed to `np.histogram()`. If 'exact', then each unique value in the data gets its own bin. If 'integer', then integer data is assumed and each integer gets its own bin. If 'sqrt', uses the square root rule to determine number of bins. If `freedman-diaconis`, uses the Freedman-Diaconis rule for number of bins. p : bokeh.plotting.Figure instance, or None (default) If None, create a new figure. Otherwise, populate the existing figure `p`. density : bool, default False If True, normalized the histogram. Otherwise, base the histogram on counts. kind : str, default 'step' The kind of histogram to display. Allowed values are 'step' and 'step_filled'. line_kwargs : dict Any kwargs to be passed to p.line() in making the line of the histogram. patch_kwargs : dict Any kwargs to be passed to p.patch() in making the fill of the histogram. kwargs : dict All other kwargs are passed to bokeh.plotting.figure() Returns ------- output : Bokeh figure Figure populated with histogram. """ if data is None: raise RuntimeError("Input `data` must be specified.") # Instantiate Bokeh plot if not already passed in if p is None: y_axis_label = kwargs.pop("y_axis_label", "density" if density else "count") if "plot_height" not in kwargs and "frame_height" not in kwargs: kwargs["frame_height"] = 275 if "plot_width" not in kwargs and "frame_width" not in kwargs: kwargs["frame_width"] = 400 y_range = kwargs.pop("y_range", bokeh.models.DataRange1d(start=0)) p = bokeh.plotting.figure(y_axis_label=y_axis_label, y_range=y_range, **kwargs) # Compute histogram bins = _bins_to_np(data, bins) e0, f0 = _compute_histogram(data, bins, density) if kind == "step": p.line(e0, f0, **line_kwargs) if kind == "step_filled": x2 = [e0.min(), e0.max()] y2 = [0, 0] p = fill_between(e0, f0, x2, y2, show_line=True, p=p, patch_kwargs=patch_kwargs) return p
cc766f3367c0c7b5c3c1f56c120f8e682c14a8fb
3,652,679
def getGarbageBlock():
    """Fetch a "beings" block that is currently being flagged.

    Query string: ?block_id=<beings_block_id>

    Returns JSON of the form:
    {
        "is_success": bool,
        "data": {"id", "election_period", "beings_block_id", "votes",
                 "vote_list", "status", "create_time"}
    }
    """
    # `request`, `blockOfGarbage` and `HttpMessage` are module-level objects of
    # the surrounding web application (not shown in this snippet).
    try:
        beings_block_id = request.args.get("block_id")
        res = blockOfGarbage.getGarbageBlockQueue(beings_block_id)
        if res is None:
            http_message = HttpMessage(is_success=False, data=res)
        else:
            http_message = HttpMessage(is_success=True, data=res)
        return http_message.getJson()
    except Exception as err:
        print(err)
        http_message = HttpMessage(is_success=False, data="参数错误")  # "invalid parameters"
        return http_message.getJson()
9faff268d1f492adde467a100d93e99d0b2cc583
3,652,680
from typing import Dict
import itertools


def cluster_confusion_matrix(pred_cluster: Dict, target_cluster: Dict) -> EvalUnit:
    """
    Simulate a pairwise confusion matrix over clusterings (Rand-index style).

    Args:
        pred_cluster: Dict mapping element -> cluster_id (0 to max_size), predicted clusters
        target_cluster: Dict mapping element -> cluster_id (0 to max_size), target clusters
    Returns:
        An EvalUnit with the detailed tp/tn/fp/fn counts (EvalUnit is defined
        elsewhere in this module).
    """
    pred_elements = list(pred_cluster.keys())
    target_elements = list(target_cluster.keys())
    it = itertools.product(pred_elements, target_elements)
    tp, fp, tn, fn = 0, 0, 0, 0
    for x, y in it:
        if x != y:  # distinct elements only
            # look each element up in the cluster mappings
            x_cluster = pred_cluster[x]
            x_cluster_ = target_cluster[x]
            y_cluster = pred_cluster[y]
            y_cluster_ = target_cluster[y]
            if x_cluster == y_cluster and x_cluster_ == y_cluster_:
                tp += 1
            elif x_cluster != y_cluster and x_cluster_ != y_cluster_:
                tn += 1
            elif x_cluster == y_cluster and x_cluster_ != y_cluster_:
                fp += 1
            else:
                fn += 1
    return EvalUnit(tp, tn, fp, fn, 'rand_index')
b3a5afb5c01cf5cb07c43e3666111d06e9229259
3,652,682
def change_short(x, y, limit): """Return abs(y - x) as a fraction of x, but with a limit. >>> x, y = 2, 5 >>> abs(y - x) / x 1.5 >>> change_short(x, y, 100) 1.5 >>> change_short(x, y, 1) # 1 is smaller than 1.5 1 >>> x = 0 >>> change_short(x, y, 100) # No error, even though abs(y - x) / x divides by 0! 100 """ return limited(x, limit if (x == 0) else abs(y - x) / x, limit)
1d4965650f12c95ba54f1ce38fc63e1e6eb39573
3,652,683
import copy import tqdm def generate_correlation_map(f, x_data, y_data, method='chisquare_spectroscopic', filter=None, resolution_diag=20, resolution_map=15, fit_args=tuple(), fit_kws={}, distance=5, npar=1): """Generates a correlation map for either the chisquare or the MLE method. On the diagonal, the chisquare or loglikelihood is drawn as a function of one fixed parameter. Refitting to the data each time gives the points on the line. A dashed line is drawn on these plots, with the intersection with the plots giving the correct confidence interval for the parameter. In solid lines, the interval estimated by the fitting routine is drawn. On the offdiagonal, two parameters are fixed and the model is again fitted to the data. The change in chisquare/loglikelihood is mapped to 1, 2 and 3 sigma contourmaps. Parameters ---------- f: :class:`.BaseModel` Instance of the model for which the contour map has to be generated. x_data: array_like or list of array_likes Data on the x-axis for the fit. Must be appropriate input for *f*. y_data: array_like or list of array_likes Data on the y-axis for the fit. Must be appropriate input for *f*. Other parameters ---------------- method: {'chisquare', 'chisquare_spectroscopic', mle'} Chooses between generating the map for the chisquare routine or for the likelihood routine. filter: list of strings Only the parameters matching the names given in this list will be used to generate the maps. resolution_diag: int Number of points for the line plot on each diagonal. resolution_map: int Number of points along each dimension for the meshgrids. fit_kws: dictionary Dictionary of keywords to pass on to the fitting routine. npar: int Number of parameters for which simultaneous predictions need to be made. Influences the uncertainty estimates from the parabola.""" # Save the original goodness-of-fit and parameters for later use mapping = {'chisquare_spectroscopic': (fitting.chisquare_spectroscopic_fit, 'chisqr_chi'), 'chisquare': (fitting.chisquare_fit, 'chisqr_chi'), 'mle': (fitting.likelihood_fit, 'likelihood_mle')} func, attr = mapping.pop(method.lower(), (fitting.chisquare_spectroscopic_fit, 'chisqr_chi')) title = '{}\n${}_{{-{}}}^{{+{}}}$' title_e = '{}\n$({}_{{-{}}}^{{+{}}})e{}$' fit_kws['verbose'] = False fit_kws['hessian'] = False to_save = {'mle': ('fit_mle', 'result_mle')} to_save = to_save.pop(method.lower(), ('chisq_res_par', 'ndof_chi', 'redchi_chi')) saved = [copy.deepcopy(getattr(f, attr)) for attr in to_save] # func(f, x_data, y_data, *fit_args, **fit_kws) orig_value = getattr(f, attr) orig_params = copy.deepcopy(f.params) state = fitting._get_state(f, method=method.lower()) ranges = {} chifunc = lambda x: chi2.cdf(x, npar) - 0.682689492 # Calculate 1 sigma boundary boundary = optimize.root(chifunc, npar).x[0] * 0.5 if method.lower() == 'mle' else optimize.root(chifunc, npar).x[0] # Select all variable parameters, generate the figure param_names = [] no_params = 0 for p in orig_params: if orig_params[p].vary and (filter is None or any([f in p for f in filter])): no_params += 1 param_names.append(p) fig, axes, cbar = _make_axes_grid(no_params, axis_padding=0, cbar=no_params > 1) # Make the plots on the diagonal: plot the chisquare/likelihood # for the best fitting values while setting one parameter to # a fixed value. 
saved_params = copy.deepcopy(f.params) function_kws = {'method': method.lower(), 'func_args': fit_args, 'func_kwargs': fit_kws} function_kws['orig_stat'] = orig_value for i in range(no_params): params = copy.deepcopy(saved_params) ranges[param_names[i]] = {} # Set the y-ticklabels. ax = axes[i, i] ax.set_title(param_names[i]) if i == no_params-1: if method.lower().startswith('chisquare'): ax.set_ylabel(r'$\Delta\chi^2$') else: ax.set_ylabel(r'$\Delta\mathcal{L}$') # Select starting point to determine error widths. value = orig_params[param_names[i]].value stderr = orig_params[param_names[i]].stderr print(stderr) stderr = stderr if stderr is not None else 0.01 * np.abs(value) stderr = stderr if stderr != 0 else 0.01 * np.abs(value) result_left, success_left = fitting._find_boundary(-stderr, param_names[i], boundary, f, x_data, y_data, function_kwargs=function_kws) result_right, success_right = fitting._find_boundary(stderr, param_names[i], boundary, f, x_data, y_data, function_kwargs=function_kws) success = success_left * success_right ranges[param_names[i]]['left'] = result_left ranges[param_names[i]]['right'] = result_right if not success: print("Warning: boundary calculation did not fully succeed for " + param_names[i]) right = np.abs(ranges[param_names[i]]['right'] - value) left = np.abs(ranges[param_names[i]]['left'] - value) params[param_names[i]].vary = False left_val, right_val = max(value - distance * left, orig_params[param_names[i]].min), min(value + distance * right, orig_params[param_names[i]].max) ranges[param_names[i]]['right_val'] = right_val ranges[param_names[i]]['left_val'] = left_val value_range = np.linspace(left_val, right_val, resolution_diag) value_range = np.sort(np.append(value_range, np.array([value - left, value + right, value]))) chisquare = np.zeros(len(value_range)) # Calculate the new value, and store it in the array. Update the progressbar. with tqdm.tqdm(value_range, desc=param_names[i], leave=True) as pbar: for j, v in enumerate(value_range): chisquare[j] = fitting.calculate_updated_statistic(v, param_names[i], f, x_data, y_data, **function_kws) fitting._set_state(f, state, method=method.lower()) pbar.update(1) # Plot the result ax.plot(value_range, chisquare, color='k') c = '#0093e6' # Indicate the used interval. ax.axvline(value + right, ls="dashed", color=c) ax.axvline(value - left, ls="dashed", color=c) ax.axvline(value, ls="dashed", color=c) ax.axhline(boundary, color=c) up = '{:.2ug}'.format(u.ufloat(value, right)) down = '{:.2ug}'.format(u.ufloat(value, left)) val = up.split('+')[0].split('(')[-1] r = up.split('-')[1].split(')')[0] l = down.split('-')[1].split(')')[0] if 'e' in up or 'e' in down: ex = up.split('e')[-1] ax.set_title(title_e.format(param_names[i], val, l, r, ex)) else: ax.set_title(title.format(param_names[i], val, l, r)) # Restore the parameters. 
fitting._set_state(f, state, method=method.lower()) for i, j in zip(*np.tril_indices_from(axes, -1)): params = copy.deepcopy(orig_params) ax = axes[i, j] x_name = param_names[j] y_name = param_names[i] if j == 0: ax.set_ylabel(y_name) if i == no_params - 1: ax.set_xlabel(x_name) right = ranges[x_name]['right_val'] left = ranges[x_name]['left_val'] x_range = np.append(np.linspace(left, right, resolution_map), orig_params[x_name].value) x_range = np.sort(x_range) right = ranges[y_name]['right_val'] left = ranges[y_name]['left_val'] y_range = np.append(np.linspace(left, right, resolution_map), orig_params[y_name].value) y_range = np.sort(y_range) X, Y = np.meshgrid(x_range, y_range) Z = np.zeros(X.shape) i_indices, j_indices = np.indices(Z.shape) with tqdm.tqdm(i_indices.flatten(), desc=param_names[j]+' ' + param_names[i], leave=True) as pbar: for k, l in zip(i_indices.flatten(), j_indices.flatten()): x = X[k, l] y = Y[k, l] print(x, y, f.params['Background0'].value) Z[k, l] = fitting.calculate_updated_statistic([x, y], [x_name, y_name], f, x_data, y_data, **function_kws) fitting._set_state(f, state, method=method.lower()) pbar.update(1) Z = -Z npar = 1 bounds = [] for bound in [0.997300204, 0.954499736, 0.682689492]: chifunc = lambda x: chi2.cdf(x, npar) - bound # Calculate 1 sigma boundary bounds.append(-optimize.root(chifunc, npar).x[0]) # bounds = sorted([-number*number for number in np.arange(1, 9, .1)]) bounds.append(1) if method.lower() == 'mle': bounds = [b * 0.5 for b in bounds] norm = mpl.colors.BoundaryNorm(bounds, invcmap.N) contourset = ax.contourf(X, Y, Z, bounds, cmap=invcmap, norm=norm) f.params = copy.deepcopy(orig_params) if method.lower() == 'mle': f.fit_mle = copy.deepcopy(orig_params) else: f.chisq_res_par try: cbar = plt.colorbar(contourset, cax=cbar, orientation='vertical') cbar.ax.yaxis.set_ticks([0, 1/6, 0.5, 5/6]) cbar.ax.set_yticklabels(['', r'3$\sigma$', r'2$\sigma$', r'1$\sigma$']) except: pass setattr(f, attr, orig_value) for attr, value in zip(to_save, saved): setattr(f, attr, copy.deepcopy(value)) for a in axes.flatten(): if a is not None: for label in a.get_xticklabels()[::2]: label.set_visible(False) for label in a.get_yticklabels()[::2]: label.set_visible(False) return fig, axes, cbar
1294f1a93a98602ee50e5e52aacea6e678625520
3,652,684
import socket def local_ip(): """find out local IP, when running spark driver locally for remote cluster""" ip = ((([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")] or [[(s.connect( ("8.8.8.8", 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) + ["no IP found"])[0]) return ip
d45978f3f433adba5cb1181d71cb367ceabd880f
3,652,685
def string_to_int_replacements(setting, item, for_property): """Maps keys to values from setting and item for replacing string templates for settings which need to be converted to/from Strings. """ replacements = common_replacements(setting, item) replacements.update({ '$string_to_int': string_to_int(item, property=for_property), '$int_to_string': int_to_string(item)}) return replacements
c6ae6a55fdda2fe13bd3ac7001da528d704e3df7
3,652,686
import struct


def UnpackU32(buf, offset=0, endian='big'):
    """
    Unpack a 32-bit unsigned integer from 4 bytes of a packed buffer.

    Parameters:
        buf     - Input packed buffer.
        offset  - Offset in buffer.
        endian  - Byte order.

    Return:
        2-tuple of unpacked value, new buffer offset.
    """
    # _EndianCode maps the endian name to a struct format prefix;
    # it and _UnpackException are defined elsewhere in this module.
    try:
        return (struct.unpack_from(_EndianCode[endian] + 'I', buf, offset)[0],
                offset + 4)
    except (KeyError, TypeError, DeprecationWarning, struct.error) as inst:
        _UnpackException('u32', offset, endian, inst)
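A usage sketch, assuming the module's _EndianCode table maps 'big' to the '>' struct prefix:

buf = b'\x00\x00\x01\x02\xff'
val, off = UnpackU32(buf, offset=0, endian='big')
print(val, off)   # 258 4  (0x00000102 == 258, next offset is 4)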
061ba5e8c4db891100549d0181475b8915d9fb0a
3,652,689
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer  # assumed; matches the (key, expires) usage below


def get_reset_token(user, expires_sec=1800):
    """
    Create a token for resetting a user's password.

    Args:
        user:
        expires_sec:
    Returns:
        token: string
    """
    # APP is the Flask application object defined elsewhere in this project.
    hash_token_password = Serializer(APP.config['SECRET_KEY'], expires_sec)
    return hash_token_password.dumps({'user_name': user.user_name}).decode('utf-8')
6477f03ca25a206db18c4a0a59ae0fac1106e262
3,652,690
from typing import Union def _on_error_resume_next(*sources: Union[Observable, Future]) -> Observable: """Continues an observable sequence that is terminated normally or by an exception with the next observable sequence. Examples: >>> res = rx.on_error_resume_next(xs, ys, zs) Returns: An observable sequence that concatenates the source sequences, even if a sequence terminates exceptionally. """ sources_ = iter(sources) def subscribe(observer, scheduler=None): scheduler = scheduler or current_thread_scheduler subscription = SerialDisposable() cancelable = SerialDisposable() def action(scheduler, state=None): try: source = next(sources_) except StopIteration: observer.on_completed() return # Allow source to be a factory method taking an error source = source(state) if callable(source) else source current = rx.from_future(source) if is_future(source) else source d = SingleAssignmentDisposable() subscription.disposable = d def on_resume(state=None): scheduler.schedule(action, state) d.disposable = current.subscribe_(observer.on_next, on_resume, on_resume, scheduler) cancelable.disposable = scheduler.schedule(action) return CompositeDisposable(subscription, cancelable) return Observable(subscribe)
ed604de1b566bc73b394143bb0b8bc487646ac1a
3,652,691
# `prompt` and `print_formatted_text` are assumed to come from prompt_toolkit;
# `parse_value_from_string` is a helper defined elsewhere in this module.
from prompt_toolkit import print_formatted_text, prompt


def prompt_for_value(field_name: str, field_type):
    """Prompt the user to input a value for the parameter `field_name`."""
    print_formatted_text(
        f"No value found for field '{field_name}' of type '{field_type}'. "
        "Please enter a value for this parameter:"
    )
    response = prompt("> ")
    while response == "":
        print_formatted_text("No input received, please enter a value:")
        response = prompt("> ")
    return parse_value_from_string(response)
3483b718f09d5a99a37a9d6086e462acf546cbf3
3,652,692
def create_dist_list(dist: str, param1: str, param2: str) -> list: """ Creates a list with a special syntax describing a distribution Syntax: [identifier, param1, param2 (if necessary)] """ dist_list: list = [] if dist == 'fix': dist_list = ["f", float(param1)] elif dist == 'binary': dist_list = ["b", float(param1)] elif dist == 'binomial': dist_list = ["i", float(param1), float(param2)] elif dist == 'normal': dist_list = ["n", float(param1), float(param2)] elif dist == 'uniform': dist_list = ["u", float(param1), float(param2)] elif dist == 'poisson': dist_list = ["p", float(param1)] elif dist == 'exponential': dist_list = ["e", float(param1)] elif dist == 'lognormal': dist_list = ["l", float(param1), float(param2)] elif dist == 'chisquare': dist_list = ["c", float(param1)] elif dist == 'standard-t': dist_list = ["t", float(param1)] return dist_list
19cb93867639bcb4ae8152e4a28bb5d068a9c756
3,652,694
import numpy as np


def compute_arfgm(t, qd, p, qw=0.0):
    """Computes relative humidity from temperature, pressure and specific humidity (qd).

    This might be similar to
    https://unidata.github.io/MetPy/latest/api/generated/metpy.calc.relative_humidity_from_specific_humidity.html

    Algorithm from RemapToRemo addgm.

    **Arguments:**
        *p:*  atmospheric pressure ([Pa], 3d)
        *t:*  temperature fields ([K], 3d)
        *qd:* specific humidity fields ([kg/kg], 3d)
        *qw:* liquid water content ([kg/kg], 3d)

    **Returns:**
        *relhum:* relative humidity ([%], 3d)
    """
    # C, fgew, fgee and fgqd are constants/helpers defined elsewhere in this module.
    # return fgqd(fgee(t), p)
    # gqd = np.where(t >= C.B3, fgqd(fgew(t), p), fgqd(fgee(t), p))
    fge = np.where(t >= C.B3, fgew(t), fgee(t))
    gqd = fgqd(fge, p)
    relhum = qd / gqd
    return np.where(relhum > 1.0, (gqd + qw) / gqd, (qd + qw) / gqd)
3e74be0b2099774482c32e2a3fbc4e2ee3f339fe
3,652,695
import cv2
import numpy as np


def load_data():
    """
    loads the data for this task
    :return:
    """
    fpath = 'images/ball.png'
    radius = 70
    Im = cv2.imread(fpath, 0).astype('float32') / 255  # 0 .. 1
    # we resize the image to speed-up the level set method
    Im = cv2.resize(Im, dsize=(0, 0), fx=0.5, fy=0.5)
    height, width = Im.shape
    centre = (width // 2, height // 2)
    Y, X = np.ogrid[:height, :width]
    phi = radius - np.sqrt((X - centre[0]) ** 2 + (Y - centre[1]) ** 2)
    return Im, phi
3caaa20ecb43853910f1d42667bd481bbe62e17d
3,652,697
def create_response_body(input_json): """Create a json response with specific args of input JSON.""" city_name = str(input_json['name']) country_code = str(input_json['sys']['country']) temp_celsius = get_val_unit(input_json, 'main', 'temp', ' °C') wind_speed = get_val_unit(input_json, 'wind', 'speed', ' m/s') wind_deg = get_val_unit(input_json, 'wind', 'deg', ' deg') cloudines = get_cloudines(input_json) pressure = get_val_unit(input_json, 'main', 'pressure', ' hPa') humidity_percent = get_val_unit(input_json, 'main', 'humidity', '%') coord_lon = str(input_json['coord']['lon']) coord_lat = str(input_json['coord']['lat']) sunrise_hour = get_hour_time(input_json, 'sys', 'sunrise') sunset_hour = get_hour_time(input_json, 'sys', 'sunset') requested_time = get_datetime_from_unix(0, input_json, 'dt') output_json = { "location_name": f"{city_name}, {country_code}", "temperature": temp_celsius, "wind": f"{wind_speed}, {wind_deg}", "cloudines": cloudines, "pressure": pressure, "humidity": humidity_percent, "sunrise": sunrise_hour, "sunset": sunset_hour, "geo_coordinates": [coord_lat, coord_lon], "requested_time": requested_time } return output_json
4696f6d6929eea941697bf7aab49d139e4bd6229
3,652,698
def _get_count(_khoros_object, _user_id, _object_type): """This function returns the count of a specific user object (e.g. ``albums``, ``followers``, etc.) for a user. :param _khoros_object: The core :py:class:`khoros.Khoros` object :type _khoros_object: class[khoros.Khoros] :param _user_id: The User ID associated with the user :type _user_id: int, str :param _object_type: The type of object for which to get the count (e.g. ``albums``, ``followers``, etc.) :returns: The user object count as an integer :raises: :py:exc:`khoros.errors.exceptions.GETRequestError` """ _api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id) return int(_api_response['data']['items'][0][_object_type]['count'])
5e0cb02a74a819984ab271fcaad469a60f4bdf43
3,652,701
import numpy as np


def antiSymmetrizeSignal(y, symmetryStep):
    """
    Discard the symmetric part of a signal by taking the difference of the
    signal at x[n] and x[n + symmetryStep].
    Get your corresponding x data as x[0:len(y)/2].

    Parameters
    ----------
    y : array_like
        numpy array or list of data values to anti-symmetrize
    symmetryStep : scalar
        expected symmetry of the signal at x[n] occurs at x[n+symmetryStep]

    Returns
    ----------
    y_symmetrized : ndarray
        numpy array of dimension size(y)/2 of the antisymmetrized data
    """
    y = np.array(y)
    s = np.zeros(len(y) // 2)  # integer length for the output array
    for idx in range(0, len(s)):
        # (positive field - negative field)/2
        s[idx] = (y[idx] - y[idx + symmetryStep]) / 2. - (y[0] - y[0 + symmetryStep]) / 2.
    return s
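A small numeric sketch (made-up data): a constant symmetric offset plus an antisymmetric ramp, measured at field +H and then at -H:

# Signal measured at +H and then at -H (symmetryStep = 4 points apart):
# an even (symmetric) offset of 10 plus an odd (antisymmetric) ramp.
y = [10 + 0, 10 + 1, 10 + 2, 10 + 3,    # +H branch
     10 - 0, 10 - 1, 10 - 2, 10 - 3]    # -H branch
s = antiSymmetrizeSignal(y, symmetryStep=4)
print(s)   # [0. 1. 2. 3.]  -- the symmetric offset of 10 is removed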
0936d5fc3883d3ce6ee2f6c77fb9b4bc59177426
3,652,702
import numpy as np


def lms_to_rgb(img):
    """
    rgb_matrix = np.array(
        [[0.0809444479, -0.130504409, 0.116721066],
         [0.113614708, -0.0102485335, 0.0540193266],
         [-0.000365296938, -0.00412161469, 0.693511405]]
    )
    """
    # matrix used for the LMS -> RGB conversion (the docstring shows the forward matrix)
    rgb_matrix = np.array(
        [[ 2.85831110e+00, -1.62870796e+00, -2.48186967e-02],
         [-2.10434776e-01,  1.15841493e+00,  3.20463334e-04],
         [-4.18895045e-02, -1.18154333e-01,  1.06888657e+00]]
    )
    return np.tensordot(img, rgb_matrix, axes=([2], [1]))
76ce7a5f73712a6d9f241d66b3af8a54752b141d
3,652,703
import time


def timeit(func):
    """
    Decorator that returns the total runtime of a function

    @param func: function to be timed
    @return: time taken, in seconds. Note that the decorated call returns the
             elapsed time and discards the wrapped function's own result.
    """
    def wrapper(*args, **kwargs) -> float:
        start = time.time()
        func(*args, **kwargs)
        total_time = time.time() - start
        return total_time
    return wrapper
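A usage sketch; note that the decorated call returns the elapsed time rather than the wrapped function's own result:

import time

@timeit
def busy_wait(seconds):
    time.sleep(seconds)

elapsed = busy_wait(0.2)
print(f"busy_wait took {elapsed:.2f} s")   # roughly 0.20 s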
68723a74c96c2d004eed9533f9023d77833c509b
3,652,704
def merge_count(data1, data2): """Auxiliary method to merge the lengths.""" return data1 + data2
8c0280b043b7d21a411ac14d3571acc50327fdbc
3,652,705
def orthogonal_procrustes(fixed, moving): """ Implements point based registration via the Orthogonal Procrustes method. Based on Arun's method: Least-Squares Fitting of two, 3-D Point Sets, Arun, 1987, `10.1109/TPAMI.1987.4767965 <http://dx.doi.org/10.1109/TPAMI.1987.4767965>`_. Also see `this <http://eecs.vanderbilt.edu/people/mikefitzpatrick/papers/2009_Medim_Fitzpatrick_TRE_FRE_uncorrelated_as_published.pdf>`_ and `this <http://tango.andrew.cmu.edu/~gustavor/42431-intro-bioimaging/readings/ch8.pdf>`_. :param fixed: point set, N x 3 ndarray :param moving: point set, N x 3 ndarray of corresponding points :returns: 3x3 rotation ndarray, 3x1 translation ndarray, FRE :raises: ValueError """ validate_procrustes_inputs(fixed, moving) # This is what we are calculating R = np.eye(3) T = np.zeros((3, 1)) # Arun equation 4 p = np.ndarray.mean(moving, 0) # Arun equation 6 p_prime = np.ndarray.mean(fixed, 0) # Arun equation 7 q = moving - p # Arun equation 8 q_prime = fixed - p_prime # Arun equation 11 H = np.matmul(q.transpose(), q_prime) # Arun equation 12 # Note: numpy factors h = u * np.diag(s) * v svd = np.linalg.svd(H) # Replace Arun Equation 13 with Fitzpatrick, chapter 8, page 470, # to avoid reflections, see issue #19 X = _fitzpatricks_X(svd) # Arun step 5, after equation 13. det_X = np.linalg.det(X) if det_X < 0 and np.all(np.flip(np.isclose(svd[1], np.zeros((3, 1))))): # Don't yet know how to generate test data. # If you hit this line, please report it, and save your data. raise ValueError("Registration fails as determinant < 0" " and no singular values are close enough to zero") if det_X < 0 and np.any(np.isclose(svd[1], np.zeros((3, 1)))): # Implement 2a in section VI in Arun paper. v_prime = svd[2].transpose() v_prime[0][2] *= -1 v_prime[1][2] *= -1 v_prime[2][2] *= -1 X = np.matmul(v_prime, svd[0].transpose()) # Compute output R = X tmp = p_prime.transpose() - np.matmul(R, p.transpose()) T[0][0] = tmp[0] T[1][0] = tmp[1] T[2][0] = tmp[2] fre = compute_fre(fixed, moving, R, T) return R, T, fre
5818c67e478ad9dd59ae5a1ba0c847d60234f222
3,652,706
def make_model(arch_params, patch_size): """ Returns the model. Used to select the model. """ return RDN(arch_params, patch_size)
6cf91ea68bcf58d4aa143a606bd774761f37acb0
3,652,707
def calc_error(frame, gap, method_name): """Calculate the error between the ground truth and the GAP prediction""" frame.single_point(method_name=method_name, n_cores=1) pred = frame.copy() pred.run_gap(gap=gap, n_cores=1) error = np.abs(pred.energy - frame.energy) logger.info(f'|E_GAP - E_0| = {np.round(error, 3)} eV') return error
9a3eb0b115c394703cb7446852982fa1468607ad
3,652,708
def find_model(sender, model_name): """ Register new model to ORM """ MC = get_mc() model = MC.get((MC.c.model_name==model_name) & (MC.c.uuid!='')) if model: model_inst = model.get_instance() orm.set_model(model_name, model_inst.table_name, appname=__name__, model_path='') return orm.__models__.get(model_name)
4c78f135b502119fffb6b2ccf5f09335e739a97a
3,652,709
def list_subtitles(videos, languages, pool_class=ProviderPool, **kwargs): """List subtitles. The `videos` must pass the `languages` check of :func:`check_video`. All other parameters are passed onwards to the provided `pool_class` constructor. :param videos: videos to list subtitles for. :type videos: set of :class:`~subliminal.video.Video` :param languages: languages to search for. :type languages: set of :class:`~babelfish.language.Language` :param pool_class: class to use as provider pool. :type: :class:`ProviderPool`, :class:`AsyncProviderPool` or similar :return: found subtitles per video. :rtype: dict of :class:`~subliminal.video.Video` to list of :class:`~subliminal.subtitle.Subtitle` """ listed_subtitles = defaultdict(list) # check videos checked_videos = [] for video in videos: if not check_video(video, languages=languages): logger.info('Skipping video %r', video) continue checked_videos.append(video) # return immediately if no video passed the checks if not checked_videos: return listed_subtitles # list subtitles with pool_class(**kwargs) as pool: for video in checked_videos: logger.info('Listing subtitles for %r', video) subtitles = pool.list_subtitles(video, languages - video.subtitle_languages) listed_subtitles[video].extend(subtitles) logger.info('Found %d subtitle(s)', len(subtitles)) return listed_subtitles
f5d9fa450f0df5c71c320d972e54c2502bbfd37d
3,652,710
import httmock


def public_incursion_no_expires(url, request):
    """ Mock endpoint for incursion. Public endpoint without cache """
    return httmock.response(
        status_code=200,
        content=[
            {
                "type": "Incursion",
                "state": "mobilizing",
                "staging_solar_system_id": 30003893,
                "constellation_id": 20000568,
                "infested_solar_systems": [
                    30003888,
                ],
                "has_boss": True,
                "faction_id": 500019,
                "influence": 1
            }
        ]
    )
31d008b6479d8e2a5e4bc9f2d7b4af8cc4a40b03
3,652,711
def bad_topics(): """ Manage Inappropriate topics """ req = request.vars view_info = {} view_info['errors'] = [] tot_del = 0 if req.form_submitted: for item in req: if item[:9] == 'inapp_id_': inapp_id = int(req[item]) db(db.zf_topic_inappropriate.id==inapp_id).update(read_flag=True) tot_del += 1 topics = db((db.zf_topic_inappropriate.read_flag==False) & (db.zf_topic.id==db.zf_topic_inappropriate.topic_id)).select(db.zf_topic_inappropriate.ALL, db.zf_topic.title, orderby=~db.zf_topic_inappropriate.creation_date) view_info.update({'removed': tot_del}) return dict(request=request, topics=topics, view_info=view_info) else: topics = db((db.zf_topic_inappropriate.read_flag==False) & (db.zf_topic.id==db.zf_topic_inappropriate.topic_id)).select(db.zf_topic_inappropriate.ALL, db.zf_topic.title, orderby=~db.zf_topic_inappropriate.creation_date) return dict(request=request, topics=topics, view_info=view_info)
64c40b98a77c5934bd0593c9f5c4f31370980e8a
3,652,712
def get_min_id_for_repo_mirror_config(): """ Gets the minimum id for a repository mirroring. """ return RepoMirrorConfig.select(fn.Min(RepoMirrorConfig.id)).scalar()
21a99988a1805f61ede9d689494b59b61c0391d8
3,652,713
def check_series( Z, enforce_univariate=False, allow_empty=False, allow_numpy=True, enforce_index_type=None, ): """Validate input data. Parameters ---------- Z : pd.Series, pd.DataFrame Univariate or multivariate time series enforce_univariate : bool, optional (default=False) If True, multivariate Z will raise an error. allow_empty : bool enforce_index_type : type, optional (default=None) type of time index Returns ------- y : pd.Series, pd.DataFrame Validated time series Raises ------ ValueError, TypeError If Z is an invalid input """ # Check if pandas series or numpy array if not allow_numpy: valid_data_types = tuple( filter(lambda x: x is not np.ndarray, VALID_DATA_TYPES) ) else: valid_data_types = VALID_DATA_TYPES if not isinstance(Z, valid_data_types): raise TypeError( f"Data must be a one of {valid_data_types}, but found type: {type(Z)}" ) if enforce_univariate: _check_is_univariate(Z) # check time index check_time_index( Z.index, allow_empty=allow_empty, enforce_index_type=enforce_index_type ) return Z
5831c75953b8953ec54982712c1e4d3cccb22cc8
3,652,714
def B(s):
    """string to byte-string in Python 2 (including old versions that don't
    support b"") and Python 3"""
    if type(s) == type(u""):
        return s.encode('utf-8')  # Python 3
    return s
b741bf4a64bd866283ca789745f373db360f4016
3,652,715
def gather_tiling_strategy(data, axis): """Custom tiling strategy for gather op""" strategy = list() base = 0 for priority_value, pos in enumerate(range(len(data.shape) - 1, axis, -1)): priority_value = priority_value + base strategy.append(ct_util.create_constraint_on_tensor(tensor=data, values=priority_value, constraints=ct_util.TileConstraint.SET_PRIORITY, tensor_pos=pos)[0]) return strategy
afceb113c9b6c25f40f4f885ccaf08860427291f
3,652,716
def redscreen(main_filename, back_filename):
    """
    Implements the notion of "redscreening". That is, the image in
    main_filename has its "sufficiently" red pixels replaced with pixels from
    the corresponding x, y location in the image in the file back_filename.
    Returns the resulting "redscreened" image.
    """
    # SimpleImage and INTENSITY_THRESHOLD are provided by the surrounding module.
    image = SimpleImage(main_filename)
    back = SimpleImage(back_filename)
    for pixel in image:
        average = (pixel.red + pixel.green + pixel.blue) // 3
        # See if this pixel is "sufficiently" red
        if pixel.red >= average * INTENSITY_THRESHOLD:
            # If so, we get the corresponding pixel from the
            # back image and overwrite the pixel in
            # the main image with that from the back image.
            x = pixel.x
            y = pixel.y
            image.set_pixel(x, y, back.get_pixel(x, y))
    return image
96824872ceb488497fbd56a662b8fb5098bf2341
3,652,718
def density_speed_conversion(N, frac_per_car=0.025, min_val=0.2):
    """ Fraction to multiply speed by if there are N nearby vehicles """
    z = 1.0 - (frac_per_car * N)
    # z = 1.0 - 0.04 * N
    return max(z, min_val)
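A few worked values with the default parameters (each nearby car removes 2.5% of free-flow speed, floored at 20%):

print(density_speed_conversion(0))    # 1.0
print(density_speed_conversion(10))   # 0.75
print(density_speed_conversion(40))   # 0.2 (floor reached: 1 - 0.025*40 = 0.0)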
95285a11be84df5ec1b6c16c5f24b3831b1c0348
3,652,719
import numpy as np


def int_to_datetime(int_date, ds=None):
    """Convert integer date indices to datetimes."""
    # TIME_ZERO and _get_delta are defined elsewhere in this module.
    if ds is None:
        return TIME_ZERO + int_date * np.timedelta64(1, 'D')

    if not hasattr(ds, 'original_time'):
        raise ValueError('Dataset with no original_time cannot be used to '
                         'convert ints to datetimes.')

    first_int = ds.time.values[0]
    delta_int = _get_delta(ds.time)
    first_date = ds.original_time.values[0]
    delta_date = _get_delta(ds.original_time)
    return first_date + ((int_date - first_int) / delta_int) * delta_date
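A usage sketch for the ds is None branch; TIME_ZERO is a module-level epoch, assumed here to be a np.datetime64 and set this way only because the function is defined in the same script:

import numpy as np

TIME_ZERO = np.datetime64('2000-01-01')   # assumption for illustration only
print(int_to_datetime(31))                # 2000-02-01, i.e. 31 days after the epoch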
2e081ff019628800fb5c44eec4fa73333d755dde
3,652,721
def show_plugin(name, path, user): """ Show a plugin in a wordpress install and check if it is installed name Wordpress plugin name path path to wordpress install location user user to run the command as CLI Example: .. code-block:: bash salt '*' wordpress.show_plugin HyperDB /var/www/html apache """ ret = {"name": name} resp = __salt__["cmd.shell"]( ("wp --path={0} plugin status {1}").format(path, name), runas=user ).split("\n") for line in resp: if "Status" in line: ret["status"] = line.split(" ")[-1].lower() elif "Version" in line: ret["version"] = line.split(" ")[-1].lower() return ret
fded4735eda73dc19dd51dc13a1141345505b3b9
3,652,722
def get_receiver_type(rinex_fname):
    """
    Return the receiver type (header line REC # / TYPE / VERS) found in
    *rinex_fname*.
    """
    with open(rinex_fname) as fid:
        for line in fid:
            if line.rstrip().endswith('END OF HEADER'):
                break
            elif line.rstrip().endswith('REC # / TYPE / VERS'):
                return line[20:40].strip()
    raise ValueError('receiver type not found in header of RINEX file '
                     '{}'.format(rinex_fname))
7391f7a100455b8ff5ab01790f62518a3c4a079b
3,652,723
def is_cyclone_phrase(phrase):
    """Returns whether all the space-delimited words in phrase are cyclone words.

    A phrase is a cyclone phrase if and only if all of its component words are
    cyclone words, so we first split the phrase into words using .split(), and
    then check if all of the words are cyclone words.
    """
    return all([is_cyclone_word(word) for word in phrase.split()])
8014490ea2391b1acec1ba641ba89277065f2dd9
3,652,724
import numpy as np


def wl_to_wavenumber(wl, angular=False):
    """Given wavelength in meters, convert to wavenumber in 1/cm.

    The wave number represents the number of wavelengths in one cm.
    If angular is true, will calculate the angular wavenumber.
    """
    if angular:
        wnum = (2 * np.pi) / (wl * 100)
    else:
        wnum = 1 / (wl * 100)
    return wnum
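A worked example for visible light at 500 nm:

# 1 / (500e-9 m * 100 cm/m) = 20,000 wavelengths per cm.
print(wl_to_wavenumber(500e-9))                 # 20000.0
print(wl_to_wavenumber(500e-9, angular=True))   # about 125663.7 (2*pi times larger)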
ca34e3abc5f9ed0d555c836819c9f3c8d3ab9e4b
3,652,725
def easeOutCubic(n):
    """A cubic tween function that begins fast and then decelerates.

    Args:
        n (float): The time progress, starting at 0.0 and ending at 1.0.

    Returns:
        (float) The line progress, starting at 0.0 and ending at 1.0.
        Suitable for passing to getPointOnLine().
    """
    _checkRange(n)
    n = n - 1
    return n**3 + 1
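A small sketch sampling the tween (assumes _checkRange from the surrounding module is available); the values show a fast start and a slow finish:

for t in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(t, round(easeOutCubic(t), 4))
# 0.0 0.0
# 0.25 0.5781
# 0.5 0.875
# 0.75 0.9844
# 1.0 1.0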
20aea25b2ee937618df2b674178f2a767c373da7
3,652,726
import re def _fix_entries(entries): """recursive function to collapse entries into correct format""" cur_chapter_re, chapter_entry = None, None new_entries = [] for entry in entries: title, doxy_path, subentries = entry if subentries is not None: new_subentries = _fix_entries(subentries) new_entries.append([title, doxy_path, new_subentries]) elif cur_chapter_re and cur_chapter_re.match(title): chapter_entry[2].append(entry) else: new_entries.append(entry) chapter_match = CHAPTER_RE.match(title) if chapter_match: cur_chapter_re = re.compile( chapter_match.group('num') + r'\.\d+:') chapter_entry = entry chapter_entry[-1] = [] else: cur_chapter_re, chapter_entry = None, None return new_entries
1f8ac466533c17c1ad4e7cf2d27f2e7ff098ae79
3,652,728
def _check_bulk_delete(attempted_pairs, result): """ Checks if the RCv3 bulk delete command was successful. """ response, body = result if response.code == 204: # All done! return body errors = [] non_members = pset() for error in body["errors"]: match = _SERVER_NOT_A_MEMBER_PATTERN.match(error) if match is not None: pair = match.groupdict() non_members = non_members.add( (normalize_lb_id(pair["lb_id"]), pair["server_id"])) continue match = _LB_INACTIVE_PATTERN.match(error) if match is not None: errors.append(LBInactive(match.group("lb_id"))) continue match = _LB_DOESNT_EXIST_PATTERN.match(error) if match is not None: del_lb_id = normalize_lb_id(match.group("lb_id")) # consider all pairs with this LB to be removed removed = [(lb_id, node_id) for lb_id, node_id in attempted_pairs if lb_id == del_lb_id] non_members |= pset(removed) continue match = _SERVER_DOES_NOT_EXIST.match(error) if match is not None: del_server_id = match.group("server_id") # consider all pairs with this server to be removed removed = [(lb_id, node_id) for lb_id, node_id in attempted_pairs if node_id == del_server_id] non_members |= pset(removed) else: raise UnknownBulkResponse(body) if errors: raise BulkErrors(errors) elif non_members: to_retry = pset(attempted_pairs) - non_members return bulk_delete(to_retry) if to_retry else None else: raise UnknownBulkResponse(body)
2bf99d74a23d3522a2a53a711fbb2c8d43748eb1
3,652,729
def get_frontend_ui_base_url(config: "CFG") -> str:
    """
    Return ui base url
    """
    return as_url_folder(urljoin(get_root_frontend_url(config), FRONTEND_UI_SUBPATH))
4c34a1830431e28ec084853be6d93f1e487865a9
3,652,730
def read_image(name):
    """
    Reads image into a training example. Might be good to threshold it.
    """
    im = Image.open(name)
    pix = im.load()
    example = []
    for x in range(16):
        for y in range(16):
            example.append(pix[x, y])
    return example
be510bfee0a24e331d1b9bfb197b02edaafd0d70
3,652,731
def l3tc_underlay_lag_config_unconfig(config, dut1, dut2, po_name, members_dut1, members_dut2):
    """

    :param config:
    :param dut1:
    :param dut2:
    :param po_name:
    :param members_dut1:
    :param members_dut2:
    :return:
    """
    st.banner("{}Configuring LAG between Spine and Leaf node.".format('Un' if config != 'yes' else ''))

    result = True
    if config == 'yes':
        # configure po and add members
        [out, exceptions] = \
            utils.exec_all(fast_start,
                           [[poapi.config_portchannel, dut1, dut2, po_name, members_dut1, members_dut2, "add"]])
        st.log([out, exceptions])
    else:
        # del po and delete members
        [out, exceptions] = \
            utils.exec_all(fast_start,
                           [[poapi.config_portchannel, dut1, dut2, po_name, members_dut1, members_dut2, "del"]])
        st.log([out, exceptions])

    return result
de4c8775b178380e5d9c90ee3c74082d6553d97f
3,652,732
def _xrdcp_copyjob(wb, copy_job: CopyFile, xrd_cp_args: XrdCpArgs, printout: str = '') -> int:
    """xrdcp based task that processes a copyfile and its arguments"""
    if not copy_job:
        return

    overwrite = xrd_cp_args.overwrite
    batch = xrd_cp_args.batch
    sources = xrd_cp_args.sources
    chunks = xrd_cp_args.chunks
    chunksize = xrd_cp_args.chunksize
    makedir = xrd_cp_args.makedir
    tpc = xrd_cp_args.tpc
    posc = xrd_cp_args.posc
    # hashtype = xrd_cp_args.hashtype
    streams = xrd_cp_args.streams
    cksum = xrd_cp_args.cksum
    timeout = xrd_cp_args.timeout
    rate = xrd_cp_args.rate

    cmdline = f'{copy_job.src} {copy_job.dst}'
    return retf_print(_xrdcp_sysproc(cmdline, timeout))
ce4475329a6f75d1819874492f26ceef7113a0f2
3,652,733
import logging


def merge_preclusters_ld(preclusters):
    """
    Bundle together preclusters that share one LD snp
    * [ Cluster ]
    Returntype: [ Cluster ]
    """
    clusters = list(preclusters)
    for cluster in clusters:
        chrom = cluster.gwas_snps[0].snp.chrom
        start = min(gwas_snp.snp.pos for gwas_snp in cluster.gwas_snps)
        end = max(gwas_snp.snp.pos for gwas_snp in cluster.gwas_snps)

    # A dictionary that maps from snp to merged clusters
    snp_owner = dict()
    for cluster in preclusters:
        for ld_snp in cluster.ld_snps:
            # If this SNP has been seen in a different cluster
            if ld_snp in snp_owner and snp_owner[ld_snp] is not cluster:
                # Set other_cluster to that different cluster
                other_cluster = snp_owner[ld_snp]
                merged_cluster = merge_clusters(cluster, other_cluster)

                # Remove the two previous clusters and replace them with
                # the merged cluster
                clusters.remove(cluster)
                clusters.remove(other_cluster)
                clusters.append(merged_cluster)

                # Set the new cluster as the owner of these SNPs.
                for snp in merged_cluster.ld_snps:
                    snp_owner[snp] = merged_cluster
                for snp in cluster.ld_snps:
                    snp_owner[snp] = merged_cluster

                # Skip the rest of this cluster.
                break
            else:
                snp_owner[ld_snp] = cluster

    for cluster in clusters:
        chrom = cluster.gwas_snps[0].snp.chrom
        start = min(gwas_snp.snp.pos for gwas_snp in cluster.gwas_snps)
        end = max(gwas_snp.snp.pos for gwas_snp in cluster.gwas_snps)

    logging.info("\tFound %i clusters from the GWAS peaks" % (len(clusters)))

    return clusters
fd5409c1fc8463a2c795b2cc2685c1cf1a77f4ad
3,652,734
def cross_recurrence_matrix( xps, yps ):
    """Cross recurrence matrix.

    Args:
        xps (numpy.array):
        yps (numpy.array):

    Returns:
        numpy.array : A 2D numpy array.
    """
    return recurrence_matrix( xps, yps )
017fa50fdd3c68e4bf1703635365d84c3508d0b3
3,652,735
import numpy


def define_panels(x, y, N=40):
    """
    Discretizes the geometry into panels using 'cosine' method.

    Parameters
    ----------
    x: 1D array of floats
        x-coordinate of the points defining the geometry.
    y: 1D array of floats
        y-coordinate of the points defining the geometry.
    N: integer, optional
        Number of panels;
        default: 40.

    Returns
    -------
    panels: 1D Numpy array of Panel objects.
        The list of panels.
    """
    R = (x.max()-x.min())/2.0            # circle radius
    x_center = (x.max()+x.min())/2.0     # x-coordinate of circle center

    theta = numpy.linspace(0.0, 2.0*numpy.pi, N+1)  # array of angles
    x_circle = x_center + R*numpy.cos(theta)        # x-coordinates of circle

    x_ends = numpy.copy(x_circle)        # x-coordinate of panels end-points
    y_ends = numpy.empty_like(x_ends)    # y-coordinate of panels end-points

    # extend coordinates to consider closed surface
    x, y = numpy.append(x, x[0]), numpy.append(y, y[0])

    # compute y-coordinate of end-points by projection
    I = 0
    for i in range(N):
        while I < len(x)-1:
            if (x[I] <= x_ends[i] <= x[I+1]) or (x[I+1] <= x_ends[i] <= x[I]):
                break
            else:
                I += 1
        a = (y[I+1]-y[I])/(x[I+1]-x[I])
        b = y[I+1] - a*x[I+1]
        y_ends[i] = a*x_ends[i] + b
    y_ends[N] = y_ends[0]

    # create panels
    panels = numpy.empty(N, dtype=object)
    for i in range(N):
        panels[i] = Panel(x_ends[i], y_ends[i], x_ends[i+1], y_ends[i+1])

    return panels
e34ae13a7cdddc8be69e5cbba84b964bd11e6ec3
3,652,736
def compile_for_llvm(function_name, def_string, optimization_level=-1,
                     globals_dict=None):
    """Compiles function_name, defined in def_string to be run through LLVM.

    Compiles and runs def_string in a temporary namespace, pulls the
    function named 'function_name' out of that namespace, optimizes it
    at level 'optimization_level', -1 for the default optimization,
    and marks it to be JITted and run through LLVM.
    """
    namespace = {}
    if globals_dict is None:
        globals_dict = globals()
    exec def_string in globals_dict, namespace
    func = namespace[function_name]
    if optimization_level is not None:
        if optimization_level >= DEFAULT_OPT_LEVEL:
            func.__code__.co_optimization = optimization_level
            func.__code__.co_use_jit = True
    return func
61494afcde311e63138f75fb8bf59244d5c6d4e0
3,652,737
def setup_svm_classifier(training_data, y_training, testing_data, features, method="count", ngrams=(1, 1)):
    """
    Setup SVM classifier model using own implementation

    Parameters
    ----------
    training_data: Pandas dataframe
        The dataframe containing the training data for the classifier
    testing_data: Pandas dataframe
        The dataframe containing the testing data for the classifier
    y_training: Pandas dataframe
        The dataframe containing the y training data for the classifier
    features: String or list of strings if using multiple features
        Names of columns of df that are used for training the classifier
    method: String
        Can be either "count" or "tfidf" for specifying method of feature weighting
    ngrams: tuple (min_n, max_n), with min_n, max_n integer values
        range for ngrams used for vectorization

    Returns
    -------
    model: SVM Classifier (scratch implementation)
        Trained SVM Classifier from own implementation
    vec: sklearn CountVectorizer or TfidfVectorizer
        CountVectorizer or TfidfVectorizer fit and transformed for training data
    x_testing: Pandas dataframe
        The dataframe containing the test data for the SVM classifier
    """
    # generate x and y training data
    if method == "count":
        vec, x_training, x_testing = define_features_vectorizer(features, training_data, testing_data, ngramrange=ngrams)
    elif method == "tfidf":
        vec, x_training, x_testing = define_features_tfidf(features, training_data, testing_data, ngramrange=ngrams)
    else:
        print("Method has to be either count or tfidf")
        return 1

    # train classifier
    model = SVMClassifier_scratch()
    model.fit(x_training, y_training)

    return model, vec, x_testing
111937690db2c170852b57cdbfc3135c628ac26c
3,652,738
def buildHeaderString(keys):
    """ Use authentication keys to build a literal header string that will be
    passed to the API with every call.
    """
    headers = {
        # Request headers
        'participant-key': keys["participantKey"],
        'Content-Type': 'application/json',
        'Ocp-Apim-Subscription-Key': keys["subscriptionKey"]
    }
    return headers
4505fb679dec9727a62dd328f92f832ab45c417b
3,652,739
from typing import List
from typing import cast


def build_goods_query(
    good_ids: List[str], currency_id: str, is_searching_for_sellers: bool
) -> Query:
    """
    Build buyer or seller search query.

    Specifically, build the search query
        - to look for sellers if the agent is a buyer, or
        - to look for buyers if the agent is a seller.

    In particular, if the agent is a buyer and the demanded good ids are
    {'tac_good_0', 'tac_good_2', 'tac_good_3'}, the resulting constraint
    expression is:

        tac_good_0 >= 1 OR tac_good_2 >= 1 OR tac_good_3 >= 1

    That is, the OEF will return all the sellers that have at least one of the
    goods in the query (assuming that the sellers are registered with the data
    model specified).

    :param good_ids: the list of good ids to put in the query
    :param currency_id: the currency used for pricing and transacting.
    :param is_searching_for_sellers: Boolean indicating whether the query is
        for sellers (supply) or buyers (demand).
    :return: the query
    """
    data_model = _build_goods_datamodel(
        good_ids=good_ids, is_supply=is_searching_for_sellers
    )
    constraints = [Constraint(good_id, ConstraintType(">=", 1)) for good_id in good_ids]
    constraints.append(Constraint("currency_id", ConstraintType("==", currency_id)))
    constraint_expr = cast(List[ConstraintExpr], constraints)

    if len(good_ids) > 1:
        constraint_expr = [Or(constraint_expr)]

    query = Query(constraint_expr, model=data_model)
    return query
97cccadc265743d743f3e2e757e0c81ff110072b
3,652,740
def make_piecewise_const(num_segments):
    """Makes a piecewise constant semi-sinusoid curve with num_segments segments."""
    true_values = np.sin(np.arange(0, np.pi, step=0.001))
    seg_idx = np.arange(true_values.shape[0]) // (true_values.shape[0] / num_segments)
    return pd.Series(true_values).groupby(seg_idx).mean().tolist()
d6004488ae0109b730cb73dc9e58e65caaed8798
3,652,741
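For illustration (assuming the usual np/pd aliases for numpy and pandas), four segments average the half-sine over four equal index ranges and yield a symmetric staircase.

import numpy as np
import pandas as pd

true_values = np.sin(np.arange(0, np.pi, step=0.001))
seg_idx = np.arange(true_values.shape[0]) // (true_values.shape[0] / 4)
levels = pd.Series(true_values).groupby(seg_idx).mean().tolist()
# levels is a 4-step approximation of sin(x) on [0, pi),
# symmetric about the peak, roughly [0.37, 0.90, 0.90, 0.37]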
def convert_rational_from_float(number):
    """ converts a float to rational as form of a tuple. """
    f = Fraction(str(number))  # str act as a round
    return f.numerator, f.denominator
f3a00a150795b008ccc8667a3a0437eb2de2e2af
3,652,742
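A short usage sketch for the function above; it relies on fractions.Fraction, and because Fraction(str(x)) parses the decimal representation, 0.1 becomes 1/10 rather than the exact binary-float ratio.

from fractions import Fraction  # import assumed by convert_rational_from_float

assert convert_rational_from_float(0.25) == (1, 4)
assert convert_rational_from_float(0.1) == (1, 10)   # str() sidesteps binary-float rounding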
def classname(obj):
    """Returns the name of an objects class"""
    return obj.__class__.__name__
15b03c9ce341bd151187f03e8e95e6299e4756c3
3,652,743
def train(epoch, model, dataloader, optimizer, criterion, device, writer, cfg):
    """
    training the model.

    Args:
        epoch (int): number of training steps.
        model (class): model of training.
        dataloader (dict): dict of dataset iterator. Keys are tasknames, values are corresponding dataloaders.
        optimizer (Callable): optimizer of training.
        criterion (Callable): loss criterion of training.
        device (torch.device): device of training.
        writer (class): output to tensorboard.
        cfg: configuration of training.

    Return:
        losses[-1] : the loss of training
    """
    model.train()

    metric = PRMetric()
    losses = []

    for batch_idx, (x, y) in enumerate(dataloader, 1):
        for key, value in x.items():
            x[key] = value.to(device)
        y = y.to(device)

        optimizer.zero_grad()
        y_pred = model(x)

        if cfg.model_name == 'capsule':
            loss = model.loss(y_pred, y)
        else:
            loss = criterion(y_pred, y)

        loss.backward()
        optimizer.step()

        metric.update(y_true=y, y_pred=y_pred)
        losses.append(loss.item())

        data_total = len(dataloader.dataset)
        data_cal = data_total if batch_idx == len(dataloader) else batch_idx * len(y)
        if (cfg.train_log and batch_idx % cfg.log_interval == 0) or batch_idx == len(dataloader):
            # p, r, f1 are macro-averaged; with micro-averaging all three are identical, so that case is reported as acc
            acc, p, r, f1 = metric.compute()
            logger.info(f'Train Epoch {epoch}: [{data_cal}/{data_total} ({100. * data_cal / data_total:.0f}%)]\t'
                        f'Loss: {loss.item():.6f}')
            logger.info(f'Train Epoch {epoch}: Acc: {100. * acc:.2f}%\t'
                        f'macro metrics: [p: {p:.4f}, r:{r:.4f}, f1:{f1:.4f}]')

    if cfg.show_plot and not cfg.only_comparison_plot:
        if cfg.plot_utils == 'matplot':
            plt.plot(losses)
            plt.title(f'epoch {epoch} train loss')
            plt.show()

        if cfg.plot_utils == 'tensorboard':
            for i in range(len(losses)):
                writer.add_scalar(f'epoch_{epoch}_training_loss', losses[i], i)

    return losses[-1]
41de6aa37b41c837d9921e673414a70cc798478b
3,652,744
def custom_timeseries_widget_for_behavior(node, **kwargs):
    """Use a custom TimeSeries widget for behavior data"""
    if node.name == 'Velocity':
        return SeparateTracesPlotlyWidget(node)
    else:
        return show_timeseries(node)
34b296ab98b0eb6f9e2ddd080d5919a0a7158adc
3,652,745
def db_tween_factory(handler, registry):
    """A database tween, doing automatic session management."""

    def db_tween(request):
        response = None
        try:
            response = handler(request)
        finally:
            session = getattr(request, "_db_session", None)
            if session is not None:
                # always rollback/close the read-only session
                try:
                    session.rollback()
                except DatabaseError:
                    registry.raven_client.captureException()
                finally:
                    registry.db.release_session(session)
        return response

    return db_tween
5e5150855db08931af8ba82e3f44e51b6caf54f3
3,652,746
import time


def calibrate_profiler(n, timer=time.time):
    """
    Calibration routine to return the fudge factor. The fudge factor
    is the amount of time it takes to call and return from the profiler
    handler. The profiler can't measure this time, so it will be
    attributed to the user code unless it's subtracted off.
    """
    starttime = timer()
    p = Profiler(fudge=0.0)
    for i in range(n):
        a_very_long_function_name()
    p.stop()
    stoptime = timer()
    simpletime = p.get_time('a_very_long_function_name')
    realtime = stoptime - starttime
    profiletime = simpletime + p.overhead
    losttime = realtime - profiletime
    return losttime/(2*n)  # 2 profile events per function call
ee1f0af52f5530542503be4f277c90f249f83fb5
3,652,747
def getbias(x, bias):
    """Bias in Ken Perlin's bias and gain functions."""
    return x / ((1.0 / bias - 2.0) * (1.0 - x) + 1.0 + 1e-6)
0bc551e660e133e0416f5e426e5c7c302ac3fbbe
3,652,748
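A brief sketch of how the bias parameter bends the unit interval (the 1e-6 term only guards against division by zero): bias = 0.5 is essentially the identity, smaller values pull the curve toward 0, larger values push it toward 1.

print(round(getbias(0.25, 0.5), 3))   # ~0.25  (bias 0.5 leaves x almost unchanged)
print(round(getbias(0.25, 0.2), 3))   # ~0.077 (low bias pulls values toward 0)
print(round(getbias(0.25, 0.8), 3))   # ~0.571 (high bias pushes values toward 1)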
from typing import Dict
from typing import Optional


def get_exif_flash_fired(exif_data: Dict) -> Optional[bool]:
    """
    Parses the "flash" value from exif to determine if it was fired.

    Possible values:

    +-------------------------------------------------------+------+----------+-------+
    | Status                                                | Hex  | Binary   | Fired |
    +-------------------------------------------------------+------+----------+-------+
    | No Flash                                              | 0x0  | 00000000 | No    |
    | Fired                                                 | 0x1  | 00000001 | Yes   |
    | "Fired, Return not detected"                          | 0x5  | 00000101 | Yes   |
    | "Fired, Return detected"                              | 0x7  | 00000111 | Yes   |
    | "On, Did not fire"                                    | 0x8  | 00001000 | No    |
    | "On, Fired"                                           | 0x9  | 00001001 | Yes   |
    | "On, Return not detected"                             | 0xd  | 00001101 | Yes   |
    | "On, Return detected"                                 | 0xf  | 00001111 | Yes   |
    | "Off, Did not fire"                                   | 0x10 | 00010000 | No    |
    | "Off, Did not fire, Return not detected"              | 0x14 | 00010100 | No    |
    | "Auto, Did not fire"                                  | 0x18 | 00011000 | No    |
    | "Auto, Fired"                                         | 0x19 | 00011001 | Yes   |
    | "Auto, Fired, Return not detected"                    | 0x1d | 00011101 | Yes   |
    | "Auto, Fired, Return detected"                        | 0x1f | 00011111 | Yes   |
    | No flash function                                     | 0x20 | 00100000 | No    |
    | "Off, No flash function"                              | 0x30 | 00110000 | No    |
    | "Fired, Red-eye reduction"                            | 0x41 | 01000001 | Yes   |
    | "Fired, Red-eye reduction, Return not detected"       | 0x45 | 01000101 | Yes   |
    | "Fired, Red-eye reduction, Return detected"           | 0x47 | 01000111 | Yes   |
    | "On, Red-eye reduction"                               | 0x49 | 01001001 | Yes   |
    | "On, Red-eye reduction, Return not detected"          | 0x4d | 01001101 | Yes   |
    | "On, Red-eye reduction, Return detected"              | 0x4f | 01001111 | Yes   |
    | "Off, Red-eye reduction"                              | 0x50 | 01010000 | No    |
    | "Auto, Did not fire, Red-eye reduction"               | 0x58 | 01011000 | No    |
    | "Auto, Fired, Red-eye reduction"                      | 0x59 | 01011001 | Yes   |
    | "Auto, Fired, Red-eye reduction, Return not detected" | 0x5d | 01011101 | Yes   |
    | "Auto, Fired, Red-eye reduction, Return detected"     | 0x5f | 01011111 | Yes   |
    +-------------------------------------------------------+------+----------+-------+

    :param exif_data:
    :return: If the flash was fired, or None if the exif information is not
        present
    """
    if 'Flash' not in exif_data:
        return None

    return bool((int(exif_data['Flash']) & 1) > 0)
82b4fc095d60426622202243f141614b9632340f
3,652,749
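Usage sketch: only bit 0 of the EXIF Flash value matters here, so any of the "Fired" rows in the table above map to True.

assert get_exif_flash_fired({'Flash': 0x19}) is True    # "Auto, Fired"
assert get_exif_flash_fired({'Flash': 0x10}) is False   # "Off, Did not fire"
assert get_exif_flash_fired({}) is None                 # Flash tag missing entirely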
import requests
import json


def get_geoJson(addr):
    """
    Queries the Google Maps API for specified address, returns a dict of the
    formatted address, the state/territory name, and a float-ified version of
    the latitude and longitude.
    """
    res = requests.get(queryurl.format(addr=addr, gmapkey=gmapkey))
    dictr = {}
    if res.json()["status"] == "ZERO_RESULTS" or not res.ok:
        dictr["res"] = res
    else:
        print(json.dumps(res.json(), indent=4))
        rresj = res.json()["results"][0]
        dictr["formatted_address"] = rresj["formatted_address"]
        dictr["latlong"] = rresj["geometry"]["location"]
        for el in rresj["address_components"]:
            if el["types"][0] == "administrative_area_level_1":
                dictr["state"] = el["short_name"]
    return dictr
500c2aa18c8b3b305c912b91efcc9f51121ca7b3
3,652,751
def display_data_in_new_tab(message, args, pipeline_data):
    """ Displays the current message data in a new tab """
    window = sublime.active_window()
    tab = window.new_file()
    tab.set_scratch(True)
    edit_token = message['edit_token']
    tab.insert(edit_token, 0, message['data'])
    return tab
a64b7ac4138b921a53adb96b9933f1825048b955
3,652,752
def _cost( q,p, xt_measure, connec, params ) :
    """
    Returns a total cost, sum of a small regularization term and the data attachment.

    .. math ::

        C(q_0, p_0) = .01 * H(q0,p0) + 1 * A(q_1, x_t)

    Needless to say, the weights can be tuned according to the signal-to-noise ratio.
    """
    s,r = params  # Deformation scale, Attachment scale
    q1 = _HamiltonianShooting(q,p,s)[0]  # Geodesic shooting from q0 to q1
    # To compute a data attachment cost, we need the set of vertices 'q1' into a measure.
    q1_measure = Curve._vertices_to_measure( q1, connec )
    attach_info = _data_attachment( q1_measure, xt_measure, r )
    return [ .01* _Hqp(q, p, s) + 1* attach_info[0] , attach_info[1] ]
193d23a11d9704867d0a89846a6a7187de1e953a
3,652,753
def get_full_lang_code(lang=None):
    """ Get the full language code

    Args:
        lang (str, optional): A BCP-47 language code, or None for default

    Returns:
        str: A full language code, such as "en-us" or "de-de"
    """
    if not lang:
        lang = __active_lang

    return lang or "en-us"
1e0e49797dc5ed3f1fd148ac4ca1ca073231268c
3,652,754
def acquire_images(cam, nodemap, nodemap_tldevice):
    """
    This function acquires and saves 10 images from a device.

    :param cam: Camera to acquire images from.
    :param nodemap: Device nodemap.
    :param nodemap_tldevice: Transport layer device nodemap.
    :type cam: CameraPtr
    :type nodemap: INodeMap
    :type nodemap_tldevice: INodeMap
    :return: True if successful, False otherwise.
    :rtype: bool
    """

    print '*** IMAGE ACQUISITION ***\n'
    try:
        result = True

        # Set acquisition mode to continuous
        #
        #  *** NOTES ***
        #  Because the example acquires and saves 10 images, setting acquisition
        #  mode to continuous lets the example finish. If set to single frame
        #  or multiframe (at a lower number of images), the example would just
        #  hang. This would happen because the example has been written to
        #  acquire 10 images while the camera would have been programmed to
        #  retrieve less than that.
        #
        #  Setting the value of an enumeration node is slightly more complicated
        #  than other node types. Two nodes must be retrieved: first, the
        #  enumeration node is retrieved from the nodemap; and second, the entry
        #  node is retrieved from the enumeration node. The integer value of the
        #  entry node is then set as the new value of the enumeration node.
        #
        #  Notice that both the enumeration and the entry nodes are checked for
        #  availability and readability/writability. Enumeration nodes are
        #  generally readable and writable whereas their entry nodes are only
        #  ever readable.
        #
        #  Retrieve enumeration node from nodemap

        # In order to access the node entries, they have to be casted to a pointer type (CEnumerationPtr here)
        node_acquisition_mode = PySpin.CEnumerationPtr(nodemap.GetNode('AcquisitionMode'))
        if not PySpin.IsAvailable(node_acquisition_mode) or not PySpin.IsWritable(node_acquisition_mode):
            print 'Unable to set acquisition mode to continuous (enum retrieval). Aborting...'
            return False

        # Retrieve entry node from enumeration node
        node_acquisition_mode_continuous = node_acquisition_mode.GetEntryByName('Continuous')
        if not PySpin.IsAvailable(node_acquisition_mode_continuous) or not PySpin.IsReadable(node_acquisition_mode_continuous):
            print 'Unable to set acquisition mode to continuous (entry retrieval). Aborting...'
            return False

        # Retrieve integer value from entry node
        acquisition_mode_continuous = node_acquisition_mode_continuous.GetValue()

        # Set integer value from entry node as new value of enumeration node
        node_acquisition_mode.SetIntValue(acquisition_mode_continuous)

        print 'Acquisition mode set to continuous...'

        #  Begin acquiring images
        #
        #  *** NOTES ***
        #  What happens when the camera begins acquiring images depends on the
        #  acquisition mode. Single frame captures only a single image, multi
        #  frame catures a set number of images, and continuous captures a
        #  continuous stream of images. Because the example calls for the
        #  retrieval of 10 images, continuous mode has been set.
        #
        #  *** LATER ***
        #  Image acquisition must be ended when no more images are needed.
        cam.BeginAcquisition()

        print 'Acquiring images...'

        #  Retrieve device serial number for filename
        #
        #  *** NOTES ***
        #  The device serial number is retrieved in order to keep cameras from
        #  overwriting one another. Grabbing image IDs could also accomplish
        #  this.
        device_serial_number = ''
        node_device_serial_number = PySpin.CStringPtr(nodemap_tldevice.GetNode('DeviceSerialNumber'))
        if PySpin.IsAvailable(node_device_serial_number) and PySpin.IsReadable(node_device_serial_number):
            device_serial_number = node_device_serial_number.GetValue()
            print 'Device serial number retrieved as %s...' % device_serial_number

        # Retrieve, convert, and save images
        for i in range(NUM_IMAGES):
            try:

                #  Retrieve next received image
                #
                #  *** NOTES ***
                #  Capturing an image houses images on the camera buffer. Trying
                #  to capture an image that does not exist will hang the camera.
                #
                #  *** LATER ***
                #  Once an image from the buffer is saved and/or no longer
                #  needed, the image must be released in order to keep the
                #  buffer from filling up.
                image_result = cam.GetNextImage(1000)

                #  Ensure image completion
                #
                #  *** NOTES ***
                #  Images can easily be checked for completion. This should be
                #  done whenever a complete image is expected or required.
                #  Further, check image status for a little more insight into
                #  why an image is incomplete.
                if image_result.IsIncomplete():
                    print 'Image incomplete with image status %d ...' % image_result.GetImageStatus()

                else:

                    #  Print image information; height and width recorded in pixels
                    #
                    #  *** NOTES ***
                    #  Images have quite a bit of available metadata including
                    #  things such as CRC, image status, and offset values, to
                    #  name a few.
                    width = image_result.GetWidth()
                    height = image_result.GetHeight()
                    print 'Grabbed Image %d, width = %d, height = %d' % (i, width, height)

                    #  Convert image to mono 8
                    #
                    #  *** NOTES ***
                    #  Images can be converted between pixel formats by using
                    #  the appropriate enumeration value. Unlike the original
                    #  image, the converted one does not need to be released as
                    #  it does not affect the camera buffer.
                    #
                    #  When converting images, color processing algorithm is an
                    #  optional parameter.
                    image_converted = image_result.Convert(PySpin.PixelFormat_Mono8, PySpin.HQ_LINEAR)

                    # Create a unique filename
                    if device_serial_number:
                        filename = 'Acquisition-%s-%d.jpg' % (device_serial_number, i)
                    else:  # if serial number is empty
                        filename = 'Acquisition-%d.jpg' % i

                    #  Save image
                    #
                    #  *** NOTES ***
                    #  The standard practice of the examples is to use device
                    #  serial numbers to keep images of one device from
                    #  overwriting those of another.
                    image_converted.Save(filename)
                    print 'Image saved at %s' % filename

                    #  Release image
                    #
                    #  *** NOTES ***
                    #  Images retrieved directly from the camera (i.e. non-converted
                    #  images) need to be released in order to keep from filling the
                    #  buffer.
                    image_result.Release()
                    print ''

            except PySpin.SpinnakerException as ex:
                print 'Error: %s' % ex
                return False

        #  End acquisition
        #
        #  *** NOTES ***
        #  Ending acquisition appropriately helps ensure that devices clean up
        #  properly and do not need to be power-cycled to maintain integrity.
        cam.EndAcquisition()

    except PySpin.SpinnakerException as ex:
        print 'Error: %s' % ex
        return False

    return result
dd3454b3ddbff27dd73750c630ff5e63737fa50c
3,652,755
import importlib


def apply_operations(source: dict, graph: BaseGraph) -> BaseGraph:
    """
    Apply operations as defined in the YAML.

    Parameters
    ----------
    source: dict
        The source from the YAML
    graph: kgx.graph.base_graph.BaseGraph
        The graph corresponding to the source

    Returns
    -------
    kgx.graph.base_graph.BaseGraph
        The graph corresponding to the source

    """
    operations = source['operations']
    for operation in operations:
        op_name = operation['name']
        op_args = operation['args']
        module_name = '.'.join(op_name.split('.')[0:-1])
        function_name = op_name.split('.')[-1]
        f = getattr(importlib.import_module(module_name), function_name)
        log.info(f"Applying operation {op_name} with args: {op_args}")
        f(graph, **op_args)
    return graph
d78410d27da574efc30d08555eaefde0c77cb513
3,652,756
def tt_logdotexp(A, b):
    """Construct a Theano graph for a numerically stable log-scale dot product.

    The result is more or less equivalent to `tt.log(tt.exp(A).dot(tt.exp(b)))`
    """
    A_bcast = A.dimshuffle(list(range(A.ndim)) + ["x"])

    sqz = False
    shape_b = ["x"] + list(range(b.ndim))
    if len(shape_b) < 3:
        shape_b += ["x"]
        sqz = True

    b_bcast = b.dimshuffle(shape_b)
    res = tt_logsumexp(A_bcast + b_bcast, axis=1)
    return res.squeeze() if sqz else res
f543557a0b24159ede7d8cc0c8ed5df3ed2123f4
3,652,757
def _check_like(val, _np_types, _native_types, check_str=None):  # pylint: disable=too-many-return-statements
    """
    Checks the following:

    - if val is instance of _np_types or _native_types
    - if val is a list or ndarray of _np_types or _native_types
    - if val is a string or list of strings that can be parsed by check_str

    Does not check:

    - if val is an ndarray of strings that can be parsed by check_str
    """
    _all_types = _np_types + _native_types

    if isinstance(val, _all_types):
        return True
    elif isinstance(val, string_types):
        return check_str and check_str(val)
    elif isinstance(val, (list, tuple)):
        for v in val:
            if isinstance(v, string_types):
                if check_str and check_str(v):
                    continue
            if not isinstance(v, _all_types):
                return False
        return True
    elif hasattr(val, 'dtype'):
        if val.dtype == np.object:
            return all(isinstance(v, _native_types) for v in val)
        else:
            return val.dtype.type in _np_types
    else:
        return False
ab7875d329c09a491178b721c112b64142d2e566
3,652,758
def rotation_matrix(x, y, theta):
    """
    Calculate the rotation matrix. Origin is assumed to be (0, 0)

    theta must be in radians
    """
    return [np.cos(theta) * x - np.sin(theta) * y, np.sin(theta) * x + np.cos(theta) * y]
53f646429f7a4b719b197cacbc71442ebef719d4
3,652,759
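Despite its name, the function above returns the rotated point rather than a matrix; a short sketch (np assumed to be numpy) rotating (1, 0) by 90 degrees:

import numpy as np  # alias assumed by rotation_matrix

x_new, y_new = rotation_matrix(1.0, 0.0, np.pi / 2)
# x_new ~ 0.0 (up to floating-point error), y_new ~ 1.0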
from typing import List def create_players(num_human: int, num_random: int, smart_players: List[int]) \ -> List[Player]: """Return a new list of Player objects. <num_human> is the number of human player, <num_random> is the number of random players, and <smart_players> is a list of difficulty levels for each SmartPlayer that is to be created. The list should contain <num_human> HumanPlayer objects first, then <num_random> RandomPlayer objects, then the same number of SmartPlayer objects as the length of <smart_players>. The difficulty levels in <smart_players> should be applied to each SmartPlayer object, in order. """ goal = generate_goals(num_random + num_human + len(smart_players)) final = [] for x in range(num_human): final.append(HumanPlayer(x, goal[x])) for y in range(num_random): final.append(RandomPlayer(num_human + y, goal[num_human + y])) for z in range(len(smart_players)): final.append(SmartPlayer(num_human + num_random + z, goal[num_human + num_random + z], smart_players[z])) return final
10a7e840992417d46c79d794e66e1de5de16dd95
3,652,760
def extract_test_params(root):
    """VFT parameters, e.g. TEST_PATTERN, TEST_STRATEGY, ..."""
    res = {}
    '''
    xpath = STATIC_TEST + '*'
    elems = root.findall(xpath) + root.findall(xpath+'/FIXATION_CHECK*')
    #return {e.tag:int(e.text) for e in elems if e.text.isdigit()}
    print(xpath)
    for e in elems:
        print(e.tag)
        if e.text.isdigit():
            res[e.tag] = int(e.text)
        elif len(e.text) > 1:
            #print(e.tag, e.text,type(e.text),'$'*100)
            res[e.tag] =e.text
        else:
            for ee in e:
                if ee.tag not in ['QUESTIONS_ASKED','SF']:
                    if ee.text.isdigit():
                        res[ee.tag] = int(ee.text)
                    elif len(ee.text) > 1:
                        res[ee.tag] = ee.text
    '''
    for p in params:
        xpath = STATIC_TEST + p
        el = root.findall(xpath)
        if not el:
            res[p.split('/')[-1]] =''
        elif el[0].text.isdigit():
            res[el[0].tag] = int(el[0].text)
        else:
            res[el[0].tag] = el[0].text

    for pth in [DISPLAY_NAME,VISIT_DATE,SERIES_DATE_TIME,TEST_NODE+'PUPIL_DIAMETER',TEST_NODE+'PUPIL_DIAMETER_AUTO',TEST_NODE+'EXAM_TIME']:
        e=root.find(pth)
        if e.text is None:
            res[e.tag] = e.text
        else:
            if e.text.isdigit():
                res[e.tag] = int(e.text)
            else:
                res[e.tag] = e.text

    '''
    vkind = ['THRESHOLD', 'TOTAL', 'PATTERN']
    for vk in vkind:
        vs = extract_vft_values(root, vk)
        mat = vf2matrix(vs)
        res[vk+'_MATRIX'] = [mat]
    '''
    return res
ebd0e1d86af8d741ff993fc54b6ef4b3a7be6ac4
3,652,762
from typing import Optional
from typing import List


def csc_list(
    city: str,
    state: Optional[str] = None,
    country: Optional[str] = None,
) -> List[db.Geoname]:
    """
    >>> [g.country_code for g in csc_list('sydney')]
    ['AU', 'CA', 'US', 'US', 'ZA', 'VU', 'US', 'US', 'CA']

    >>> [g.name for g in csc_list('sydney', country='australia')]
    ['Sydney']

    >>> [g.timezone for g in csc_list('sydney', state='victoria')][:3]
    ['Australia/Sydney', 'America/Glace_Bay', 'America/Phoenix']
    """
    if state and country:
        cinfo = db.country_info(country)
        states = [
            g for g in db.select_geonames_name(state)
            if g.feature_class == 'A' and g.country_code == cinfo.iso
        ]
        cities = [
            g for g in db.select_geonames_name(city)
            if g.feature_class == 'P' and g.country_code == cinfo.iso
        ]
        city_matches = list(_match(cities, states))
        if city_matches:
            return [c for (c, _) in city_matches]

    #
    # Try omitting state.  If the country is specified, that alone may be sufficient.
    #
    if country:
        cinfo = db.country_info(country)
        cities = [
            g for g in db.select_geonames_name(city)
            if g.feature_class == 'P' and g.country_code == cinfo.iso
        ]
        if cities:
            return cities

    #
    # Perhaps state is really a city?
    #
    if state and country:
        cinfo = db.country_info(country)
        cities = [
            g for g in db.select_geonames_name(state)
            if g.country_code == cinfo.iso
        ]
        if cities:
            return cities

    #
    # Perhaps the specified country is wrong?
    #
    if state:
        states = [g for g in db.select_geonames_name(state) if g.feature_class == 'A']
        cities = [g for g in db.select_geonames_name(city) if g.feature_class == 'P']
        city_matches = list(_match(cities, states))
        if city_matches:
            return [c for (c, _) in city_matches]

    #
    # Perhaps city itself is unique?
    #
    cities = [g for g in db.select_geonames_name(city) if g.feature_class == 'P']
    if cities:
        return cities

    return list(db.select_geonames_name(city))
6c27a16c22a40d095bd3e3fad7660bbee867751e
3,652,763
from typing import Iterable
from typing import Tuple


def calculate_frame_score(current_frame_hsv: Iterable[cupy.ndarray],
                          last_frame_hsv: Iterable[cupy.ndarray]) -> Tuple[float]:
    """Calculates score between two adjacent frames in the HSV colourspace.
    Frames should be split, e.g. cv2.split(cv2.cvtColor(frame_data, cv2.COLOR_BGR2HSV)).

    Arguments:
        curr_frame_hsv: Current frame.
        last_frame_hsv: Previous frame.

    Returns:
        Tuple containing the average pixel change for each component as well as
        the average across all components, e.g. (avg_h, avg_s, avg_v, avg_all).
    """
    current_frame_hsv = [x.astype(cupy.int32) for x in current_frame_hsv]
    last_frame_hsv = [x.astype(cupy.int32) for x in last_frame_hsv]

    delta_hsv = [0, 0, 0, 0]
    for i in range(3):
        num_pixels = current_frame_hsv[i].shape[0] * current_frame_hsv[i].shape[1]
        delta_hsv[i] = cupy.sum(
            cupy.abs(current_frame_hsv[i] - last_frame_hsv[i])) / float(num_pixels)

    delta_hsv[3] = sum(delta_hsv[0:3]) / 3.0
    return tuple(delta_hsv)
db5819ab0696364569f79f326ab7e28f0f0371b3
3,652,764
def huber_loss_function(sq_resi, k=1.345):
    """Robust loss function which penalises outliers, as detailed in Jankowski et al (2018).

    Parameters
    ----------
    sq_resi : `float` or `list`
        A single or list of the squared residuals.
    k : `float`, optional
        A constant that defines at which distance the loss function starts to
        penalize outliers. |br| Default: 1.345.

    Returns
    -------
    rho : `float` or `list`
        The modified squared residuals.
    """
    single_value = False
    if isinstance(sq_resi, float) or isinstance(sq_resi, int):
        sq_resi = np.array([sq_resi])
        single_value = True
    elif isinstance(sq_resi, list):
        sq_resi = np.array(sq_resi)

    rho = []
    residual = np.sqrt(abs(sq_resi))
    for j in range(len(residual)):
        if residual[j] < k:
            rho.append( sq_resi[j]/2 )
        else:
            rho.append( k * residual[j] - 1./2. * k**2 )

    if single_value:
        return rho[0]
    else:
        return rho
bf8d5f3aa042297014b7b93316fe557784c4c5b1
3,652,765
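A small numeric sketch of the function above (np assumed to be numpy): squared residuals whose residual is below k stay on the quadratic branch (halved), while larger ones grow only linearly with the residual.

print(huber_loss_function(1.0))          # 0.5: residual 1.0 < k, quadratic branch 1.0/2
print(huber_loss_function(9.0))          # ~3.13: residual 3.0 > k, linear branch k*3 - k**2/2
print(huber_loss_function([1.0, 9.0]))   # [0.5, ~3.13]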
import re


def clean_sentence(sentence: str) -> str:
    """Preprocessing applied to the text before feeding it into BERT.

    Args:
        sentence (str): [description]

    Returns:
        str: [description]
    """
    sentence = re.sub(r"<[^>]*?>", "", sentence)  # strip tags
    sentence = mojimoji.zen_to_han(sentence, kana=False)
    sentence = neologdn.normalize(sentence)
    sentence = re.sub(
        r'[!"#$%&\'\\\\()*+,\-./:;<=>?@\[\]\^\_\`{|}~「」〔〕“”〈〉『』【】&*・()$#@?!`+¥%︰-@]。、♪',
        " ",
        sentence,
    )  # symbols and punctuation
    sentence = re.sub(r"https?://[\w/:%#\$&\?\(\)~\.=\+\-]+", "", sentence)
    sentence = re.sub(r"[0-90-9a-zA-Za-zA-Z]+", " ", sentence)
    sentence = "".join(
        [
            emoji_dict[c].get("short_name", "")
            if c in emoji.UNICODE_EMOJI["en"]
            else c
            for c in sentence
        ]
    )
    return sentence
bf5f9df5ab04ff96ae7f8199dfbeafae30d764eb
3,652,766
from typing import Union
from enum import Enum


def assert_user(user_id: int, permission: Union[str, Enum] = None) -> bool:
    """
    Assert that a user_id belongs to the requesting user, or that
    the requesting user has a given permission.
    """
    permission = (
        permission.value if isinstance(permission, Enum) else permission
    )
    return flask.g.user.id == user_id or flask.g.user.has_permission(
        permission
    )
6ef54d60a0b62e4ffb1330dba7bffeeac0df03c7
3,652,767
def single_prob(n, n0, psi, c=2):
    """
    Eq. 1.3 in Conlisk et al. (2007), note that this implementation is only
    correct when the variable c = 2

    Note: if psi = .5 this is the special HEAP case in which the function no
    longer depends on n.

    c = number of cells
    """
    a = (1 - psi) / psi
    F = (get_F(a, n) * get_F((c - 1) * a, n0 - n)) / get_F(c * a, n0)
    return float(F)
05c0c627a05bb683fa3c20cacefa121f5cddba14
3,652,768
def array_pair_sum_iterative(arr, k):
    """
    returns the array of pairs using an iterative method.
    complexity: O(n^2)
    """
    result = []
    for i in range(len(arr)):
        for j in range(i + 1, len(arr)):
            if arr[i] + arr[j] == k:
                result.append([arr[i], arr[j]])
    return result
c4f0eb5e290c784a8132472d85023662be291a71
3,652,769
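Example of the quadratic pair search above; each unordered pair is reported once, in input order.

assert array_pair_sum_iterative([1, 2, 3, 4], 5) == [[1, 4], [2, 3]]
assert array_pair_sum_iterative([2, 2, 3], 4) == [[2, 2]]
assert array_pair_sum_iterative([1, 2], 10) == []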
def merge_named_payload(name_to_merge_op):
    """Merging dictionary payload by key.

    name_to_merge_op is a dict mapping from field names to merge_ops.

    Example:
      If name_to_merge_op is
        {
            'f1': mergeop1,
            'f2': mergeop2,
            'f3': mergeop3
        },
      Then two payloads { 'f1': a1, 'f2': b1, 'f3': c1 } and
      { 'f1': a2, 'f2': b2, 'f3': c2 } will be merged into
        {
            'f1': mergeop1(a1, a2),
            'f2': mergeop2(b1, b2),
            'f3': mergeop3(c1, c2)
        }.
    """
    def merge(p1,p2):
        p = {}
        for name, op in name_to_merge_op.items():
            p[name] = op(p1[name], p2[name])
        return p
    return merge
ee20147b7937dff208da6ea0d025fe466d8e92ed
3,652,770
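Usage sketch with two toy merge ops; merge_named_payload returns a closure that merges two payload dicts field by field.

merge = merge_named_payload({
    'count': lambda a, b: a + b,    # accumulate
    'latest': lambda a, b: b,       # keep the right-hand value
})
merged = merge({'count': 3, 'latest': 'v1'}, {'count': 4, 'latest': 'v2'})
assert merged == {'count': 7, 'latest': 'v2'}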
def euclidean_distance(this_set, other_set, bsf_dist):
    """Calculate the Euclidean distance between 2 1-D arrays.

    If the distance is larger than bsf_dist, then we end the calculation and
    return the bsf_dist.

    Args:
        this_set: ndarray
            The array
        other_set: ndarray
            The comparative array.
        bsf_dist:
            The best so far distance.

    Returns:
        output: float
            The accumulation of Euclidean distance.
    """
    sum_dist = 0
    for index in range(0, len(this_set)):
        sum_dist += (this_set[index] - other_set[index]) ** 2
        if sum_dist > bsf_dist:
            return bsf_dist
    return sum_dist
7055c0de77cad987738c9b3ec89b0381002fbfd4
3,652,771
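Note that the function above actually accumulates the squared distance, and it abandons early once the running sum exceeds bsf_dist; a small sketch:

a = [0.0, 0.0, 0.0]
b = [1.0, 2.0, 2.0]
assert euclidean_distance(a, b, float('inf')) == 9.0   # full squared distance: 1 + 4 + 4
assert euclidean_distance(a, b, 3.0) == 3.0            # abandoned once 1 + 4 exceeds bsf_dist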
def host(provider: Provider) -> Host:
    """Create host"""
    return provider.host_create(utils.random_string())
36e1b6f0ddf8edc055d56cac746271f5d3801111
3,652,773
def bj_struktur_p89(x, n: int = 5, **s):  # brute force
    """_summary_

    :param x: _description_
    :type x: _type_
    :param n: _description_, defaults to 5
    :type n: int, optional
    :return: _description_
    :rtype: _type_
    """
    gamma, K = gamma_K_function(**s)
    b_j = np.empty((x.size, n + 1))
    for i, xi in enumerate(x):
        for j in range(n + 1):
            b_j[i, j] = bj_p89(K, xi, j)
    return b_j
d21fc501411ada9f2173da7ca447418e2f51a86f
3,652,774
def _get_pulse_width_and_area(tr, ipick, icross, max_pulse_duration=.08):
    """
    Measure the width & area of the arrival pulse on the displacement trace
    Start from the displacement peak index (=icross - location of first zero
    crossing of velocity)

    :param tr: displacement trace
    :type tr: obspy.core.trace.Trace or microquake.core.Trace
    :param ipick: index of pick in trace
    :type ipick: int
    :param icross: index of first zero crossing in corresponding velocity trace
    :type icross: int
    :param max_pulse_duration: max allowed duration (sec) beyond pick to search
                               for zero crossing of disp pulse
    :type max_pulse_duration: float

    return pulse_width, pulse_area
    :returns: pulse_width, pulse_area: Returns the width and area of the
              displacement pulse
    :rtype: float, float
    """
    fname = '_get_pulse_width_and_area'

    data = tr.data
    sign = np.sign(data)

    nmax = int(max_pulse_duration * tr.stats.sampling_rate)
    iend = ipick + nmax

    epsilon = 1e-10

    if icross >= iend:
        i = iend - 1

    for i in range(icross, iend):
        diff = np.abs(data[i] - data[ipick])
        if diff < epsilon or sign[i] != sign[icross]:
            break

    if i == iend - 1:
        logger.info("%s: Unable to locate termination of displacement "
                    "pulse for tr:%s!" % (fname, tr.get_id()))
        return 0, 0

    istop = i
    pulse_width = float(istop - ipick) * tr.stats.delta
    pulse_area = np.trapz(data[ipick:istop], dx=tr.stats.delta)

    return pulse_width, pulse_area
43598f797f2956def740881b33b38d8824ba7ff3
3,652,775
def load_backend(name, options=None):
    """Load the named backend.

    Returns the backend class registered for the name.

    If you pass None as the name, this will load the default backend.
    See the documentation for get_default() for more information.

    Raises:
      UnknownBackend: The name is not recognized.
      LoadingError: There was an error loading the backend.
    """
    if name is None:
        assert options is None
        return get_default()
    if options is None:
        options = {}
    if name not in _backends:
        raise UnknownBackend(name)
    options = _backends[name][1](**options)
    key = (name, tuple(sorted(list(options.items()))))
    res = _active_backends.get(key, None)
    if res is None:
        try:
            res = _backends[name][0](options)
            _active_backends[key] = res
        except Exception as e:
            raise LoadingError(name) from e
    return res
1df4c1b0c0d9d81e607a5884f3391883ab6ea3c5
3,652,776