content: string (length 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
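Each record below pairs a content string (a Python source snippet) with its sha1 hash and an integer id. As a minimal sketch of how such records might be consumed, assuming the dump is stored as JSON Lines with exactly these three fields (the file name "data.jsonl" and the storage format are assumptions, not stated by this listing):

import json

def iter_records(path="data.jsonl"):
    """Yield (id, sha1, content) tuples from a JSON Lines dump of this dataset."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            record = json.loads(line)  # one record per line: content, sha1, id
            yield record["id"], record["sha1"], record["content"]

Each yielded content value is one of the snippets listed below.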
def _get_event_id(oracle_cursor):  # -> (int)
    """gets the event_id to be used for updating the NR history

    :oracle_conn : a Cx_Oracle connection to the NRO database
    :returns (int): a valid NRO event_id to be used for updating NRO records
    """
    oracle_cursor.execute("""select event_seq.NEXTVAL from dual""")
    row = oracle_cursor.fetchone()
    event_id = int(row[0])

    oracle_cursor.execute("""
        INSERT INTO event (event_id, event_type_cd, event_timestamp)
        VALUES (:event_id, 'SYST', sysdate)
        """,
        event_id=event_id
    )

    return event_id
ee524bb3c4819e9cb614219900be4695219d046f
35,489
import pickle


def broken_model():
    """Create a non-functional model object."""
    r = MockRedis()
    r.set("1.2.0", pickle.dumps("lol"))
    return lambda: r
a1dee18e86f61263f629b8b20ceb75e0f55585d3
35,491
def _envFile(self, s, *args, **kw):
    """Same as Environment.File but without expanding $VAR logic"""
    if SCons.Util.is_Sequence(s):
        result = []
        for e in s:
            result.append(self.fs.File((e,) + args, kw))
        return result
    return self.fs.File((s,) + args, kw)
9d388255faaa71705dc555c0d3349ef0f334c86d
35,492
def xception_model(num_classes, pretrained=True, **kwargs):
    """Constructs an Xception model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    encoder = AlignedXception(num_classes=1000)
    # encoder = pretrainedmodels.models.xception()
    if pretrained:
        state_dict = model_zoo.load_url(
            pretrained_settings['xception']['imagenet']['url'],
            model_dir='models')
        for name, weights in state_dict.items():
            if 'pointwise' in name:
                state_dict[name] = weights.unsqueeze(-1).unsqueeze(-1)
        encoder.load_state_dict(state_dict)
    model = RetinaNet(encoder=encoder, num_classes=num_classes, **kwargs)
    return model
f9b3167d0f569230119904eb10c6a0627c44da1d
35,493
def lwdown(atemp, clouds):
    """Call signature::

        lwd = lwdown(atemp, clouds)

    estimate downward (incoming) radiation in the long wave band
    from air temperature according to Parkinson and Washington (1979),
    A Large-Scale Numerical Model of Sea Ice, JGR, 84(C1), 311-337.

    INPUT:
    atemp  :: air temperature in Kelvin
    clouds :: fractional cloud cover, range: [0 1]

    OUTPUT:
    lwd    :: downward long wave radiation (positive down)

    Author: Martin Losch, Dec 2006 (rewritten for python in Mar 2015)
    """
    # some constants
    stefanBoltzmann = 5.670e-8  # W/m^2/K^4
    # first long wave downward radiation (eq. 5 of P&W, who cite Idso and
    # Jackson, 1969)
    lwd = stefanBoltzmann*atemp**4 \
        * (1 - 0.261*np.exp(-7.77e-4*(273 - atemp)**2)) \
        * (1 + 0.275*clouds)
    return lwd
b80838371a14363dd0f78dc329718e408882a1ff
35,494
def render_volume(workspace, cutoff=None, solid_color=(1., 1., 1.), style='surface', origin=(0., 0., 0.), window_size=(1920, 1200), opacity=1., background=(0.3, 0.3, 0.3), show_grid=True, plot_directly=True, show_axes=True, show_outline=True, cmap='gray', add_to_plot=None, notebook=False): """ Volume render using Pyvista Threshold filter :param workspace: domain :type workspace: Workspace or ndarray :param cutoff: specifying the values to render :type cutoff: tuple(int, int), optional :param solid_color: if set to None, the material is colored by the matrix's values. Otherwise, a solid color can be specified (e.g. for white (1., 1., 1.)) :type solid_color: tuple(float, float, float), optional :param style: specifying the representation style ('surface', 'edges', 'wireframe', 'points') :type style: string, optional :param origin: origin of the data as :type origin: tuple(float, float, float), optional :param window_size: with the popup window size :type window_size: tuple(int, int), optional :param opacity: opacity of volume :type opacity: float, optional :param background: color of the background from (0., 0., 0.) (black) to (1., 1., 1.) (white) :type: background: tuple(float, float, float) :param show_grid: show the grid with the size of the sides :type show_grid: bool, optional :param plot_directly: whether to return a Plotter object (to make further changes to it) or show the plot directly :type plot_directly: bool, optional :param show_axes: show orientation axis in the bottom left corner :type show_axes: bool, optional :param show_outline: show the bounding box outline of the domain :type show_outline: bool, optional :param cmap: matplotlib colormap to use (overwritten by solid_color if specified) :type cmap: str, optional :param add_to_plot: pass an already existing plotter object to add on top of this plot :type add_to_plot: pyvista.Plotter, optional :param notebook: plotting interactively in a jupyter notebook (overwrites show_grid to False) :type notebook: bool, optional :return: None is plot_directly is True, otherwise a plotter object :rtype: pyvista.Plotter object or None :Example >>> import pumapy as puma >>> ws_volume = puma.import_3Dtiff(puma.path_to_example_file("200_fiberform.tif"), 1.3e-6) >>> puma.render_volume(ws_volume) """ if cutoff is None: solid_color = None r = Renderer(add_to_plot, "threshold", workspace, cutoff, solid_color, style, origin, window_size, opacity, background, show_grid, plot_directly, show_axes, show_outline, cmap, None, notebook) return r.render()
6bddc01d467041170cdd42e9f2d4f0334fe2e151
35,495
def cutoff_list(a, feature=int(0)):
    """for list a, apply function cutoff to each element"""
    for i in range(len(a)):
        a[i] = cutoff(a[i], feature=feature)
    return a
fe13bf9cc4f097b22911e247ed6ab468552a365b
35,496
def carla_location_to_pose(carla_location):
    """
    Convert a carla location to an icv pose

    See carla_location_to_icv_point() for details.
    pose quaternion remains zero.

    :param carla_location: the carla location
    :type carla_location: carla.Location
    :return: an icv pose
    :rtype: geometry_msgs.msg.Pose
    """
    icv_pose = Pose()
    icv_pose.position = carla_location_to_icv_point(carla_location)
    return icv_pose
60e3a0cf2075d0d1cdfe631225a8de21718b068f
35,497
def estimate_biases(model_dat, ymdat):
    """numerically optimize modification indicators for equations, one at a time"""
    tau = model_dat["xdat"].shape[1]

    biases = zeros(model_dat["ndim"])
    biases_std = zeros(model_dat["ndim"])
    for bias_ind in range(model_dat["ndim"]):
        # compute biases
        bias, hess_i, sse = optimize_biases(model_dat, bias_ind, ymdat)
        biases[bias_ind] = bias
        # compute biases_std
        resvar = sse / (tau - 1)
        bias_std = (2 * resvar * (1 / hess_i)) ** (1 / 2)
        biases_std[bias_ind] = bias_std

    return biases, biases_std
513f79d37426f6dc3889ee4de0b9c510273bfabe
35,498
import logging def find_bad_positions(coverage_matrix, target_folder = None, trait = None, samplename = None, trait_cutoff = None, whitelist = None): """ Walk through all bases and find contigous regions of bases that fail the coverage/strandbias cutoff Create an output file of these regions If a base an expected variant (recorded in a ExpectedVariants instance ('whitelist'), record the trait to the instance) Return the updated whitelist """ region = None last_gene = None last_pos = 0 last_gene = None total_bases = 0 bad_bases = 0 sample = samplename bad_output_name = target_folder + sample + "_failed_regions_%s_cutoff_%s.csv" % (trait, trait_cutoff) bad_output = open(bad_output_name, "w") if trait == "strandbias": output_header = ["gene", "chrom", "start", "stop", "mean_strand_bias", "size"] elif trait == "coverage": output_header = ["gene", "chrom", "start", "stop", "mean_coverage", "size"] bad_output.write("\t".join(output_header) + "\n") for index, pandas_dict in coverage_matrix.iterrows(): chrompos, chrom, dp, gene, minus_dp, plus_dp, pos, start, stop, strand_ratio = pandas_dict row = [chrom, start, stop, gene, ".", "NA", (pos-start), strand_ratio] row = "\t".join([str(x) for x in row]) start = int(start) stop = int(stop) pos = int(pos) if whitelist: if whitelist.dict.get(chrompos): whitelist.add_coverage(chrompos, dp) whitelist.add_strand_ratio(chrompos, strand_ratio) ########################################################## pass_check = False if trait == "coverage": if dp > trait_cutoff: pass_check = True elif trait == "strandbias": if strand_ratio < trait_cutoff: pass_check = True total_bases += 1 if not pass_check: bad_bases += 1 ########################################################## if gene != last_gene or last_pos != (pos - 1): if region: result = region.print_output() bad_output.write(result + "\n") region = None elif not pass_check: if region: region.add_row(row) else: region = CoverageCheckClasses.BadRegion(row) else: if region: result = region.print_output() bad_output.write(result + "\n") region = None last_chrom = chrom last_pos = pos last_gene = gene bad_output.close() ###################################################################### good_bases = total_bases - bad_bases pass_percent = 100 * round( good_bases / float(total_bases), 3) if trait == "strandbias": sampleinfo.add_strandbias(sample, [pass_percent, good_bases, total_bases]) logging.info( "%s percent (%s/%s) of positions in %s have a strand bias below the threshold (%s:1)." % (pass_percent, good_bases, total_bases, sample, trait_cutoff) ) elif trait == "coverage": sampleinfo.add_coverage(sample, [pass_percent, good_bases, total_bases]) logging.info( "%s percent (%s/%s) of positions in %s have at least the minimum coverage of %sX." % (pass_percent, good_bases, total_bases, sample, trait_cutoff) ) return whitelist
02fbcb688fc763a414805c3c898027c14b12b9f0
35,500
def s_norm(p_string, uppercase=False):
    """
    Filters out all punctuation, normalizes the casing to either lowercase or
    uppercase of all letters, and removes extraneous whitespace between
    characters. That is, all whitespace will be replaced by a single space
    character separating the words.

    :param p_string The string to normalize.
    :param uppercase Whether to make the resulting string uppercase. By
        default, the resulting string will be all lowercase.
    """
    nopunct = s_liftpunct(p_string)
    if uppercase:
        nopunct = nopunct.upper()
    else:
        nopunct = nopunct.lower()
    return ' '.join(nopunct.split())
69519ddfb2fd58bab3b6db9695946a168ce3eec4
35,501
def _venv_changed(session: nox.sessions.Session) -> bool:
    """Return True if the installed session is different to that specified in the lockfile."""
    result = False
    if _venv_populated(session):
        expected = _file_content(_session_lockfile(session))
        actual = _file_content(_session_cachefile(session))
        result = actual != expected
    return result
a5723d9e3f5f71f3f7b83dc0bfa6ed659c9dca13
35,502
import random


def greedy_policy(A, s, Q, epsilon=None):
    """Given a state s, select an action a from the action space A such that
    Q(s, a) = max(Q(s, .)), breaking ties randomly when several actions share
    the same value.
    """
    max_q, a_max_q = -float('inf'), []
    for a_opt in A:
        q = get_dict(Q, s, a_opt)
        if q > max_q:
            max_q = q
            a_max_q = [a_opt]
        elif q == max_q:
            a_max_q.append(a_opt)
    return random.choice(a_max_q)
12ae83e3c28e6d65d4b3cc81aa712657c82834c3
35,503
def login_user(cursor, username):
    """create new session for user with username, return session key"""
    userid = get_userid(cursor, username)
    key = token_urlsafe()
    cursor.execute(
        "REPLACE INTO sessions ('userid', 'key') VALUES (?, ?)",
        (userid, key)
    )
    return key
b507aa966608a372df785b3e7d3c8a1a26c2d23a
35,504
def tapisize(fieldKeyName):
    """Transforms a string into a Tapis query parameter"""
    return fieldKeyName.lower()
cc8032a6cc9e822193430134bb33da8aef74cf06
35,505
def get_minions(returner):
    """
    Return a list of all minions

    CLI Example:

    .. code-block:: bash

        salt '*' ret.get_minions mysql
    """
    returners = salt.loader.returners(__opts__, __salt__)
    return returners["{0}.get_minions".format(returner)]()
f4dd2a96884ddfc356b3214b89bfd1da96e15a40
35,506
from typing import Union import torch def nx_second_order_proximity( G: Union[nx.Graph, nx.DiGraph], node_ids: Union[Tensor, ndarray, list], whole_graph_proximity: bool = True, to_batch: bool = False, distance_metric: str = "cosine", norm_rows_in_sample: bool = False, norm_rows: bool = True, ) -> Tensor: """ Takes a networkx graph G and generates second-order node proximities, also known as structural equivalence relations. Nodes are similar, if they share similar ties to alters. Diagonal elements are set to zero. Note that this includes non-PyTorch operations! Parameters ---------- G : Union[nx.Graph,nx.DiGraph] Input graph node_ids : Union[Tensor,ndarray,list] List of nodes. Must exist in G. whole_graph_proximity : bool, optional If True, similarities between nodes in node_ids is computed based on all alters in the graph (including those not in node_ids) If False, similarities are only calculated based on nodes contained in node_ids. ATTN: Note that if True, ordering of rows reflects G.nodes if False, ordering reflects node_ids supplied (subnetwork) by default True to_batch : bool, optional If true, will remove the row entries of nodes not in node_list If norm_rows is True, will also re-norm the rows, by default True distance_metric : str, optional Any distance metric from scipy.spatial.distance that works without parameter, by default 'cosine' norm_rows_in_sample : bool, optional If True, distances are scaled such that the highest distance is 1. This implies that distances depend on the sample provided, by default False norm_rows: bool, optional If True, distances are scaled for each node, such that sum(a_ij)=1 This does not take into account the similarity to itself, a_ii, which is always 0. Returns ------- ndarray Similarity matrix of dimension len(node_ids)^2 """ if isinstance(node_ids, list): node_ids = np.array(node_ids) if isinstance(node_ids, Tensor): node_ids = node_ids.numpy() if whole_graph_proximity: adjacency_matrix = np.zeros([len(G.nodes), len(G.nodes)]) similarity_matrix = np.zeros([len(node_ids), len(G.nodes)]) else: adjacency_matrix = np.zeros([len(node_ids), len(node_ids)]) similarity_matrix = np.zeros([len(node_ids), len(node_ids)]) if whole_graph_proximity: adjacency_matrix = nx.adjacency_matrix(G, weight="weight").todense() else: G_sub = G.subgraph(node_ids) for i, node in enumerate(node_ids): for j, (alter, datadict) in enumerate(G_sub[node].items()): if hasattr(datadict, "weight"): weight = datadict["weight"] else: weight = 1 adjacency_matrix[i, j] = weight similarity_matrix = pdist(adjacency_matrix, metric=distance_metric) similarity_matrix = 1 - squareform(similarity_matrix) similarity_matrix = similarity_matrix - np.eye( similarity_matrix.shape[0], similarity_matrix.shape[1] ) if norm_rows_in_sample: similarity_matrix = similarity_matrix / np.max( similarity_matrix ) # Norm max similarity within the sample to 1 if norm_rows and not to_batch: similarity_matrix = row_norm(similarity_matrix) similarity_matrix = np.nan_to_num(similarity_matrix, copy=False) if whole_graph_proximity: selection = np.searchsorted(np.array(G.nodes), node_ids) assert ( np.array(G.nodes)[selection] == node_ids ).all(), "Internal error, subsetting nodes" similarity_matrix = similarity_matrix[selection, :] if to_batch: similarity_matrix = whole_graph_rows_to_batch( similarity_matrix, selection, norm_rows=norm_rows ) return torch.as_tensor(similarity_matrix)
1eb2e37e0d52b0428843f71adc901a02c62eadb5
35,507
def calculate_average_resolution(sizes):
    """Returns the average dimensions for a list of resolution tuples."""
    count = len(sizes)
    horizontal = sum([x[0] for x in sizes]) / count
    vertical = sum([x[1] for x in sizes]) / count
    return (horizontal, vertical)
06dac1834989df96ce7bff88c435dd4067bfccbd
35,508
def is_enum0(*args):
    """
    is_enum0(F) -> bool

    Is the first operand a symbolic constant (enum member)?

    @param F (C++: flags_t)
    """
    return _ida_bytes.is_enum0(*args)
137b34efa475738ff3a9d1633822f1a1c7a0ca32
35,509
def view(image, ui_collapsed=False, annotations=True, interpolation=True, cmap=cm.viridis, mode='v', shadow=True, slicing_planes=False, gradient_opacity=0.2): """View the image. Creates and returns an ipywidget to visualize the image. The image can be 2D or 3D. The type of the image can be an numpy.array, itk.Image, vtk.vtkImageData, imglyb.ReferenceGuardingRandomAccessibleInterval, or something that is NumPy array-like, e.g. a Dask array. Parameters ---------- image : array_like, itk.Image, or vtk.vtkImageData The 2D or 3D image to visualize. ui_collapsed : bool, optional, default: False Collapse the native widget user interface. annotations : bool, optional, default: True Display annotations describing orientation and the value of a mouse-position-based data probe. interpolation: bool, optional, default: True Linear as opposed to nearest neighbor interpolation for image slices. cmap: string, optional, default: 'Viridis (matplotlib)' Colormap. Some valid values available at itkwidgets.cm.* mode: 'x', 'y', 'z', or 'v', optional, default: 'v' Only relevant for 3D images. Viewing mode: 'x': x-plane 'y': y-plane 'z': z-plane 'v': volume rendering shadow: bool, optional, default: True Use shadowing in the volume rendering. slicing_planes: bool, optional, default: False Enable slicing planes on the volume rendering. gradient_opacity: float, optional, default: 0.2 Gradient opacity for the volume rendering, in the range (0.0, 1.0]. Returns ------- viewer : ipywidget Display by placing at the end of a Jupyter cell or calling IPython.display.display. Query or set properties on the object to change the visualization or retrieve values created by interacting with the widget. """ viewer = Viewer(image=image, ui_collapsed=ui_collapsed, annotations=annotations, interpolation=interpolation, cmap=cmap, mode=mode, shadow=shadow, slicing_planes=slicing_planes, gradient_opacity=gradient_opacity) return viewer
5c71f27ba46258ad97dade0cab04fbf65e8e3c22
35,510
import click


def install_chute(ctx, chute, node, follow, version):
    """
    Install a chute from the store.

    CHUTE must be the name of a chute in the store.
    NODE must be the name of a node that you control.
    """
    client = ControllerClient()
    result = client.install_chute(chute, node, select_version=version)
    click.echo(util.format_result(result))

    if follow:
        result2 = client.follow_chute(chute, node)
        click.echo(util.format_result(result2))
        click.echo("Streaming messages until the update has completed.")
        click.echo("Ending output with Ctrl+C will not cancel the update.\n")
        ctx.invoke(watch_update_messages, node_id=result['router_id'],
                   update_id=result['_id'])

    return result
90781fc0c9d4cca877f44b9b93be2c8d075fa4a3
35,511
from pathlib import Path


def create_folder(subfolder: str, folder: str) -> None:
    """Function for creating folder structure for saved stationdata"""
    path_to_create = Path(folder, subfolder)
    Path(path_to_create).mkdir(parents=True, exist_ok=True)
    return None
e50052d22cb8385e1c3a83caa643ec0d0289d1b0
35,512
def locate_spikes_peakutils(
    data, fps=58.21, thresh=0.7, min_dist=None, max_allowed_firing_rate=1
) -> np.ndarray:
    """
    Find spikes from a dF/F matrix using the peakutils package.
    The fps parameter is used to calculate the minimum allowed distance
    between consecutive spikes, and to disqualify cells which had no evident
    dF/F peaks, which result in too many false-positives.

    :param float max_allowed_firing_rate: Maximal number of spikes per second
        that are considered viable.
    """
    assert len(data.shape) == 2 and data.shape[0] > 0
    if min_dist is None:
        min_dist = int(fps)
    else:
        min_dist = int(min_dist)
    all_spikes: np.ndarray = np.zeros_like(data)
    nan_to_zero = np.nan_to_num(data)
    max_spike_num = int(data.shape[1] // fps) * max_allowed_firing_rate
    for row, cell in enumerate(nan_to_zero):
        peaks = peakutils.indexes(cell, thres=thresh, min_dist=min_dist)
        num_of_peaks = len(peaks)
        if (num_of_peaks > 0) and (num_of_peaks < max_spike_num):
            all_spikes[row, peaks] = 1
    return all_spikes
e32c8ebc8024b4547592769744051b6d30897851
35,514
def has_valuable(camera):
    """Function checks if the camera sees any valuable.

    @param camera: webots camera object to use for the recognition
    @returns True if a valuable is detected, False otherwise
    """
    # Check if camera has recognition and is enabled
    if not camera.hasRecognition() or camera.getRecognitionSamplingPeriod() == 0:
        # the camera was not enabled or has no recognition
        return "No recognition on camera"
    # If the camera has recognition enabled, get the list of objects,
    # and check if any of them are valuables
    return (
        len(VALUABLE_SET & {t.get_model() for t in camera.getRecognitionObjects()}) > 0
    )
5ff65e4dc52abbcd42700168fb7aa26e83aa2aab
35,515
def _partition_fold(v, data):
    """
    partition the data ready for cross validation

    Inputs:
        v:    (int) cross validation parameter, number of cross folds
        data: (np.array) training data
    Outputs:
        list of partitioned indices
    """
    partition = []
    for i in range(v):
        if i == v - 1:
            partition.append(range(int(i*len(data)/5), len(data)))
        else:
            partition.append(range(int(i*len(data)/5), int(i*len(data)/5 + (len(data)/5))))
    return partition
fc833b5120c5d8e479af1758f86e0541c5d7d87c
35,516
def get_distance(m, M, Av=0):
    """
    calculate distance [in pc] from extinction-corrected magnitude
    using the equation: d = 10**((m - M + 5 - Av)/5)

    Note: m - M = 5*log10(d) - 5 + Av
    see http://astronomy.swin.edu.au/cosmos/I/Interstellar+Reddening

    Parameters
    ---------
    m : apparent magnitude
    M : absolute magnitude
    Av : extinction (in V band)
    """
    assert (m is not None) & (str(m) != "nan")
    assert (M is not None) & (str(M) != "nan")
    distance = 10 ** (0.2 * (m - M + 5 - Av))
    return distance
b4773065d7cf1bc793400ac344c4ca7a580f8567
35,517
def get_csr_as_text(csr_filename):
    """Convert CSR file to plaintext with OpenSSL."""
    return _run_openssl(['req', '-in', csr_filename, '-noout', '-text']).decode('utf-8')
f08d1d914b992474e2c51406bab58f2f2ad37c6c
35,518
def rename_entry(*args):
    """
    rename_entry(ord, name, flags=0x0) -> bool

    Rename entry point.

    @param ord: ordinal number of the entry point (C++: uval_t)
    @param name: name of entry point. If the specified location already has a
                 name, the old name will be appended to a repeatable comment.
                 (C++: const char *)
    @param flags: See AEF_* (C++: int)
    @return: success
    """
    return _ida_entry.rename_entry(*args)
d70cd7b42b8e6b4e748c7c2599a46859dd5f6059
35,520
from typing import Iterable
from typing import Tuple


def split_file_to_annotations_and_definitions(lines: Iterable[str]) -> Tuple[EnumLines, EnumLines, EnumLines]:
    """Enumerate a line iterable and splits into 3 parts."""
    enum_lines = sanitize_file_lines(lines)
    metadata, definitions, statements = multi_split_by(enum_lines, [_predicate_1, _predicate_2])
    return metadata, definitions, statements
64a5f6f4d32f80d90f1247472564ae04e902ae1d
35,521
def _SafeCreateLogoutURL(mr):
    """Make a logout URL w/ a detailed continue URL, otherwise use a short one."""
    try:
        return users.create_logout_url(mr.current_page_url)
    except users.RedirectTooLongError:
        if mr.project_name:
            return users.create_logout_url('/p/%s' % mr.project_name)
        else:
            return users.create_logout_url('/')
06ee47ba38abb660096bace71704ddd0becd465a
35,522
from typing import Tuple
from typing import Dict


def random_speech_to_text(
    draw
) -> st.SearchStrategy[Tuple[SpeechToTextGen, Dict]]:
    """Generates different speech_to_text functions."""
    kwargs = draw(random_speech_to_text_kwargs())
    return speech_to_text(**kwargs), kwargs
035d905aba274f1083daae07e2492f9ae84cd21f
35,523
def line_neighbor_score(mat, idx, line_width=1, distance_range=(20, 40), window_size=5, line_trick='min', neighbor_trick='mean', metric='diff'): """ :math: enrichment-score = line-trick(line) - neighbor-trick(neighbor) :param idx: (int) stripe location index :param mat: (2D ndarray) line matrix :param distance_range: (int) only consider this range off the diagonal :param line_width: (int) stripe width (# of bins) :param window_size: (int) size of the window for calculating enrichment :param line_trick: (str) 'min' or 'med' :param neighbor_trick: (str) 'mean' or 'med' :param metric: (str) 'diff' or 'ratio' """ half = int(line_width // 2) x1, x2 = idx - half, idx - half + line_width new_mat = np.zeros((distance_range[1] - distance_range[0],)) for j in range(distance_range[0], distance_range[1]): if j < window_size + half or j >= mat.shape[1] - window_size - half: continue y = j - distance_range[0] if line_trick == 'min': line_score = min(np.mean(mat[x1:x2, j - window_size - half:j - half]), np.mean(mat[x1:x2, j + 1 + half:j + window_size + half + 1])) else: line_score = np.median(np.concatenate( [mat[x1:x2, j - window_size - half:j - half], mat[x1:x2, j + 1 + half:j + window_size + half + 1]] )) if neighbor_trick == 'mean': neighbor_score = max(np.mean(mat[idx-window_size:x1, j-window_size-half:j+window_size+half+1]), np.mean(mat[x2+1:idx+window_size+1, j-window_size-half:j+window_size+half+1])) else: neighbor_score = max(np.median(mat[idx-window_size:x1, j-window_size-half:j+window_size+half+1]), np.median(mat[x2+1:idx+window_size+1, j-window_size-half:j+window_size+half+1])) if metric == 'diff': new_mat[y] = line_score - neighbor_score else: new_mat[y] = (line_score / neighbor_score - 1) if neighbor_score != 0 else line_score return new_mat
a3b4a49479c5bcf7f12e4bb23affda942ec3670e
35,524
def plugin_name_to_layerapi2_label(plugin_name):
    """Get a layerapi2 label from a plugin name.

    Args:
        plugin_name (string): the plugin name from which we create the label.

    Returns:
        (string): the layerapi2 label.
    """
    return "plugin_%s@%s" % (plugin_name, MFMODULE_LOWERCASE)
c91a5c463e3b15e324c049e9557a2711031dcb95
35,525
def unscii(font_name):
    """Given a font name, return a font object for usage."""
    return UnsciiFont(font_name)
41a8a03417d1d231cbd2bf99b09d8efd98afcd8c
35,526
def get_environ_dict(): """Return a dictionary of all environment keys/values.""" return { 'os.environ': _get_os_environ_dict(( 'AUTH_DOMAIN', 'CURRENT_CONFIGURATION_VERSION', 'CURRENT_MODULE_ID', 'CURRENT_VERSION_ID', 'DEFAULT_VERSION_HOSTNAME', 'FEDERATED_IDENTITY', 'FEDERATED_PROVIDER', 'GAE_LOCAL_VM_RUNTIME', 'HTTP_HOST', 'HTTP_PROXY', 'HTTP_X_APPENGINE_HTTPS', 'HTTP_X_APPENGINE_QUEUENAME', 'HTTP_X_ORIGINAL_HOST', 'HTTP_X_ORIGINAL_SCHEME', 'SERVER_NAME', 'SERVER_PORT', 'SERVER_SOFTWARE', 'USER_IS_ADMIN', )), 'app_identity': _get_app_identity_dict(( 'get_service_account_name', 'get_application_id', 'get_default_version_hostname', )), 'modules': _get_modules_dict(( 'get_current_module_name', 'get_current_version_name', 'get_current_instance_id', 'get_modules', 'get_versions', 'get_default_version', 'get_hostname', )), 'namespace_manager': _get_namespace_manager_dict(( 'get_namespace', 'google_apps_namespace', )), }
056b8499db4dbc14db904eb5f1c01ed49ae39a18
35,527
def k_from_a_ea(a, e_a, temp, r_gas):
    """
    convert using "alternate" form of Arrhenius eq

    :param a: pre-exponential factor
    :param e_a: activation energy with units consistent with given r_gas
    :param temp: temperature in K
    :param r_gas: universal gas constant in units consistent with e_a and temps
    """
    return a * np.exp(-e_a/(r_gas*temp))
79415b8ee583e03282d7ab02b5e255d611e5677f
35,528
def extract_source_info(df_adverse_ev):
    """Find information about who submitted the report"""
    qual_list = []
    for i in range(0, len(df_adverse_ev)):
        if df_adverse_ev.iloc[i]['primarysource'] is not None:
            col_names = list(df_adverse_ev.iloc[i]['primarysource'].keys())
            if 'qualification' in col_names:
                qual_list.append(pd.to_numeric(df_adverse_ev.iloc[i]['primarysource']['qualification']))
            else:
                qual_list.append(np.nan)
        else:
            qual_list.append(np.nan)
    df_adverse_ev['source'] = qual_list
    df_adverse_ev = df_adverse_ev.drop(['primarysource'], axis=1)
    return df_adverse_ev
246571a57467b03ed5f5bd2456108e4b93ec136f
35,530
import json


def load_base_models_json(filename="base_models.json"):
    """Load base models json to allow selecting pre-trained model.

    Args:
        filename (str) - filename for the json file with pre-trained models

    Returns:
        base_models - python dict version of JSON key-value pairs
    """
    with open(filename) as json_file:
        base_models = json.load(json_file)
    return base_models
c17f123e192b94e6f87938bca10822ea785e2d91
35,531
def _parsedatetime_parse(date_string):
    """Parse the given date_string using the parsedatetime module."""
    # for more details on how the parsedatetime.Calendar.parse function works, see:
    # https://github.com/bear/parsedatetime/blob/830775dc5e36395622b41f12317f5e10c303d3a2/parsedatetime/__init__.py#L1779
    cal = parsedatetime.Calendar()
    parsed_date = cal.parse(date_string)
    return parsed_date
a8ce67294d9c035c0777a2d1c0a4fe6e43f65154
35,532
def BFMM( source, target = None, charge = None, dipole1 = None, dipole2 = None, compute_source_velocity = False, compute_source_analytic_gradient = False, compute_source_anti_analytic_gradient = False, compute_target_velocity = False, compute_target_analytic_gradient = False, compute_target_anti_analytic_gradient = False, array_source_velocity = None, array_source_analytic_gradient = None, array_source_anti_analytic_gradient = None, array_target_velocity = None, array_target_analytic_gradient = None, array_target_anti_analytic_gradient = None, precision = 4, ): """ Pythonic interface for Biharmonic FMM Wraps the function: bfmm2dparttarg Parameters: source (required), float(2, ns): location of sources target (optional), float(2, nt): location of targets charge (optional), complex(ns): charges at source locations dipole1 (optional), complex(ns): dipole1 at source locations dipole2 (optional), complex(ns): dipole2 at source locations compute_#_* (optional), bool: whether to compute * at # locations array_#_* (optional), complex(n): preallocated arrays for result n = ns for #=source, nt for #=target if these arrays are not provided, are not of the correct size, not of the correct type, or not fortran contiguous, new arrays for the results will be allocated at runtime precision (optional), float: precision, see documentation for FMM Returns: Dictionary: 'ier': (integer) output code 0: successful completion of code 4: failure to allocate memory for tree 8: failure to allocate memory for FMM workspaces 16: failure to allocate memory for multipole/local expansions 'source': (quantities computed at source locations) 'u' : complex(ns), potential 'u_analytic_gradient' : complex(ns), analytic gradient 'u_anti_analytic_gradient' : complex(ns), anti-analytic gradient 'target': (quantities computed at target locations): same as above, but for target related things ns replaced by nt, in the shapes Some notes about the output: 2) If array_#_* is provided and was acceptable, the code: "array_#_* is output['#']['*']" will return True If the array was provided but incorrect, then the code will return False 3) Entries of the dictionary will only exist if they were asked for i.e. 
if no 'source' quantities were requested, the 'source' dictionary will not exist """ source, _, ns = check_array(source, (2,None), float, 'source', True) charge, ifcharge = check_array(charge, (ns,), complex, 'charge') dipole1, ifdipole1 = check_array(dipole1, (ns,), complex, 'dipole1') dipole2, ifdipole2 = check_array(dipole2, (ns,), complex, 'dipole2') if (ifdipole1 or ifdipole2) and not (ifdipole1 and ifdipole2): raise Exception('If one of the dipoles is set, than the other must \ also be set.') ifdipole = ifdipole1 vel, ifvel = check_output(array_source_velocity, compute_source_velocity, (ns,), complex) grada, ifgrada = check_output(array_source_analytic_gradient, compute_source_analytic_gradient, (ns,), complex) gradaa, ifgradaa = check_output(array_source_anti_analytic_gradient, compute_source_anti_analytic_gradient, (ns,), complex) target, iftarget, nt = check_array(target, (2,None), float, 'target', True) if not iftarget: if compute_target_velocity or compute_target_analytic_gradient \ or compute_target_anti_analytic_gradient: raise Exception('If asking for a target quanitity, \ target must be given') veltarg, ifveltarg = check_output(array_target_velocity, compute_target_velocity, (nt,), complex) gradatarg, ifgradatarg = check_output(array_target_analytic_gradient, compute_target_analytic_gradient, (nt,), complex) gradaatarg, ifgradaatarg = check_output(array_target_anti_analytic_gradient, compute_target_anti_analytic_gradient, (nt,), complex) ier = int(0) iprec = initialize_precision(precision) bhfmm2dparttarg(ier, iprec, ns, source, ifcharge, charge, ifdipole, dipole1, dipole2, ifvel, vel, ifgrada, grada, ifgradaa, gradaa, nt, target, ifveltarg, veltarg, ifgradatarg, gradatarg, ifgradaatarg, gradaatarg) output = {} any_source = compute_source_velocity or compute_source_analytic_gradient \ or compute_source_anti_analytic_gradient if any_source: source_output = {} if compute_source_velocity: source_output['u'] = vel if compute_source_analytic_gradient: source_output['u_analytic_gradient'] = grada if compute_source_anti_analytic_gradient: source_output['u_anti_analytic_gradient'] = gradaa output['source'] = source_output any_target = compute_target_velocity or compute_target_analytic_gradient \ or compute_target_anti_analytic_gradient if any_target: target_output = {} if compute_target_velocity: target_output['u'] = veltarg if compute_target_analytic_gradient: target_output['u_analytic_gradient'] = gradatarg if compute_target_anti_analytic_gradient: target_output['u_anti_analytic_gradient'] = gradaatarg output['target'] = target_output output['ier'] = ier return output
e1fbce824777c1d260e1e83a8a8fe7b1b2d05886
35,535
def get_dihedrals(a, b, c, d):
    """
    A function that gets dihedral angles between two residues.
    See set_neighbors6D for usage.
    """
    b0 = -1.0*(b - a)
    b1 = c - b
    b2 = d - c

    b1 /= np.linalg.norm(b1, axis=-1)[:, None]

    v = b0 - np.sum(b0*b1, axis=-1)[:, None]*b1
    w = b2 - np.sum(b2*b1, axis=-1)[:, None]*b1

    x = np.sum(v*w, axis=-1)
    y = np.sum(np.cross(b1, v)*w, axis=-1)

    return np.arctan2(y, x)
91897e67e7d73eeb60dc73b2ba45e1aaeec5c470
35,537
def iexact(self, compiler, connection): """A method to extend Django IExact class. Case-insensitive exact match. If the value provided for comparison is None, it will be interpreted as an SQL NULL. :type self: :class:`~django.db.models.lookups.IExact` :param self: the instance of the class that owns this method. :type compiler: :class:`~django_spanner.compiler.SQLCompilerst` :param compiler: The query compiler responsible for generating the query. Must have a compile method, returning a (sql, [params]) tuple. Calling compiler(value) will return a quoted `value`. :type connection: :class:`~google.cloud.spanner_dbapi.connection.Connection` :param connection: The Spanner database connection used for the current query. :rtype: tuple[str, str] :returns: A tuple of the SQL request and parameters. """ lhs_sql, params = self.process_lhs(compiler, connection) rhs_sql, rhs_params = self.process_rhs(compiler, connection) params.extend(rhs_params) rhs_sql = self.get_rhs_op(connection, rhs_sql) # Wrap the parameter in ^ and $ to restrict the regex to an exact match. if self.rhs_is_direct_value() and params and not self.bilateral_transforms: params[0] = "^(?i)%s$" % params[0] else: # lhs_sql is the expression/column to use as the regular expression. # Use concat to make the value case-insensitive. lhs_sql = "CONCAT('^(?i)', " + lhs_sql + ", '$')" if not self.rhs_is_direct_value() and not params: # If rhs is not a direct value and parameter is not present we want # to have only 1 formatable argument in rhs_sql else we need 2. rhs_sql = rhs_sql.replace("%%s", "%s") # rhs_sql is REGEXP_CONTAINS(%s, %%s), and lhs_sql is the column name. return rhs_sql % lhs_sql, params
ebe51c6b8c34a0cc1746169e8fe6e8eb79e01152
35,538
import json


def load_data_from_json(jsonfile):
    """Load the data contained in a .json file and return the corresponding
    Python object.

    :param jsonfile: The path to the .json file
    :type jsonfile: str

    :rtype: list or dict
    """
    jsondata = open(jsonfile).read()
    data = json.loads(jsondata)
    return data
f0f7a0620be8ffcd15a57fd561dda8525866faa3
35,539
def grpc_check(fpath):
    """
    Check whether grpc service is enabled in this .proto file.
    Note: only proto file with the following form will pass our check.

        service MyService {
            rpc MethodA(XXX) returns (XXX) {
            }
            rpc MethodB(XXX) returns (XXX) {
            }
        }
    """
    if not fpath.endswith(".proto"):
        return False
    grpc_found = False
    with open(fpath) as fin:
        kw1_found = False
        kw2_found = False
        for line in fin:
            line = line.strip()
            if kw1_found and kw2_found:
                if PATT_RET_ONLY.match(line):
                    grpc_found = True
                    break
            elif kw1_found:
                if PATT_RPC_RET.match(line):
                    kw2_found = True
                    grpc_found = True
                    break
                if PATT_RPC_ONLY.match(line):
                    kw2_found = True
            elif PATT_SERVICE.match(line):
                kw1_found = True
    return grpc_found
17a1539f8913bb35d2e89f31d7f48acbc1f35b54
35,540
def bb_intersection_over_union(boxA, boxB):
    """
    Computes IoU (Intersection over Union for 2 given bounding boxes)

    Args:
        boxA (list): A list of 4 elements holding bounding box coordinates (x1, y1, x2, y2)
        boxB (list): A list of 4 elements holding bounding box coordinates (x1, y1, x2, y2)

    Returns:
        iou (float): Overlap between 2 bounding boxes in terms of overlap
    """
    # determine the (x, y)-coordinates of the intersection rectangle
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])

    # compute the area of intersection rectangle
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)

    # compute the area of both the rectangles
    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)

    # compute the intersection over union by taking the intersection area and
    # dividing it by the sum of both areas minus the intersection area
    iou = interArea / float(boxAArea + boxBArea - interArea)

    # return the intersection over union value
    return iou
290d625dd3ed7ab37ecf3aa7f39a3b1727cecec0
35,541
def area(boxlist, axis=-1):
    """Computes area of boxes.

    Args:
        boxlist: BoxList holding N boxes
        axis: axis along which the box coordinates are split.

    Returns:
        a tensor with shape [N] representing box areas.
    """
    with tf.name_scope('Area'):
        x_min, y_min, x_max, y_max = tf.split(
            value=boxlist, num_or_size_splits=4, axis=axis)
        heights = tf.maximum(0.0, y_max - y_min)
        widths = tf.maximum(0.0, x_max - x_min)
        return tf.squeeze(heights * widths, axis=axis)
6c0e791b021d5196651c0b1830019f23840596fd
35,543
def _MatrixTriangularSolveGrad(op, grad):
    """Gradient for MatrixTriangularSolve."""
    a = op.inputs[0]
    adjoint_a = op.get_attr("adjoint")
    lower_a = op.get_attr("lower")
    c = op.outputs[0]
    grad_b = linalg_ops.matrix_triangular_solve(
        a, grad, lower=lower_a, adjoint=not adjoint_a)
    if adjoint_a:
        grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)
    else:
        grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)
    if lower_a:
        grad_a = array_ops.matrix_band_part(grad_a, -1, 0)
    else:
        grad_a = array_ops.matrix_band_part(grad_a, 0, -1)
    return (grad_a, grad_b)
3faac87370dd9dfa589174976c7dd6bed76e4628
35,544
from typing import Dict from typing import Any def list_persons_command(client: Client, args: Dict[str, Any]) -> CommandResults: """Get persons list from TOPdesk. Args: client: The client to preform command on. args: The arguments of the persons command. Return CommadResults of list of persons. """ persons = client.get_list_with_query(list_type="persons", start=args.get('start', None), page_size=args.get('page_size', None), query=args.get('query', None)) if len(persons) == 0: return CommandResults(readable_output='No persons found') headers = ['Id', 'Name', 'Telephone', 'JobTitle', 'Department', 'City', 'BranchName', 'Room'] readable_persons = [] for person in persons: readable_person = { 'Id': person.get('id', None), 'Name': person.get('dynamicName', None), 'Telephone': person.get('phoneNumber', None), 'JobTitle': person.get('jobTitle', None), 'Department': person.get('department', None), 'City': person.get('city', None), 'BranchName': person.get('branch', {}).get('name', None) if person.get('branch') else None, 'Room': person.get('location', {}).get('room', None) if person.get('location') else None } readable_persons.append(readable_person) readable_output = tableToMarkdown(f'{INTEGRATION_NAME} persons', readable_persons, headers=headers, removeNull=True) return CommandResults( readable_output=readable_output, outputs_prefix=f'{INTEGRATION_NAME}.Person', outputs_key_field='Id', outputs=capitalize_for_outputs(persons), raw_response=persons )
8fb8c1065b433e7821fa46d64332c2238d502f7f
35,545
def group_user_delete(user, group): """Delete an user from a certain group""" if not pagure_config.get("ENABLE_USER_MNGT", True): flask.abort(404) if not pagure_config.get("ENABLE_GROUP_MNGT", False): flask.abort(404) form = pagure.forms.ConfirmationForm() if form.validate_on_submit(): try: pagure.lib.query.delete_user_of_group( flask.g.session, username=user, groupname=group, user=flask.g.fas_user.username, is_admin=pagure.utils.is_admin(), ) flask.g.session.commit() pagure.lib.git.generate_gitolite_acls(project=None, group=group) flask.flash( "User `%s` removed from the group `%s`" % (user, group) ) except pagure.exceptions.PagureException as err: flask.g.session.rollback() flask.flash("%s" % err, "error") return flask.redirect( flask.url_for("ui_ns.view_group", group=group) ) except SQLAlchemyError: # pragma: no cover flask.g.session.rollback() flask.flash( "Could not remove user `%s` from the group `%s`." % (user.user, group), "error", ) _log.exception( "Could not remove user `%s` from the group `%s`." % (user.user, group) ) return flask.redirect(flask.url_for("ui_ns.view_group", group=group))
bc813bb31aed7798a59b117f7e1fb984f96dfc3b
35,546
def repair_follow_organization_view(request): """ Process the new or edit organization forms :param request: :return: """ # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer authority_required = {'verified_volunteer'} if not voter_has_authority(request, authority_required): return redirect_to_sign_in_page(request, authority_required) follow_organization_entries_updated = 0 follow_organization_entries_not_updated = 0 google_civic_election_id = request.GET.get('google_civic_election_id', 0) state_code = request.GET.get('state_code', "") # find entries without a voter_linked_organization_we_vote_id follow_organization_list = [] try: organization_query = FollowOrganization.objects.all() organization_query = organization_query.filter( (Q(voter_linked_organization_we_vote_id__isnull=True) | Q(voter_linked_organization_we_vote_id='')) ) follow_organization_list = list(organization_query) except Exception as e: pass voter_manager = VoterManager() for follow_organization in follow_organization_list: voter_linked_organization_we_vote_id = \ voter_manager.fetch_linked_organization_we_vote_id_from_local_id(follow_organization.voter_id) if positive_value_exists(voter_linked_organization_we_vote_id): try: follow_organization.voter_linked_organization_we_vote_id = voter_linked_organization_we_vote_id follow_organization.save() follow_organization_entries_updated += 1 except Exception as e: follow_organization_entries_not_updated += 1 return HttpResponseRedirect(reverse('organization:organization_list', args=()))
38a78fdc21ab6c9b0a06e8447462fe5580e3df1c
35,547
def vect3_add(v1, v2):
    """
    Adds two 3d vectors.

    v1, v2 (3-tuple): 3d vectors

    return (3-tuple): 3d vector
    """
    return (v1[0]+v2[0], v1[1]+v2[1], v1[2]+v2[2])
b43fde71f0cc5e927879a2b6942c60de8ac6cc79
35,548
import multiprocessing


def get_task_pool(thread=False):
    """
    Get a new task pool, which is either a single-thread or process pool
    depending on the current config.

    Returns:
        A new :class:`multiprocessing.pool.Pool` instance.
    """
    if thread or not config['app_multicore'] or config['enable_backtest'] and config['backtest_multicore']:
        task_pool = multiprocessing.pool.ThreadPool(processes=1)
    else:
        processes = config['backtest_processes'] if config['enable_backtest'] else config['app_processes']
        task_pool = multiprocessing.Pool(processes=processes)
    task_pool.daemon = True
    return task_pool
515ae5c2ac167df1eb0f4ba2c3d4795a9830e3eb
35,550
def get_date(article: ElementTree) -> int:
    """
    Extracts the year of the article. If ArticleDate exists, use its year;
    otherwise use the year from JournalIssue.
    """
    d = article.find("ArticleDate")
    if d is not None:
        return int(d.find("Year").text)
    d = article.find("Journal").find("JournalIssue").find("PubDate")
    y = d.find("Year")
    if y is not None:
        return int(y.text)
    return int(d.find("MedlineDate").text.split(" ")[0].split("-")[0])
3ca2f13d234df411e904195d9d1c08534ada1cea
35,551
def split(column, pattern=''):
    """
    Splits str around pattern (pattern is a regular expression)

    > pattern is a string representation of the regular expression
    """
    return _with_expr(exprs.StringSplit, column, pattern)
d091a5e89adf0d5de994b9a11c1c856cea5a8494
35,552
def precut(layers, links, all_terms, user_info):
    """
    This function cuts terms in layers if they do not exist inside the accuracy
    file of model 1. It also cuts all links if one of the terms inside does not
    exist inside the accuracy file of model 1. Finally it cuts all terms that do
    not exist inside the accuracy file of model 1.

    :return: Cut layers, links (edges) and terms are returned.
    """
    new_layers = []
    for layer in layers:
        new_layer = []
        for node in layer:
            if node in user_info:
                new_layer.append(node)
        if len(new_layer) != 0:
            new_layers.append(new_layer)
        else:
            new_layer = []

    new_links = set()
    for link in links:
        if link[0] in user_info and link[1] in user_info:
            new_links.add(link)

    new_all_terms = {}
    for term in all_terms:
        if term in user_info:
            new_all_terms[term] = all_terms[term]

    return new_layers, new_links, new_all_terms
cf04eec77d01ad931f7654a3743baaf51aad53fa
35,554
def computeMeanStd_binned_old( inDatas, valCol, binCol, binMin, binMax, binCount ): """Compute binned stats for a set of tables""" sums = np.zeros( binCount ) sumsSq = np.zeros_like( sums ) counts = np.zeros_like( sums ) bins = np.linspace( binMin, binMax, binCount+1 ) binSize = ( binMax - binMin ) / binCount for d_idx, d in enumerate( inDatas ): dbg( 'd_idx d binSize' ) dbg( 'd[binCol]' ) for i in range( binCount ): binBot = bins[i] binTop = bins[i+1] dbg( 'binBot binTop' ) # theIdx = ( (binTop - d[ binCol ]) < binSize ) & ( ( binTop - d[ binCol ] ) > 0 ) theIdx = ( binBot < d[ binCol ].values ) & ( d[ binCol ].values <= binTop ) dbg( 'binBot binTop' ) DotData( names = ('rows',), Columns = theIdx.nonzero() ).saveToSV( 'nz%02d.tsv' % i ) #rowsStr = ','.join(map(str,list(theIdx.nonzero()))) #print 'binnedRows=', rowsStr hereVals = d[ theIdx ][ valCol ] DotData( names = ( 'temp', ), Columns = ( hereVals, ) ).saveToSV( 'temp2%2d.tsv' % i ) dbg( '"BEF" theIdx.sum() i bins[i] bins[i+1] len(hereVals)' ) counts[i] += len( hereVals ) sums[i] += np.sum( hereVals ) sumsSq[i] += np.sum( hereVals * hereVals ) dbg( '"AFT" i bins[i] bins[i+1] len(hereVals)' ) if False: # fast version binsHere = np.digitize( d[ binCol ], bins ) - 1 dbg( 'len(binsHere) binsHere' ) np.clip( binsHere, 0, binCount-1, out = binsHere ); dbg( 'binsHere' ) counts += np.bincount( binsHere, minlength = binCount ) sums += np.bincount( binsHere, weights = d[ valCol ], minlength = binCount ) sumsSq += np.bincount( binsHere, weights = d[ valCol ] * d[ valCol ], minlength = binCount ) countsOrig = counts.astype( int ) counts[ counts == 0 ] = np.nan means = sums / counts stds = sumsSq / counts - means * means return pd.DataFrame( dict( binBeg = bins[:-1], binEnd = bins[1:], counts = countsOrig, sums = sums, sumsSq = sumsSq, means = means, stds = stds ) )
195fc421b3091725e629e05e27c64a862c7fc31b
35,555
def Compute7(surface, multiple=False):
    """
    Computes an AreaMassProperties for a surface.

    Args:
        surface (Surface): Surface to measure.

    Returns:
        AreaMassProperties: The AreaMassProperties for the given Surface or None on failure.
    """
    url = "rhino/geometry/areamassproperties/compute-surface"
    if multiple:
        url += "?multiple=true"
    args = [surface]
    if multiple:
        args = [[item] for item in surface]
    response = Util.ComputeFetch(url, args)
    return response
f844f43b372ac2c8f124c27e346f9805583c3240
35,556
from typing import Optional
from typing import Union


def instantiate(config: Optional[Union[str, ClassDescription]], *args, **extra_kwargs):
    """Instantiates class given by `config.cls` with optional args given by `config.args`"""
    try:
        if config is None:
            return None
        elif type(config) is str:
            cls = _locate(config)
            return cls()
        else:
            cls = _locate(config['cls'])
            kwargs = config.get('args', dict())
            return cls(
                *args,
                **extra_kwargs,
                **kwargs
            )
    except:
        logger.exception(f"Could not instantiate from config {config}")
        raise
c0f8f951212bb157b4ee8805615f83bad82db8e2
35,557
async def get_hm_generic_entity(
    central_unit: CentralUnit, address: str, parameter: str
) -> GenericEntity | None:
    """Return the hm generic_entity."""
    hm_device = get_hm_device(central_unit, address)
    assert hm_device
    hm_entity = hm_device.entities.get((address, parameter))
    assert hm_entity
    return hm_entity
25f2845951d7ab34155b7ee2347a8438b115a1eb
35,558
def dists2corners_numpy(a):
    """
    :param a: dist ndarray, shape = (*, h, w, 4=(t, r, b, l))
    :return a: Box ndarray, shape is (*, h, w, 4=(xmin, ymin, xmax, ymax))
    """
    assert a.ndim >= 3, 'must be greater than 3d'
    h, w, _ = a.shape[-3:]
    # shape = (*, h, w, 4=(xmin, ymin, xmax, ymax))
    ret = np.zeros_like(a)
    widths, heights = np.meshgrid(np.arange(w), np.arange(h))
    # shape = (h, w, 1)
    widths, rights, lefts = np.broadcast_arrays(widths, a[..., 1], a[..., 3])
    heights, tops, bottoms = np.broadcast_arrays(heights, a[..., 0], a[..., 2])

    xmin = np.expand_dims(widths - lefts, axis=-1)     # xmin
    ymin = np.expand_dims(heights - tops, axis=-1)     # ymin
    xmax = np.expand_dims(widths + rights, axis=-1)    # xmax
    ymax = np.expand_dims(heights + bottoms, axis=-1)  # ymax

    ret[..., ::2] = np.clip(np.concatenate((xmin, xmax), axis=-1), 0, w)
    ret[..., 1::2] = np.clip(np.concatenate((ymin, ymax), axis=-1), 0, h)

    return ret
3f20df4cb3bc8ab1f1a595e64514e7b722fd07a1
35,559
def cryptowatch_ohlc_data_for_pair(Pair):
    """gets ohlc data from cryptowatch, and return a dict of dataframes"""
    exchanges = list(Pair.markets)
    data = {}
    for ex in exchanges:
        data[ex] = cryptowatch_ohlc(Pair.name, ex, Pair.period, Pair.start, Pair.end)
    return data
4f482ae1575d68e193321db0c2bd7346d4bf0950
35,560
from io import StringIO


def to_string_stream(x):
    """
    For modules that require a encoding as ``str`` in both Python 2 and
    Python 3, we can't just encode automatically.
    """
    if PY2 and isinstance(x, text_type):
        x = x.encode('utf-8')
    return StringIO(x)
8fd36bab988410881a560d27d4e8d54d36f58634
35,561
def run_epoch(model, data, optimizer):
    """
    Run a train and validation epoch and return average bpd for each.
    """
    traindata, valdata = data

    model.train()
    train_bpd = epoch_iter(model, traindata, optimizer)

    model.eval()
    val_bpd = epoch_iter(model, valdata, optimizer)

    return train_bpd, val_bpd
5f441470347818e1c4f0fcdd67467100920bd6a0
35,563
def special_loss(logits, labels):
    """
    This loss (and the rest of the training procedure) was taken from
    Philipp Kraehenbuehl's code.
    """
    mask = labels != 255
    labels1 = tf.clip_by_value(labels, 0, 1)
    lz = tf.nn.softplus(-tf.abs(logits)) * mask
    return tf.reduce_sum(lz + (tf.to_float(logits > 0) - labels1) * logits * mask, 1)
2dd1d3c967cf653a719ff993ff0b2de2d90404e2
35,564
def tobs(): """Return a list of all tobs for the most active station as a list of JSON""" # Query most active station station_list = session.query(Measurement.station, func.count('*')).group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all() mostactive=station_list[0][0] # Query the last 12 months of temperature observation data for this station and plot the results as a histogram mostactive_latestdate = session.query(Measurement).filter(Measurement.station==mostactive).order_by(Measurement.date.desc()).limit(1) for value in mostactive_latestdate: latestdate = value.date #Converting the date to from str format to date format to calculate the date 1 year ago converted_activedate = dt.datetime.strptime(latestdate, "%Y-%m-%d") # Calculate the date 1 year ago from the last data point of the most active station in the database date_one_activeyear = converted_activedate - dt.timedelta(days=365) temp_data = session.query(Measurement.tobs).filter(Measurement.station==mostactive).filter(Measurement.date >= date_one_activeyear).order_by(Measurement.date).all() # Create a list from the row data all_tobs = [] for tobs in temp_data: tobs_dict = {} tobs_dict["tobs"] = tobs all_tobs.append(tobs_dict) return jsonify(all_tobs)
ad7461c3dacf715fee38a4fcdc292e58b2c48d2a
35,565
from scipy.special import lambertw


def prox_max_entropy(X, step, gamma=1, type="relative"):
    """Proximal operator for maximum entropy regularization.

    g(x) = gamma sum_i x_i ln(x_i) has the analytical solution of
    gamma W(1/gamma exp((X-gamma)/gamma)), where W is the Lambert W function.

    If type == 'relative', the penalty is expressed in units of the function
    value; if type == 'absolute', it's expressed in units of the variable `X`.
    """
    assert type in ["relative", "absolute"]
    if type == "relative":
        gamma_ = _step_gamma(step, gamma)
    else:
        gamma_ = gamma
    # minimize entropy:
    return gamma_ * np.real(lambertw(np.exp((X - gamma_) / gamma_) / gamma_))
    # NOTE: the statements below are unreachable because of the return above;
    # they are kept as found in the source.
    above = X > 0
    X[above] = gamma_ * np.real(lambertw(np.exp(X[above] / gamma_ - 1) / gamma_))
    return X
7d5f1525ad1b7c413bad03b3aeba7b0af2e163ee
35,567
def phase_vocode(audio_data: np.ndarray, speed: float) -> np.ndarray:
    """Applies phase vocoding to a 'np.ndarray' representing WAV data."""
    reader = ArrayReader(audio_data.transpose())
    writer = ArrayWriter(reader.channels)
    phasevocoder(reader.channels, speed=speed).run(reader, writer)
    return writer.data.transpose()
e6d4d7e874c3f18c2c2a065b03d04d5e11d1963e
35,568
def partion_data_in_two(dataset, dataset_labels, in_sample_labels, oos_labels): """Partition dataset into in-distribution and OODs by labels. Args: dataset: the text from text_to_rank dataset_labels: dataset labels in_sample_labels: a list of newsgroups which the network will/did train on oos_labels: the complement of in_sample_labels; these newsgroups the network has never seen Returns: the dataset partitioned into in_sample_examples, in_sample_labels, oos_examples, and oos_labels in that order """ _dataset = dataset[:] # aliasing safeguard _dataset_labels = dataset_labels in_sample_idxs = np.zeros(np.shape(_dataset_labels), dtype=bool) ones_vec = np.ones(np.shape(_dataset_labels), dtype=int) for label in in_sample_labels: in_sample_idxs = np.logical_or(in_sample_idxs, _dataset_labels == label * ones_vec) oos_sample_idxs = np.zeros(np.shape(_dataset_labels), dtype=bool) for label in oos_labels: oos_sample_idxs = np.logical_or(oos_sample_idxs, _dataset_labels == label * ones_vec) return _dataset[in_sample_idxs], _dataset_labels[in_sample_idxs], _dataset[ oos_sample_idxs], _dataset_labels[oos_sample_idxs]
df3eda7d64c9060c6a8f1d45dbf5a0d5cd3c2436
35,570
def check_and_update_generation_args(args): """ checks all generation commandline arguments. Since these arguments are all lists and shorthand can be used, we expand them to match the expected length for instance, [1.0] becomes [1.0 1.0] if all other generation arguments are of length 2 """ hyperparameters = ['num_outputs', 'temperature', 'top_k', 'top_p', 'repetition_penalty', 'num_beams', 'num_beam_groups', 'diversity_penalty', 'no_repeat_ngram_size'] max_hyperparameter_len = max([len(getattr(args, h)) for h in hyperparameters]) valid_len = [1, max_hyperparameter_len] for h in hyperparameters: if (len(getattr(args, h)) not in valid_len): logger.error('Hyperparameters should either have the same number of values as others or have exactly one value.') # If only one value is provided, use the same value for all samples setattr(args, h, getattr(args, h) * (max_hyperparameter_len // len(getattr(args, h)))) logger.info('Will output %d sequences for each input.', sum(args.num_outputs)) return args
3d569b70b6ea4651af9cef0b7eafada1352b54a1
35,571
def category_condition_disable(request, structure_slug, category_slug, condition_id, structure): """ Disables a condition from a category :type structure_slug: String :type category_slug: String :type condition_id: Integer :type structure: OrganizationalStructure (from @is_manager) :param structure_slug: structure slug :param category_slug: category slug :param condition_id: condition id :param structure: structure object (from @is_manager) :return: render """ category = get_object_or_404(TicketCategory, organizational_structure=structure, slug=category_slug) condition = get_object_or_404(TicketCategoryCondition, pk=condition_id, category=category) if condition.is_active: condition.is_active = False condition.save(update_fields = ['is_active']) messages.add_message(request, messages.SUCCESS, _("Clausola {} disattivata con successo").format(condition)) # log action logger.info('[{}] manager of structure {}' ' {} disabled a condition' ' for category {}'.format(timezone.localtime(), structure, request.user, category)) else: messages.add_message(request, messages.ERROR, _("Clausola {} già disattivata").format(condition)) return redirect('uni_ticket:manager_category_detail', structure_slug=structure_slug, category_slug=category_slug)
87457c5b851bc791ec0e28d01100084948b9b58b
35,572
def stitch_image_pair(img_a, img_b, stitch_direc): """Function to stitch image B to image A in the mentioned direction Args: img_a (numpy array): of shape (H, W, C) with opencv representation of image A (i.e C: B,G,R) img_b (numpy array): of shape (H, W, C) with opencv representation of image B (i.e C: B,G,R) stitch_direc (int): 0 for vertical and 1 for horizontal stitching Returns: stitched_image (numpy array): stitched image with maximum content of image A and image B after cropping to remove the black space """ img_a_gray = cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY) img_b_gray = cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY) matches_a, matches_b = get_matches(img_a_gray, img_b_gray, num_keypoints=1000, threshold=0.8) h_mat = compute_homography_ransac(matches_a, matches_b) if stitch_direc == 0: canvas = cv2.warpPerspective(img_b, h_mat, (img_a.shape[1], img_a.shape[0] + img_b.shape[0])) canvas[0:img_a.shape[0], :, :] = img_a[:, :, :] x_start, y_start, x_end, y_end = get_crop_points(h_mat, img_a, img_b, 0) else: canvas = cv2.warpPerspective(img_b, h_mat, (img_a.shape[1] + img_b.shape[1], img_a.shape[0])) canvas[:, 0:img_a.shape[1], :] = img_a[:, :, :] x_start, y_start, x_end, y_end = get_crop_points(h_mat, img_a, img_b, 1) stitched_img = canvas[y_start:y_end,x_start:x_end,:] return stitched_img
cc8143a3e9ebdae9a01c79a31a8fb7753bf0e2ee
35,573
def as_composite(identifier: str) -> str: """ Translate the identifier of a mapry composite to a composite name in Python. :param identifier: mapry identifier of a composite :return: translated to a Python identifier of a composite >>> as_composite(identifier='Some_URL_class') 'SomeURLClass' """ return mapry.naming.ucamel_case(identifier=identifier)
9e08c59acc60fbdb4ca4667f9d6b06502d521083
35,574
def find_saturation(freq, saturation_values, attenuation):
    """
    Return a saturation value based on the frequency and amount of attenuation
    :param int freq: the freq of interest, in Hz
    :param saturation_values: a dict containing the saturation values (keys are frequencies)
    :param attenuation: the amount of attenuation applied
    :returns: the saturation value interpolated at the frequency, plus the attenuation
    """
    # sort the keys so they can be indexed (dict views cannot be) and so next_freq is the next higher frequency
    sat_freqs = sorted(saturation_values)
    closest_index = np.abs(np.subtract(sat_freqs, freq)).argmin()
    closest_freq = sat_freqs[closest_index]
    next_freq = sat_freqs[min(closest_index + 1, len(sat_freqs) - 1)]
    freq_diff = (next_freq - closest_freq)
    if freq_diff == 0:
        saturation = saturation_values[closest_freq]
    else:
        variance = abs(freq - closest_freq) / freq_diff
        closest_sat = saturation_values[closest_freq]
        saturation = closest_sat + abs(closest_sat - saturation_values[next_freq]) * variance
    saturation += attenuation
    return saturation
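# Example with hypothetical calibration data: saturation values measured at a few
# frequencies, queried at 3 kHz with 6 dB of attenuation applied.
saturation_values = {1000: -20.0, 2000: -18.5, 4000: -16.0}
sat = find_saturation(3000, saturation_values, attenuation=6.0)
# interpolates between the 2 kHz and 4 kHz entries, then adds the attenuation (-11.25 here)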
a7f54ef3d80d0c1fbb5e51172812f2e4088dae1e
35,575
def lstm_temporal(x, h0, Wx, Wh, b):
    """
    Forward pass for an LSTM over an entire sequence of data. We assume an input
    sequence composed of T vectors, each of dimension D. The LSTM uses a hidden
    size of H, and we work over a minibatch containing N sequences. After running
    the LSTM forward, we return the hidden states for all timesteps.

    Note that the initial hidden state is passed as input, but the initial cell
    state is set to zero. Also note that the cell state is not returned; it is
    an internal variable to the LSTM and is not accessed from outside.

    Inputs:
    - x: Input data of shape (N, T, D)
    - h0: Initial hidden state of shape (N, H)
    - Wx: Weights for input-to-hidden connections, of shape (D, 4H)
    - Wh: Weights for hidden-to-hidden connections, of shape (H, 4H)
    - b: Biases of shape (4H,)

    Returns:
    - h: Hidden states for all timesteps of all sequences, of shape (N, T, H)
    """
    N, T, _ = x.shape
    _, H = h0.shape
    c = np.zeros([N, 0, H])
    h = np.zeros([N, 0, H])
    for t in range(T):
        h_step, c_step = lstm_step(x[:, t, :],
                                   h[:, t - 1, :] if t > 0 else h0,
                                   c[:, t - 1, :] if t > 0 else np.zeros((N, H)),
                                   Wx, Wh, b)
        h_step = h_step.reshape(N, 1, H)
        c_step = c_step.reshape(N, 1, H)
        h = np.append(h, h_step, axis=1)
        c = np.append(c, c_step, axis=1)
    return h
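# Quick shape check (a sketch; assumes the lstm_step helper used above is importable).
import numpy as np

N, T, D, H = 4, 10, 8, 16
x = np.random.randn(N, T, D)
h0 = np.zeros((N, H))
Wx = np.random.randn(D, 4 * H)
Wh = np.random.randn(H, 4 * H)
b = np.zeros(4 * H)
h = lstm_temporal(x, h0, Wx, Wh, b)
assert h.shape == (N, T, H)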
04cd724d1fb7a996986b97d33b5a38ecefaf8185
35,576
from numpy.distutils.cpuinfo import cpu import psutil from typing import OrderedDict import platform def get_info_hardware(): """Create a dictionary for CPU information.""" def _cpu_freq(): """psutil can return `None` sometimes, esp. in Travis.""" func = "psutil.cpu_freq: " try: hz = psutil.cpu_freq() except IOError: return (func + "IOError",) * 3 # See psutil issue #1071 except AttributeError: return (func + "AttributeError",) * 3 # See psutil issue #1006 except NotImplementedError: return ( func + "NotImplementedError", ) * 3 # on occigen (cluster cines) if hz is None: return (func + "None",) * 3 # See psutil issue #981 else: ret = [] for h in hz: try: h = f"{h:.3f}" except TypeError: pass ret.append(h) return ret try: # Keys are specific to Linux distributions only info_hw = filter_modify_dict( cpu.info[0], [ "uname_m", "address sizes", "bogomips", "cache size", "model name", "cpu cores", "siblings", ], [ "arch", "address_sizes", "bogomips", "cache_size", "cpu_name", "nb_cores", "nb_siblings", ], ) info_hw["cpu_MHz_actual"] = [] for d in cpu.info: info_hw["cpu_MHz_actual"].append(float(d["cpu MHz"])) except KeyError as e: print("KeyError with", e) info_hw = OrderedDict() hz_current, hz_min, hz_max = _cpu_freq() info_hw_alt = OrderedDict( ( ("arch", platform.machine()), ("cpu_name", platform.processor()), ("nb_procs", psutil.cpu_count()), ("cpu_MHz_current", hz_current), ("cpu_MHz_min", hz_min), ("cpu_MHz_max", hz_max), ) ) info_hw = update_dict(info_hw, info_hw_alt) return info_hw
33d6dc139218d1e45907bb58100cdc95300d8785
35,577
from pathlib import Path def read_main_results_file(): """Return a Series where each row is one of the COMSOL Main results""" results_filepath = Path(MAIN_RESULTS_FILENAME) results_series = pd.read_table(results_filepath, sep=" ", squeeze=True, index_col=0, header=None) results_series.index.name = None results_series.name = "COMSOL Main Results" return results_series
b5f66df57cd89763af2d95fb22039c49a71f22c5
35,578
def games_per_time_period(df, days_per_period=7):
    """Return a list with the number of matches played in each period of `days_per_period` days, from the first to the last recorded date in the dataframe"""
    # define start and end dates to observe
    start = df['date'].min()
    # convert the recorded final date of play to a datetime and back to a string for pd.date_range
    end = pd.to_datetime(df['date'].max())
    end = end.strftime('%Y-%m-%d')  # back to string
    # list end dates of each time period starting from first recorded day
    dates = [date.strftime('%Y-%m-%d')  # format all pandas datetime values as string
             for date in pd.date_range(start=start, end=end)]  # periodise between dates by specified unit
    # get match counts per time period
    counts = []
    periods = []
    while len(dates) > 0:
        start = dates[0]
        end = dates[days_per_period - 1] if len(dates) >= days_per_period else dates[len(dates) - 1]
        period = df[(df['date'] >= start) & (df['date'] <= end)]
        count = len(period)
        periods.append((start, end))
        counts.append(count)
        del dates[:days_per_period]
    return counts
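# A small worked example with hypothetical data: four matches spread over ten days,
# bucketed into 7-day periods.
import pandas as pd

df = pd.DataFrame({'date': ['2021-01-01', '2021-01-03', '2021-01-05', '2021-01-10']})
games_per_time_period(df, days_per_period=7)  # -> [3, 1]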
e0e327b8cdc8b04f2fde826cef90585e7e7be4cb
35,581
def get_image_dims(image_data, check_is_rgb=False): """Decodes image and return its height and width. Args: image_data: Bytes data representing encoded image. check_is_rgb: Whether to check encoded image is RGB. Returns: Decoded image size as a tuple of (height, width) Raises: ValueError: If check_is_rgb is set and input image has other format. """ image = read_image(image_data) if check_is_rgb and image.mode != 'RGB': raise ValueError('Expects RGB image data, gets mode: %s' % image.mode) width, height = image.size return height, width
745ff6dccc5c7ebb6052abc142e0b8a0ceda269a
35,582
def open_mic_stream(pa, device_index, device_name):
    """ Open a microphone stream from the best matching input device. """
    # only look the device up by name when no explicit index was given (index 0 is valid)
    if device_index is None and device_name:
        device_index = find_input_device(device_name)

    stream = pa.open(format=FORMAT,
                     channels=CHANNELS,
                     rate=RATE,
                     input=True,
                     input_device_index=device_index,
                     frames_per_buffer=INPUT_FRAMES_PER_BLOCK)

    return stream
8e69211e4aab87ca684a5ebf8ba222e2551f6734
35,583
def color_threshold(img): """ RGB color space threshold :param img: Undistorted image :return: Thresholded binary image Ref: Course notes """ yellow = select_yellow(img) white = select_white(img) combined_binary = np.zeros_like(yellow) combined_binary[(yellow >= 1) | (white >= 1)] = 1 return combined_binary
f11ddf7e452d7b8891562500819e41f9ea30bf0f
35,584
def perturb_box(box, min_iou=0.5, sigma_factor=0.1, rng=None):
    """ Perturb the input box by adding gaussian noise to the co-ordinates

    args:
        box - input box
        min_iou - minimum IoU overlap between input box and the perturbed box
        sigma_factor - amount of perturbation, relative to the box size. Can be either a single element, or a list of
                       sigma_factors, in which case one of them will be uniformly sampled. Further, each of the
                       sigma_factor elements can be either a float, or a tensor of shape (4,) specifying the
                       sigma_factor per co-ordinate

    returns:
        array - the perturbed box
    """
    if rng is None:
        rng = np.random
    if isinstance(sigma_factor, list):
        # If list, sample one sigma_factor as current sigma factor
        c_sigma_factor = rng.choice(sigma_factor)
    else:
        c_sigma_factor = sigma_factor

    if not isinstance(c_sigma_factor, np.ndarray):
        c_sigma_factor = c_sigma_factor * np.ones(4)

    perturb_factor = np.sqrt(box[2] * box[3]) * c_sigma_factor

    # multiple tries to ensure that the perturbed box has iou > min_iou with the input box
    for i_ in range(100):
        c_x = box[0] + 0.5 * box[2]
        c_y = box[1] + 0.5 * box[3]
        c_x_per = rng.normal(c_x, perturb_factor[0])
        c_y_per = rng.normal(c_y, perturb_factor[1])

        w_per = rng.normal(box[2], perturb_factor[2])
        h_per = rng.normal(box[3], perturb_factor[3])

        if w_per <= 1:
            w_per = box[2] * rand_uniform(0.15, 0.5, rng)[0]

        if h_per <= 1:
            h_per = box[3] * rand_uniform(0.15, 0.5, rng)[0]

        box_per = np.round(
            np.array(
                [c_x_per - 0.5 * w_per, c_y_per - 0.5 * h_per, w_per, h_per]))

        # index the sampled value as above so a scalar, not a length-1 array, is assigned
        if box_per[2] <= 1:
            box_per[2] = box[2] * rand_uniform(0.15, 0.5, rng)[0]

        if box_per[3] <= 1:
            box_per[3] = box[3] * rand_uniform(0.15, 0.5, rng)[0]

        box_iou = iou(np.reshape(box, (1, 4)), np.reshape(box_per, (1, 4)))

        # if there is sufficient overlap, return
        if box_iou > min_iou:
            return box_per, box_iou

        # else reduce the perturb factor
        perturb_factor *= 0.9

    return box_per, box_iou
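# A minimal sketch (assumes the iou and rand_uniform helpers used above are available):
# jitter a [x, y, w, h] box while keeping at least 50% overlap with the original.
import numpy as np

rng = np.random.RandomState(0)
box = np.array([50.0, 40.0, 100.0, 80.0])
box_per, box_iou = perturb_box(box, min_iou=0.5, sigma_factor=0.1, rng=rng)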
5ad15e766123f887e6cfc19b818efe4542e06148
35,585
def smootherstep(a, b, x):
    """Improved S-curve interpolation function.

    Based on reference implementation of the improved algorithm proposed
    by Ken Perlin that is available at https://en.wikipedia.org/wiki/Smoothstep
    """
    x = clamp((x - a)/(b - a), 0.0, 1.0)
    return x*x*x*(x*(x*6 - 15) + 10)
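# Quick sanity check; `clamp` is assumed to be a simple helper like the one below.
def clamp(x, lo, hi):
    return max(lo, min(hi, x))

smootherstep(0.0, 1.0, -0.5)  # -> 0.0 (clamped below the range)
smootherstep(0.0, 1.0, 0.5)   # -> 0.5
smootherstep(0.0, 1.0, 1.5)   # -> 1.0 (clamped above the range)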
6fa2e8694f2171f0e73aa19042aad2c69384a9e7
35,586
def _clean_conargs(**conargs): """Clean connection arguments""" conargs['metadata'] = [x.strip() for x in conargs['metadata'].split(',') if x.strip()] return conargs
f5942f750949ab674bd99778e79ea35c2d0bb775
35,587
from flask import jsonify

def select_customer():
    """Select a customer from the Customer table by TRN."""
    cursor = mysql.cursor(MySQLdb.cursors.DictCursor)
    TRN = request.form['TRN']
    cursor.execute('''SELECT * FROM Customer WHERE TRN=%s''', (TRN,))
    data = cursor.fetchone()
    if data:
        return jsonify(data)
    return jsonify({})
8353a0d9bbcea731dd1ce0e2f031c22be5b673fe
35,588
def is_date(string):
    """
    Returns whether the string can be interpreted as a date using Python's
    dateutil.parser library. If it can be parsed, it returns True, and
    False otherwise.
    """
    try:
        parse(string, fuzzy=False)
        return True
    except ValueError:
        return False
d07ad17c8d3a24b28e21b42c0907e6e2c281f8d2
35,589
def rosin_rammler(nbins, d50, md_total, sigma, rho_p, rho): """ This function is deprecated: Use psf.rosin_rammler() instead. Return the volume size distribution from the Rosin Rammler distribution Returns the fluid particle diameters in the selected number of bins on a volume basis from the Rosin Rammler distribution with parameters k = -ln(0.5) and alpha = 1.8. Parameters ---------- nbins : int Desired number of size bins in the particle volume size distribution d50 : float Volume mean particle diameter (m) md_total : float Total particle mass flux (kg/s) sigma : float Interfacial tension between the phase undergoing breakup and the ambient receiving continuous phase (N/m) rho_p : float Density of the phase undergoing breakup (kg/m^3) rho : float Density of the ambient receiving continuous phase (kg/m^3) Returns ------- de : ndarray Array of particle sizes at the center of each bin in the distribution (m) md : ndarray Total mass flux of particles corresponding to each bin (kg/s) Notes ----- This method computes the un-truncated volume size distribution from the Rosin-Rammler distribution and then enforces that all particle sizes be less than the maximum stable size by moving mass in larger sizes to the maximum stable size bin. References ---------- Johansen, Brandvik, and Farooq (2013), "Droplet breakup in subsea oil releases - Part 2: Predictions of droplet size distributions with and without injection of chemical dispersants." Marine Pollution Bulletin, 73: 327-335. doi:10.1016/j.marpolbul.2013.04.012. """ # Get the maximum stable size dmax = de_max(sigma, rho_p, rho) # Define the parameters of the distribution k = np.log(0.5) alpha = 1.8 de, V_frac = psf.rosin_rammler(nbins, d50, k, alpha) # Compute the mass fraction for each diameter md = V_frac * md_total # Truncate the distribution at the maximum stable droplet size imax = -1 for i in range(len(de)): if de[i] > dmax: if imax < 0: imax = i de[i] = dmax else: md[imax] += md[i] md[i] = 0. # Return the particle size distribution return (de, md)
3807c8c965caae492a3fecf6e803685cf7315710
35,591
def _get_layer(layer_idx, nn, for_pres): """ Returns a tuple representing the layer label. """ if nn.layer_labels[layer_idx] in ['ip', 'op']: fill_colour = _IPOP_FILLCOLOR elif nn.layer_labels[layer_idx] in ['softmax', 'linear']: fill_colour = _DECISION_FILLCOLOR else: fill_colour = _FILLCOLOR label = nn.get_layer_descr(layer_idx, for_pres) return (str(layer_idx), {'label': label, 'shape': 'rectangle', 'fillcolor': fill_colour, 'style': 'filled', 'fontname': _LAYER_FONT})
74ea378c5490eaf3c1e2cf7ab2413a55566725ea
35,592
from resistics.sampling import to_timedelta
def inc_duration(win_size: int, olap_size: int, fs: float) -> RSTimeDelta:
    """
    Get the increment between window start times

    If the overlap size = 0, then the time increment between windows is simply
    the window duration. However, when there is an overlap, the increment
    between window start times has to be adjusted by the overlap size

    Parameters
    ----------
    win_size : int
        The window size in samples
    olap_size : int
        The overlap size in samples
    fs : float
        The sampling frequency in Hz

    Returns
    -------
    RSTimeDelta
        The increment between window start times

    Examples
    --------
    >>> from resistics.window import inc_duration
    >>> increment = inc_duration(128, 32, 128)
    >>> print(increment)
    0:00:00.75
    >>> increment = inc_duration(128*3600, 128*60, 128)
    >>> print(increment)
    0:59:00
    """
    return to_timedelta(1 / fs) * float(win_size - olap_size)
bdf754e3b1ace20cac1f99ef19dd04727594d710
35,593
def get_nearest(kd_node, point, dim, dist_func, return_distances=False, i=0, best=None): """ Find the closest neighbour of a point in a list of points using a KD-Tree. Based on a recipe from code.activestate.com """ if kd_node: dist = dist_func(point, kd_node[2]) dx = kd_node[2][i] - point[i] if not best: best = [dist, kd_node[2]] elif dist < best[0]: best[0], best[1] = dist, kd_node[2] i = (i + 1) % dim # Goes into the left branch, and then the right branch if needed get_nearest( kd_node[dx < 0], point, dim, dist_func, return_distances, i, best) if dx * dx < best[0]: get_nearest( kd_node[dx >= 0], point, dim, dist_func, return_distances, i, best) return best if return_distances else best[1]
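# A companion tree builder from the same activestate recipe (a sketch, not part of the
# source): each node is [left_child, right_child, point], matching what get_nearest expects.
def make_kd_tree(points, dim, i=0):
    if len(points) > 1:
        points.sort(key=lambda p: p[i])
        i = (i + 1) % dim
        half = len(points) >> 1
        return [make_kd_tree(points[:half], dim, i),
                make_kd_tree(points[half + 1:], dim, i),
                points[half]]
    elif len(points) == 1:
        return [None, None, points[0]]

def dist_sq(a, b):
    return sum((x - y) ** 2 for x, y in zip(a, b))

tree = make_kd_tree([(1, 2), (3, 4), (5, 1), (2, 7)], dim=2)
get_nearest(tree, (3, 3), dim=2, dist_func=dist_sq)  # -> (3, 4)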
480ac80404d16cb8ed4de9093a56326b15ad56b9
35,594
def plot_estimated_vs_simulated_edges( graph, sp_Graph, lrn=None, max_res_nodes=None, lamb=1.0 ): """Function to plot estimated vs simulated edge weights to look for significant deviations """ assert lamb >= 0.0, "lambda must be non-negative" assert type(lamb) == float, "lambda must be float" # both variables below are long range nodes but lrn is from the simulated and max_res_nodes is from the empirical assert type(lrn) == list, "lrn must be a list of int 2-tuples" assert type(max_res_nodes) == list, "max_res_nodes must be a list of int 2-tuples" # getting edges from the simulated graph idx = [list(graph.edges).index(val) for val in lrn] sim_edges = np.append(np.array([graph[val[0]][val[1]]["w"] for i, val in enumerate(graph.edges) if i not in idx]), np.array([graph[val[0]][val[1]]["w"] for i, val in enumerate(graph.edges) if i in idx])) idx = [list(sp_Graph.edges).index(val) for val in max_res_nodes] w_plot = np.append(sp_Graph.w[[i for i in range(len(sp_Graph.w)) if i not in idx]], sp_Graph.w[idx]) X = sm.add_constant(sim_edges) mod = sm.OLS(w_plot[range(len(graph.edges))], X) res = mod.fit() muhat, betahat = res.params # getting index of long range edges lre_idx = [list(graph.edges).index(val) for val in lrn] fig = plt.figure(dpi=100) ax = fig.add_subplot() ax.scatter(sim_edges, w_plot[range(len(sim_edges))], marker=".", alpha=1, zorder=0, color="grey", s=3) ax.scatter(sim_edges[-len(lrn)::], w_plot[-len(lrn)::], marker=".", alpha=1, zorder=0, color="black", s=10) x_ = np.linspace(np.min(sim_edges), np.max(sim_edges), 20) ax.plot(x_, muhat + betahat * x_, zorder=2, color="orange", linestyle='--', linewidth=1) ax.text(0.8, 0.05, "R²={:.4f}".format(res.rsquared), transform=ax.transAxes) ax.text(0.8, 0.15, "$\lambda$={:.3}".format(lamb), transform=ax.transAxes) ax.set_xlabel("simulated edge weights") ax.set_ylabel("estimated edge weights") return(None)
7921715ccedaa192f291b228e9b12963ae8789dd
35,595
def index_shape(geometry: Column, resolution: Column): """ Generate an H3 spatial index for an input GeoJSON geometry column. This function accepts GeoJSON `Point`, `LineString`, `Polygon`, `MultiPoint`, `MultiLineString`, and `MultiPolygon` input features, and returns the set of H3 cells at the specified resolution which completely cover them (could be more than one cell for a substantially large geometry and substantially granular resolution). The schema of the output column will be `T.ArrayType(T.StringType())`, where each value in the array is an H3 cell. This spatial index can then be used for bucketing, clustering, and joins in Spark via an `explode()` operation. """ return _index_shape(geometry, resolution)
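# A hedged usage sketch for Spark: `df` is assumed to be a DataFrame with a GeoJSON
# geometry column; index every row at H3 resolution 9 and explode the cell array so the
# result can be used for joins and clustering. Column names here are illustrative.
from pyspark.sql import functions as F

indexed = df.withColumn("h3_cells", index_shape(F.col("geometry"), F.lit(9)))
exploded = indexed.withColumn("h3_cell", F.explode("h3_cells"))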
a921f58eb31f24c3bbfb7b0b4e013b75ec2ad0f4
35,596
def mat_mul( input_tensor, weight_tensor, activation=None, activation_params=None, name="mat_mul"): """Compute a matrix multiplication for `input_tensor` and `weight_tensor`. Args: input_tensor: A 2D `Tensor`. Shaped as `NC`, where `N` is batch size and `C` is number of channels. weight_tensor: A 2D `Tensor`. Shaped as `NC` or `CN`, where `N` is number of neurons and `C` is the same as in `input_tensor`. activation/activation_params: Activation function to use (optional). name: Operator name (optional). """ input_tensor, weight_tensor = common.check_and_add_layout_transform( name=name, op=types_pb2.InnerProduct, input_tensors=[input_tensor, weight_tensor]) weight_layout = weight_tensor.shape.layout actIdx = 1 if weight_layout == types_pb2.NC else 0 neuronIdx = 0 if weight_layout == types_pb2.NC else 1 assert (len(input_tensor.shape.dims) == 2 and len(weight_tensor.shape.dims) == 2 and input_tensor.shape.dims[1] == weight_tensor.shape.dims[actIdx]) output_tensor_dims = [ input_tensor.shape.dims[0], weight_tensor.shape.dims[neuronIdx] ] params = node_pb2.Params() if activation is not None: params.act_params.CopyFrom( activation_ops.to_proto(activation, activation_params)) return common.add_node( name=name, op=types_pb2.InnerProduct, input_tensors=[input_tensor, weight_tensor], output_tensors_dims=[output_tensor_dims], output_tensor_layout=types_pb2.NC, params=params)[0]
f4b7755d6701438cd622837c976958dc511aa591
35,597
def get_events(wrapper, student: str = None, time: str = None) -> dict: """ list events """ start, end = get_dates(time) event_data = wrapper.get_events(start_date=start, end_date=end, login=student) return event_data
b682190334646889770c723693941da8c251db68
35,598
def merge_preclusters_ld(preclusters):
    """
        Bundle together preclusters that satisfy the two criteria at the same time:
        * 1. gwas_snp of clusterA is within ld_snps of clusterB (in LD)
        * 2. (p-value of gwas_snp of clusterB * 1e-3) <= p-value of gwas_snp of clusterA (<= p-value of gwas_snp of clusterB)
        Args:
        * [ Cluster ]
        Returntype: [ Cluster ]
    """
    # sort preclusters by the order of p-value of GWAS SNPs, from lowest to highest
    preclusters.sort(key=lambda cluster: cluster.gwas_snps[0].pvalue)

    # create a variable for the status of each precluster: whether it has been grouped with any other
    group_status = [False] * len(preclusters)

    merged_clusters = []
    # clusterA - take its gwas_snp for checking
    for i, clusterA in enumerate(preclusters[:-1]):
        # generate temp_cluster
        if group_status[i]:
            where_is_A = [n for n, cluster in enumerate(merged_clusters) if clusterA.gwas_snps[0].snp.rsID in [gwas_snp.snp.rsID for gwas_snp in cluster.gwas_snps]]
            assert len(where_is_A), 'clusterA should be in one place only in merged_clusters'
            temp_cluster = merged_clusters.pop(where_is_A[0])
        else:
            temp_cluster = None

        # get the indices of what is to be merged with clusterA
        what_to_merge = []
        # clusterB - take its ld_snps and p-value of gwas_snp for comparison
        for k, clusterB in enumerate(preclusters[i + 1:]):
            k = k + i + 1

            # two clusters can only be merged when they are from the same chromosome
            if clusterB.ld_snps[0].chrom == clusterA.ld_snps[0].chrom:
                # first, check p-value
                if clusterB.gwas_snps[0].pvalue * 1e-3 <= clusterA.gwas_snps[0].pvalue:
                    # secondly, check ld_snps
                    if clusterA.gwas_snps[0].snp.rsID in [ld_snp.rsID for ld_snp in clusterB.ld_snps]:
                        # thirdly, check whether in temp_cluster
                        if temp_cluster is None:
                            what_to_merge += [k]
                        else:
                            if clusterB.gwas_snps[0].snp.rsID not in [gwas_snp.snp.rsID for gwas_snp in temp_cluster.gwas_snps]:
                                what_to_merge += [k]
                # no more following clusters will meet the criterion, so break out of clusterB's for-loop
                else:
                    break

        # if nothing to merge, put temp_cluster back
        if len(what_to_merge) == 0:
            if temp_cluster is not None:
                merged_clusters.append(temp_cluster)
        # if something to merge, do merge_clusters
        else:
            clusters_to_merge = []

            # for any that has already been merged with clusters other than clusterA, get that merged one and merge it with clusterA
            if any(group_status[m] for m in what_to_merge):
                for n in set(n for m in what_to_merge if group_status[m] for n, cluster in enumerate(merged_clusters) if preclusters[m].gwas_snps[0].snp.rsID in [gwas_snp.snp.rsID for gwas_snp in cluster.gwas_snps]):
                    clusters_to_merge.append(merged_clusters.pop(n))

            # for any that has not been merged with others, merge it with clusterA
            for m in what_to_merge:
                if not group_status[m]:
                    clusters_to_merge.append(preclusters[m])

            temp_cluster = merge_clusters([clusterA if temp_cluster is None else temp_cluster] + clusters_to_merge)
            merged_clusters.append(temp_cluster)

            # update the status of these clusters after merging
            for m in [i] + what_to_merge:
                group_status[m] = True

    # add those ungrouped clusters into merged_clusters
    clusters = merged_clusters + [preclusters[n] for n, s in enumerate(group_status) if not s]

    return clusters
64b0a82181df9afa352a796d4943b8392bb742d7
35,599
def construct_system(sp, scale=1.0): """Construct systems according to job statepoint. Parameters ---------- sp: dict (from job.sp) Dictionary contains information necessary to construct a system. Stored as state of job. The dictionary should resemble: {"molecule": str, "engine": str, "replica": int, "temperature": float (in K), "pressure": float (in kPa), "ensemble": str, "N_liquid": int, "N_vap": int, "box_L_liq": int (nm), "box_L_vap", int (nm), "init_liq_den": float (g/cm3), "init_vap_den": float (g/cm3), "mass": float (g/mol), "forcefield_name": str, "cutoff_style": str, "r_cut": float (in nm)} scale : float, default 1.0 Scale factor by which to scale the box. Useful for system initialization if a shrink step makes equilibration easier. Returns ------- [filled_liq_box, filled_vap_box] Return list of system as specified. """ # Update this dict as new recipes are made molecule_dict = { "methaneUA": MethaneUA(), "pentaneUA": PentaneUA(), "benzeneUA": None, "waterSPC/E": WaterSPC(), "ethanolAA": None, } molecule = molecule_dict[sp["molecule"]] molecule.name = sp["molecule"] liq_box = mb.Box([sp["box_L_liq"] * scale] * 3) filled_liq_box = mb.fill_box( compound=[molecule], n_compounds=[sp["N_liquid"]], box=liq_box ) if sp["box_L_vap"] and sp["N_vap"]: vap_box = mb.Box([sp["box_L_vap"] * scale] * 3) filled_vap_box = mb.fill_box( compound=[molecule], n_compounds=[sp["N_vap"]], box=vap_box ) return [filled_liq_box, filled_vap_box] else: return [filled_liq_box, None]
c0f610b527d7d4873d72e46438a9a9b67931f379
35,600
def _tile_images(imgs, tile_shape, concatenated_image, margin_color=None):
    """Concatenate images whose sizes are the same.

    @param imgs: image list which should be concatenated
    @param tile_shape: (x_num, y_num) grid into which the images are tiled
    @param concatenated_image: returned image. if it is None, new image will be created.
    @param margin_color: color used to fill empty grid cells (only when a new image is created)
    """
    x_num, y_num = tile_shape
    one_width = imgs[0].shape[1]
    one_height = imgs[0].shape[0]
    if concatenated_image is None:
        concatenated_image = np.zeros((one_height * y_num, one_width * x_num, 3),
                                      dtype=np.uint8)
        if margin_color is not None:
            concatenated_image[:, :] = margin_color
    for y in range(y_num):
        for x in range(x_num):
            i = x + y * x_num
            if i < len(imgs):
                concatenated_image[y * one_height:(y + 1) * one_height,
                                   x * one_width:(x + 1) * one_width] = imgs[i]
    return concatenated_image
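# A minimal sketch: tile six dummy 64x64 RGB images into a 3x2 grid with a black margin color.
import numpy as np

imgs = [np.full((64, 64, 3), c, dtype=np.uint8) for c in (30, 80, 130, 180, 230, 255)]
grid = _tile_images(imgs, tile_shape=(3, 2), concatenated_image=None, margin_color=(0, 0, 0))
assert grid.shape == (128, 192, 3)  # y_num * 64 rows, x_num * 64 columns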
0698245a08ce2115fe7154d07f8ccdfb228aca28
35,601
def true_positive_rate(y_true, y_pred):
    """Returns True Positive Rate. Wrapper function of metrics_K()

    Args:
        y_true (Tensorflow Tensor): Array of 'true' 0s and 1s
        y_pred (Tensorflow Tensor): Array of predicted 0s and 1s

    Returns:
        TPR (Tensor): True Positive Rate ([0:1])
    """
    TP, TN, FP, FN, TPR, SPC, FPR, ACC, MCC = metrics_K(y_true, y_pred)
    return TPR
602138bec793f4c80dabf12db987fd75769920f6
35,602
def create_camera_widget(name): """Create a camera control widget""" obj = create_widget(name) if not obj.data.vertices: verts = [(0.275136, 0, -0.275136), (0.359483, 0, -0.148903), (0.389102, 0, 0), (0.359483, 0, 0.148903), (0.275136, 0, 0.275136), (0.148903, 0, 0.359483), (-1.94818e-07, 0, 0.389102), (-1.17505e-07, 0, -0.389102), (0.148903, 0, -0.359483), (0.663549, 0, -0.0936016), (0.663549, 0, 0.0936016), (0.497656, 0, 0.0936016), (0.497656, 0, -0.0936017), (0.663549, 0, 0.173501), (0.663549, 0, -0.173501), (0.875195, 0, 0), (-0.148903, 0, 0.359483), (-0.148903, 0, -0.359483), (-0.275136, 0, -0.275136), (-0.359483, 0, -0.148903), (-0.389102, 0, 0), (-0.359483, 0, 0.148903), (-0.275136, 0, 0.275136), (1.03426e-07, 0, 0.875195), (0.173502, 0, 0.663549), (-0.173501, 0, 0.663549), (0.0936017, 0, 0.497656), (-0.0936016, 0, 0.497656), (-0.0936016, 0, 0.663549), (0.0936017, 0, 0.663549), (-0.0936015, 0, -0.663549), (0.0936017, 0, -0.663549), (0.0936017, 0, -0.497656), (-0.0936016, 0, -0.497656), (0.173502, 0, -0.663549), (-0.173501, 0, -0.663549), (9.42269e-08, 0, -0.875195), (-0.875195, 0, 0), (-0.663549, 0, 0.173501), (-0.663549, 0, -0.173502), (-0.497656, 0, 0.0936015), (-0.497656, 0, -0.0936018), (-0.663549, 0, -0.0936018), (-0.663549, 0, 0.0936015), ] edges = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (7, 8), (0, 8), (10, 11), (9, 12), (11, 12), (10, 13), (9, 14), (13, 15), (14, 15), (16, 22), (17, 18), (18, 19), (19, 20), (20, 21), (21, 22), (7, 17), (6, 16), (23, 24), (23, 25), (24, 29), (25, 28), (26, 29), (27, 28), (31, 32), (30, 33), (32, 33), (31, 34), (30, 35), (34, 36), (35, 36), (37, 38), (37, 39), (38, 43), (39, 42), (40, 41), (40, 43), (41, 42), (27, 26)] mesh = obj.data mesh.from_pydata(verts, edges, []) mesh.update() return obj
8cdac505c83a9a90bd6f7c4a415850a9d20d0646
35,603
def query_related(uri, filter=None, limit=20): """ Query for terms that are related to a term, or list of terms, according to the mini version of ConceptNet Numberbatch. """ if uri.startswith('/c/'): query = uri elif uri.startswith('/list/') and uri.count('/') >= 3: try: _, _list, language, termlist = uri.split('/', 3) query = [] term_pieces = termlist.split(',') for piece in term_pieces: if '@' in piece: term, weight = piece.split('@') weight = float(weight) else: term = piece weight = 1. query.append(('/c/{}/{}'.format(language, term), weight)) except ValueError: return error( {'@id': uri}, 400, "Couldn't parse this term list: %r" % uri ) else: return error( {'@id': uri}, 404, '%r is not something that I can find related terms to.' % uri ) found = VECTORS.similar_terms(query, filter=filter, limit=limit) related = [ {'@id': key, 'weight': round(float(weight), 3)} for (key, weight) in found.items() ] response = { '@id': uri, 'related': related } return response
18b2908bbe8559388b52697b515e8b1ad23bb3c5
35,604
import numpy
def chain(pairs):
    """ Group the unique values appearing in location pairs into runs of consecutive integers. """
    table, chain = [], []
    for i in numpy.unique(numpy.array(pairs).flatten()):
        if chain == []:
            chain.append(i)
            value = i
            continue
        if i == value + 1:
            chain.append(i)
            value = i
            continue
        table.append(chain)
        chain = []
        chain.append(i)
        value = i
    else:
        table.append(chain)
    return table
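# e.g. locations 1-3 form one consecutive run, 7-8 another
chain([(1, 2), (2, 3), (7, 8)])  # -> [[1, 2, 3], [7, 8]]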
01c55e204d56cbef9e08bd509422846bb844c7fe
35,605