Dataset columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M).
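The rows below pair each code snippet (content) with a 40-character sha1 and an integer id. As a minimal illustrative sketch only, assuming the rows are exported as JSON Lines with exactly these three fields and that sha1 is the SHA-1 digest of content (both are assumptions, not stated by the dataset itself), they could be read and sanity-checked like this:

import hashlib
import json

def iter_rows(path="rows.jsonl"):  # hypothetical file name, for illustration only
    """Yield (id, sha1, content) for each row, checking the assumed content/sha1 relationship."""
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            digest = hashlib.sha1(row["content"].encode("utf-8")).hexdigest()
            if digest != row["sha1"]:
                # assumed invariant; report rather than fail
                print("hash mismatch for id", row["id"])
            yield row["id"], row["sha1"], row["content"]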
def bbox_from_points(points):
    """Construct a numeric list representing a bounding box from polygon coordinates in page representation."""
    xys = [[int(p) for p in pair.split(',')] for pair in points.split(' ')]
    return bbox_from_polygon(xys)
75742907d85990ee3bbfa133d9ba51f70b3f76ee
11,535
def return_true():
    """Return True

    Simple function used to check liveness of workers.
    """
    return True
3c4b469ce28aef47723a911071f01bea9eb4cf27
11,536
def get_active_test_suite():
    """
    Returns the test suite that was last run

    >>> get_active_test_suite()
    "Hello"
    """
    return TEST_RUNNER_STATE.test_suite
dc578da283429480a872175ff1cd5462bf803925
11,538
def header_from_stream(stream, _magic=None) -> (dict, list, int): """ Parse SAM formatted header from stream. Dict of header values returned is structured as such: {Header tag:[ {Attribute tag: value}, ]}. Header tags can occur more than once and so each list item represents a different tag line. :param stream: Stream containing header data. :param _magic: Data consumed from stream while peeking. Will be prepended to read data. :return: Tuple containing (Dict of header values, list of Reference objects, placeholder to keep return value consistent with header_from_buffer()). """ header = defaultdict(list) while stream.peek(1)[0] == b'@': line = stream.readline() tag = line[1:2] if tag == b'CO': header[tag].append(line[4:]) else: header[tag].append({m[0]: m[1] for m in header_re.findall(line)}) return header, [bam.Reference(ref[b'SN'], int(ref[b'LN'])) for ref in header.pop(b'SQ')] if b'SQ' in header else [], 0
e0e071f38787950fa499344c63cc4040e5fccb23
11,539
def walk_binary_file_or_stdin(filepath, buffer_size=32768):
    """
    Yield 'buffer_size' bytes from filepath until EOF, or from standard input
    when 'filepath' is '-'.
    """
    if filepath == '-':
        return walk_binary_stdin(buffer_size)
    else:
        return walk_binary_file(filepath, buffer_size)
290ea9e159c8f0e3df6713b8abcd0c141cb4858a
11,540
def register_device() -> device_pb2.DeviceResponse: """ Now that the client credentials are set, the device can be registered. The device is registered by instantiating an OauthService object and using the register() method. The OauthService requires a Config object and an ISecureCredentialStore object to be constructed. Once the register method is called, a DeviceResponse object will be returned. NOTE: This function will check if the device id set in config.json has already been registered. If it has, then the DeviceResponse for the existing registered device will be returned. Otherwise, the registration will proceed and the DeviceResponse for the new registration will be returned. Returns: A DeviceResponse object indicating whether or not the device registration was successful """ oauth_service: OauthService = helpers.get_oauth_service() if check_device_is_registered(): print( f"Registration already exists for device_id = {helpers.environment_config['device_id']}" ) device_response: device_pb2.DeviceResponse = oauth_service.get_who_am_i() else: print(f"Registering device_id = {helpers.environment_config['device_id']}") device_response: device_pb2.DeviceResponse = oauth_service.register( device_id=helpers.environment_config["device_id"], device_name=helpers.environment_config["device_name"], credential=helpers.environment_config["tenant_secret"], ) save_environment_config() return device_response
5a3958456f55315fa91be4e60324be3e5d9d3af8
11,541
def _sto_to_graph(agent: af.SubTaskOption) -> subgraph.Node:
    """Convert a `SubTaskOption` to a `Graph`."""
    node_label = '{},{},{}'.format(agent.name or 'SubTask Option',
                                   agent.subtask.name or 'SubTask',
                                   agent.agent.name or 'Policy')
    return subgraph.Node(label=node_label, type='sub_task_option')
311f591be99bc045d2572b22f9cd3462bce2b10c
11,542
def filter_input(self, forced=False, context=None):
    """
    Passes each hunk (file or code) to the 'input'
    methods of the compressor filters.
    """
    content = []
    for hunk in self.hunks(forced, context=context):
        content.append(hunk)
    return content
1ea0ac16cf1e20732ad8c37b6126c80fe94d2ee5
11,543
def delete(request, user):
    """ Deletes a poll """
    poll_id = request.POST.get('poll_id')
    try:
        poll = Poll.objects.get(pk=poll_id)
    except:
        return JsonResponse({'error': 'Invalid poll_id'}, status=404)

    if poll.user.id != user.id:
        return JsonResponse({'error': 'You cannot delete this poll'}, status=403)

    poll.delete()
    return JsonResponse({'message': 'Poll was deleted'})
36a46e1b72cd06178ac00706c24451736fd454cd
11,544
def get_loader(path):
    """Gets the configuration loader for path according to file extension.

    Parameters:
        path: the path of a configuration file, including the filename extension.

    Returns the loader associated with path's extension within LOADERS.
    Throws an UnknownConfigurationException if no such loader exists.
    """
    for ext, loader in LOADERS:
        fullext = '.' + ext
        if path[-len(fullext):] == fullext:
            return loader
    raise exception.UnknownConfigurationException(
        "No configuration loader found for path '%s'" % path)
a122c67d6ebacf2943ec69765d5feab649f5c341
11,546
def mat_stretch(mat, target):
    """
    Changes times of `mat` in-place so that it has the same average BPM and
    initial time as target.

    Returns `mat` changed in-place.
    """
    in_times = mat[:, 1:3]
    out_times = target[:, 1:3]

    # normalize in [0, 1]
    in_times -= in_times.min()
    in_times /= in_times.max()

    # restretch
    new_start = out_times.min()
    in_times *= (out_times.max() - new_start)
    in_times += new_start

    return mat
204efb1d8a19c7efe0efb5710add62436a4b5cee
11,547
def parse_range(cpu_range):
    """Create cpu range object"""
    if '-' in cpu_range:
        [x, y] = cpu_range.split('-')  # pylint: disable=invalid-name
        cpus = range(int(x), int(y) + 1)
        if int(x) >= int(y):
            raise ValueError("incorrect cpu range: " + cpu_range)
    else:
        cpus = [int(cpu_range)]
    return cpus
51079648ffddbcba6a9699db2fc4c04c7c3e3202
11,548
def causal_parents(node, graph):
    """
    Returns the nodes (string names) that are causal parents of the node
    (have the edge type "causes_or_promotes"), else returns empty list.

    Parameters
        node - name of the node (string)
        graph - networkx graph object
    """
    node_causal_parents = []
    if list(graph.predecessors(node)):
        possibleCausalParents = graph.predecessors(node)
        for possibleCausalParent in possibleCausalParents:
            if graph[possibleCausalParent][node]["type"] == "causes_or_promotes":
                node_causal_parents.append(possibleCausalParent)
    return node_causal_parents
4618e9649d3ea37c9a3a0d8faf7a44b00e386f1c
11,549
async def user_me(current_user=Depends(get_current_active_user)):
    """
    Get own user
    """
    return current_user
40c5bb5a45cad8154489db3fc0da3c0fe54d783d
11,551
from typing import Optional
from typing import Tuple

import crypt


def get_password_hash(password: str, salt: Optional[str] = None) -> Tuple[str, str]:
    """Get user password hash."""
    salt = salt or crypt.mksalt(crypt.METHOD_SHA256)
    return salt, crypt.crypt(password, salt)
ea3d7e0d8c65e23e40660b8921aa872dc9e2f53c
11,552
def start_at(gra, key): """ start a v-matrix at a specific atom Returns the started vmatrix, along with keys to atoms whose neighbors are missing from it """ symb_dct = atom_symbols(gra) ngb_keys_dct = atoms_sorted_neighbor_atom_keys( gra, symbs_first=('X', 'C',), symbs_last=('H',), ords_last=(0.1,)) ngb_keys = ngb_keys_dct[key] if not ngb_keys: zma_keys = [] elif len(ngb_keys) == 1: # Need special handling for atoms with only one neighbor if symb_dct[key] in ('H', 'X'): key2 = ngb_keys[0] zma_keys = (key2,) + ngb_keys_dct[key2] else: key2 = ngb_keys[0] ngb_keys = tuple(k for k in ngb_keys_dct[key2] if k != key) zma_keys = (key, key2) + ngb_keys else: zma_keys = (key,) + ngb_keys_dct[key] vma = () for row, key_ in enumerate(zma_keys): idx1 = idx2 = idx3 = None if row > 0: key1 = next(k for k in ngb_keys_dct[key_] if k in zma_keys[:row]) idx1 = zma_keys.index(key1) if row > 1: key2 = next(k for k in ngb_keys_dct[key1] if k in zma_keys[:row] and k != key_) idx2 = zma_keys.index(key2) if row > 2: key3 = next(k for k in zma_keys[:row] if k not in (key_, key1, key2)) idx3 = zma_keys.index(key3) sym = symb_dct[key_] key_row = [idx1, idx2, idx3] vma = automol.vmat.add_atom(vma, sym, key_row) return vma, zma_keys
baa4d463316d47611a696bea456f8e1d0e4b5755
11,553
def associate_kitti(detections, trackers, det_cates, iou_threshold, velocities, previous_obs, vdc_weight): """ @param detections: """ if (len(trackers) == 0): return np.empty((0, 2), dtype=int), np.arange(len(detections)), np.empty((0, 5), dtype=int) """ Cost from the velocity direction consistency """ Y, X = velocity_direction_batch(detections, previous_obs) inertia_Y, inertia_X = velocities[:, 0], velocities[:, 1] inertia_Y = np.repeat(inertia_Y[:, np.newaxis], Y.shape[1], axis=1) inertia_X = np.repeat(inertia_X[:, np.newaxis], X.shape[1], axis=1) diff_angle_cos = inertia_X * X + inertia_Y * Y diff_angle_cos = np.clip(diff_angle_cos, a_min=-1, a_max=1) diff_angle = np.arccos(diff_angle_cos) diff_angle = (np.pi / 2.0 - np.abs(diff_angle)) / np.pi valid_mask = np.ones(previous_obs.shape[0]) valid_mask[np.where(previous_obs[:, 4] < 0)] = 0 valid_mask = np.repeat(valid_mask[:, np.newaxis], X.shape[1], axis=1) scores = np.repeat(detections[:, -1][:, np.newaxis], trackers.shape[0], axis=1) angle_diff_cost = (valid_mask * diff_angle) * vdc_weight angle_diff_cost = angle_diff_cost.T angle_diff_cost = angle_diff_cost * scores """ Cost from IoU """ iou_matrix = iou_batch(detections, trackers) """ With multiple categories, generate the cost for catgory mismatch """ num_dets = detections.shape[0] num_trk = trackers.shape[0] cate_matrix = np.zeros((num_dets, num_trk)) for i in range(num_dets): for j in range(num_trk): if det_cates[i] != trackers[j, 4]: cate_matrix[i][j] = -1e6 cost_matrix = - iou_matrix - angle_diff_cost - cate_matrix if min(iou_matrix.shape) > 0: a = (iou_matrix > iou_threshold).astype(np.int32) if a.sum(1).max() == 1 and a.sum(0).max() == 1: matched_indices = np.stack(np.where(a), axis=1) else: matched_indices = linear_assignment(cost_matrix) else: matched_indices = np.empty(shape=(0, 2)) unmatched_detections = [] for d, det in enumerate(detections): if (d not in matched_indices[:, 0]): unmatched_detections.append(d) unmatched_trackers = [] for t, trk in enumerate(trackers): if (t not in matched_indices[:, 1]): unmatched_trackers.append(t) # filter out matched with low IOU matches = [] for m in matched_indices: if (iou_matrix[m[0], m[1]] < iou_threshold): unmatched_detections.append(m[0]) unmatched_trackers.append(m[1]) else: matches.append(m.reshape(1, 2)) if (len(matches) == 0): matches = np.empty((0, 2), dtype=int) else: matches = np.concatenate(matches, axis=0) return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
8f3fb7628940cacd68de4f93b8469e212a12d854
11,554
import json
import uuid
import time


def seat_guest(self, speech, guest, timeout):
    """
    Start the view 'seatGuest'

    :param speech: the text that will be used by the Local Manager for tablet and vocal
    :type speech: dict
    :param guest: name of the guest to seat
    :type guest: string
    :param timeout: maximum time to wait for a reaction from the local manager
    :type timeout: float
    """
    goal = RequestToLocalManagerGoal(action="seatGuest",
                                     payload=json.dumps({
                                         'id': str(uuid.uuid4()),
                                         'timestamp': time.time(),
                                         'args': {
                                             'speech': speech,
                                             'guest': guest
                                         }
                                     }))
    return self._send_goal_and_wait(goal, timeout)
42a8ccd03638dfb48072c1d1b10c0c05a8f867ec
11,555
def retreive_retries_and_sqs_handler(task_id):
    """This function retrieves the number of retries and the SQS handler associated to an expired task

    Args:
        task_id(str): the id of the expired task

    Returns:
        rtype: dict

    Raises:
        ClientError: if DynamoDB query failed
    """
    try:
        response = table.query(
            KeyConditionExpression=Key('task_id').eq(task_id)
        )
        # Check that there is one and only one matching item
        return response.get('Items')[0].get('retries'), response.get('Items')[0].get('sqs_handler_id')
    except ClientError as e:
        errlog.log("Cannot retrieve retries and handler for task {} : {}".format(task_id, e))
        raise e
c432d9f73f8d1de8fbcf48b35e41a2879ca25954
11,556
import torch def decompose(original_weights: torch.Tensor, mask, threshould: float) -> torch.Tensor: """ Calculate the scaling matrix. Use before pruning the current layer. [Inputs] original_weights: (N[i], N[i+1]) important_weights: (N[i], P[i+1]) [Outputs] scaling_matrix: (P[i+1], N[i+1]) """ important_weights = convert_to_important_weights(original_weights, mask) msglogger.info("important_weights", important_weights.size()) scaling_matrix = torch.zeros(important_weights.size()[-1], original_weights.size()[-1]) msglogger.info("scaling_matrix", scaling_matrix.size()) msglogger.info("original_weights", original_weights.size()) for i, weight in enumerate(original_weights.transpose(0, -1)): if weight in important_weights.transpose(0, -1): scaling_matrix[important_weights.transpose(0, -1) == weight][i] = 1 else: most_similar_neuron, similarity, scale = most_similar(weight, important_weights) most_similar_neuron_index_in_important_weights = important_weights == most_similar_neuron if similarity >= threshould: scaling_matrix[most_similar_neuron_index_in_important_weights][i] = scale return scaling_matrix
844562c839b95eb172197f22781f2316639b2d95
11,557
def calc_Kullback_Leibler_distance(dfi, dfj):
    """
    Calculates the Kullback-Leibler distance of the two matrices,
    as defined in Aerts et al. (2003). Also called Mutual Information.
    Sort will be ascending.

    Epsilon is used here to avoid conditional code for checking that
    neither P nor Q is equal to 0.
    """
    epsilon = 0.00001
    P = dfi + epsilon
    Q = dfj + epsilon
    divergence = np.sum(P * np.log2(P / Q))
    return divergence
fd66434557598717db7cc73ca9a88fde9ab7e73d
11,558
def test_python_java_classes():
    """ Run Python tests against JPY test classes """
    sub_env = {'PYTHONPATH': _build_dir()}
    log.info('Executing Python unit tests (against JPY test classes)...')
    return jpyutil._execute_python_scripts(python_java_jpy_tests, env=sub_env)
11a9e43126799738a7c8e5cf8614cbe63d15cd2f
11,559
def trim_datasets_using_par(data, par_indexes):
    """
    Removes all the data points needing more fitting parameters than available.
    """
    parameters_to_fit = set(par_indexes.keys())
    trimmed_data = list()
    for data_point in data:
        if data_point.get_fitting_parameter_names() <= parameters_to_fit:
            trimmed_data.append(data_point)
    return trimmed_data
5a06f7f5662fb9d7b5190e0e75ba41c858a85d0b
11,560
def _parse_field(field: str) -> Field:
    """
    Parse the given string representation of a CSV import field.

    :param field: string or string-like field input
    :return: a new Field
    """
    name, _type = str(field).split(':')
    if '(' in _type and _type.endswith(')'):
        # split "type(id_space)" into its two parts, dropping the trailing ')'
        _type, id_space = _type[:-1].split('(')
        return Field(name or _type, FieldType.from_str(_type), id_space)
    return Field(name or _type, FieldType.from_str(_type))
413cc12675e57db57da75dd9044c1884e638282c
11,561
import time

import scipy


def removeNoise(
    audio_clip,
    noise_thresh,
    mean_freq_noise,
    std_freq_noise,
    noise_stft_db,
    n_grad_freq=2,
    n_grad_time=4,
    n_fft=2048,
    win_length=2048,
    hop_length=512,
    n_std_thresh=1.5,
    prop_decrease=1.0,
    verbose=False,
    visual=False,
):
    """Remove noise from audio based upon a clip containing only noise

    Args:
        audio_clip (array): The first parameter.
        noise_clip (array): The second parameter.
        n_grad_freq (int): how many frequency channels to smooth over with the mask.
        n_grad_time (int): how many time channels to smooth over with the mask.
        n_fft (int): number audio of frames between STFT columns.
        win_length (int): Each frame of audio is windowed by `window()`. The window will be of length
            `win_length` and then padded with zeros to match `n_fft`.
        hop_length (int): number audio of frames between STFT columns.
        n_std_thresh (int): how many standard deviations louder than the mean dB of the noise
            (at each frequency level) to be considered signal
        prop_decrease (float): To what extent should you decrease noise (1 = all, 0 = none)
        visual (bool): Whether to plot the steps of the algorithm

    Returns:
        array: The recovered signal with noise subtracted
    """
    if verbose:
        start = time.time()
    # STFT over noise
    if verbose:
        print("STFT on noise:", td(seconds=time.time() - start))
        start = time.time()
    # STFT over signal
    if verbose:
        start = time.time()
    sig_stft = _stft(audio_clip, n_fft, hop_length, win_length)
    sig_stft_db = _amp_to_db(np.abs(sig_stft))
    if verbose:
        print("STFT on signal:", td(seconds=time.time() - start))
        start = time.time()
    # Calculate value to mask dB to
    mask_gain_dB = np.min(_amp_to_db(np.abs(sig_stft)))
    print(noise_thresh, mask_gain_dB)
    # Create a smoothing filter for the mask in time and frequency
    smoothing_filter = np.outer(
        np.concatenate(
            [
                np.linspace(0, 1, n_grad_freq + 1, endpoint=False),
                np.linspace(1, 0, n_grad_freq + 2),
            ]
        )[1:-1],
        np.concatenate(
            [
                np.linspace(0, 1, n_grad_time + 1, endpoint=False),
                np.linspace(1, 0, n_grad_time + 2),
            ]
        )[1:-1],
    )
    smoothing_filter = smoothing_filter / np.sum(smoothing_filter)
    # calculate the threshold for each frequency/time bin
    db_thresh = np.repeat(
        np.reshape(noise_thresh, [1, len(mean_freq_noise)]),
        np.shape(sig_stft_db)[1],
        axis=0,
    ).T
    # mask if the signal is above the threshold
    sig_mask = sig_stft_db < db_thresh
    if verbose:
        print("Masking:", td(seconds=time.time() - start))
        start = time.time()
    # convolve the mask with a smoothing filter
    sig_mask = scipy.signal.fftconvolve(sig_mask, smoothing_filter, mode="same")
    sig_mask = sig_mask * prop_decrease
    if verbose:
        print("Mask convolution:", td(seconds=time.time() - start))
        start = time.time()
    # mask the signal
    sig_stft_db_masked = (
        sig_stft_db * (1 - sig_mask)
        + np.ones(np.shape(mask_gain_dB)) * mask_gain_dB * sig_mask
    )
    # mask real
    sig_imag_masked = np.imag(sig_stft) * (1 - sig_mask)
    sig_stft_amp = (_db_to_amp(sig_stft_db_masked) * np.sign(sig_stft)) + (
        1j * sig_imag_masked
    )
    if verbose:
        print("Mask application:", td(seconds=time.time() - start))
        start = time.time()
    # recover the signal
    recovered_signal = _istft(sig_stft_amp, hop_length, win_length)
    recovered_spec = _amp_to_db(
        np.abs(_stft(recovered_signal, n_fft, hop_length, win_length))
    )
    if verbose:
        print("Signal recovery:", td(seconds=time.time() - start))
    if visual:
        plot_spectrogram(noise_stft_db, title="Noise")
    if visual:
        plot_statistics_and_filter(
            mean_freq_noise, std_freq_noise, noise_thresh, smoothing_filter
        )
    if visual:
        plot_spectrogram(sig_stft_db, title="Signal")
    if visual:
        plot_spectrogram(sig_mask, title="Mask applied")
    if visual:
        plot_spectrogram(sig_stft_db_masked, title="Masked signal")
    if visual:
        plot_spectrogram(recovered_spec, title="Recovered spectrogram")
    return recovered_signal
3d92ae7427ab33cc875219b3f005ca86802dd4c2
11,562
from simulator import simulate import logging import json def handle_request(r): """Handle the Simulator request given by the r dictionary """ print ("handle_request executed .. ") print (r) # Parse request .. config = SimArgs() config.machine = r[u'machine'] config.overlay = [r[u'topology']] # List of topologies - just one config.group = r[u'cores'] overlay = r[u'topology'].split('-') overlay_name = overlay[0] overlay_args = overlay[1:] if overlay_name == 'hybrid': overlay_name = 'cluster' config.hybrid = True; config.hybrid_cluster = overlay_args[0]; config.overlay = [u'cluster'] if overlay_args == 'mm' : config.multimessage = True elif overlay_args == 'rev' : config.reverserecv = True c = config (last_nodes, leaf_nodes, root) = simulate(config) # Generate response to be sent back to client assert len(config.models)==1 # Exactly one model has been generated res = {} res['root'] = root res['model'] = config.models[0] res['last_node'] = last_nodes[0] res['leaf_nodes'] = leaf_nodes[0] res['git-version'] = helpers.git_version().decode('ascii') print(res) logging.info(('Responding with >>>')) logging.info((json.dumps(res))) logging.info(('<<<')) write_statistics(c.machine) return json.dumps(res)
73a55ec93bfdf398b896b3c208a476296c1c04f5
11,564
def number_empty_block(n):
    """Number of empty block"""
    L = L4 if n == 4 else L8
    i = 0
    for x in range(n):
        for y in range(n):
            if L[x][y] == 0:
                i = i + 1
    return i
1dc7f228cdcbf4c3a1b6b553bff75ba1bb95bdbe
11,565
def compute_referendum_result_by_regions(referendum_and_areas):
    """Return a table with the absolute count for each region.

    The return DataFrame should be indexed by `code_reg` and have columns:
    ['name_reg', 'Registered', 'Abstentions', 'Null', 'Choice A', 'Choice B']
    """
    ans = referendum_and_areas.groupby(
        ['code_reg', 'name_reg']).sum().reset_index().set_index('code_reg')
    ans = ans.drop(columns="Town code")
    return ans
fb02c28b5caca9147a27bd2f205c07d377d8561c
11,566
def fixed_rho_total_legacy(data, rho_p, rho_s, beads_2_M): """ *LEGACY*: only returns polycation/cation concentrations. Use updated version (`fixed_rho_total()`), which returns a dictionary of all concentrations. Computes the polycation concentration in the supernatant (I) and coacervate (II) phases for different Bjerrum lengths. Parameters ---------- data : dictionary of Pandas dataframes Contains dataframes of data from liquid state theory calculations indexed by Bjerrum length. Dataframes have densities in beads/sigma^3. rho_p : float Average density of polymer (cation + anion) in both phases [mol/L] rho_s : float Average density of salt (just cation since 1 cation and 1 anion come from one KBr molecule) in both phases [mol/L] beads_2_M : float Multiplicative conversion to get from beads/sigma^3 to moles of monomer/L. Returns ------- lB_arr : (Nx1) numpy array Array of Bjerrum non-dimensionalized by sigma (defined in definition of "data" dictionary). rho_PCI_list : N-element list List of densities of polycation in phase I (supernatant) [mol/L] rho_PCII_list : N-element list List of densities of polycation in phase II (coacervate) [mol/L] alpha_list : N-element list (only returned if ret_alpha==True) List of volume fractions of phase I [nondim]. """ # initializes outputs lB_valid_list = [] rho_PCI_list = [] rho_PCII_list = [] rho_CI_list = [] rho_CII_list = [] alpha_list = [] # computes coexistence at each Bjerrum length and stores results if physical for lB in data.keys(): df = data[lB] df_s = compute_rho_s(df, rho_p, beads_2_M) # ensures that the total salt concentration is within the possible two-phase range if rho_s <= np.max(df_s['rhoS'])*beads_2_M and \ rho_s >= np.min(df_s['rhoS'])*beads_2_M: # finds the index of the dataframe that has the closest salt concentration to the given value diff_rho_s = np.abs(df_s['rhoS']*beads_2_M - rho_s) i_same_salt = np.argmin(diff_rho_s) alpha = df_s['alpha'].iloc[i_same_salt] # recomputes the volume fraction of supernatant more precisely using # interpolation alpha = np.interp(rho_s, df_s['rhoS']*beads_2_M, df_s['alpha'].to_numpy(dtype='float64')) if alpha == 1: print('rho_s = {0:.64f}'.format(rho_s/beads_2_M)) print('rho_p = {0:.64f}'.format(rho_p/beads_2_M)) print('rhoPCI = {0:.64f}'.format(df['rhoPCI'].loc[i_same_salt])) print('rhoPCII = {0:.64f}'.format(df['rhoPCII'].loc[i_same_salt])) print('rhoCI = {0:.64f}'.format(df['rhoCI'].loc[i_same_salt])) print('rhoCII = {0:.64f}'.format(df['rhoCII'].loc[i_same_salt])) print(df.loc[i_same_salt]) # ensures that the ratio of volume I to total volume is physical # (i.e., in the range [0,1]) if alpha > 1 or alpha < 0: continue lB_valid_list += [lB] rho_PCI_list += [df_s['rhoPCI'].iloc[i_same_salt]*beads_2_M] rho_PCII_list += [df_s['rhoPCII'].iloc[i_same_salt]*beads_2_M] rho_CI_list += [df_s['rhoCI'].iloc[i_same_salt]*beads_2_M] rho_CII_list += [df_s['rhoCII'].iloc[i_same_salt]*beads_2_M] alpha_list += [alpha] lB_arr = np.array(lB_valid_list) return rho_PCI_list, rho_PCII_list, rho_CI_list, rho_CII_list, lB_arr, alpha_list
a13419c5dc95702e93dca48822e2731bd05745b5
11,567
def portfolio():
    """Function to render the portfolio page."""
    form = PortfolioCreateForm()

    if form.validate_on_submit():
        try:
            portfolio = Portfolio(name=form.data['name'], user_id=session['user_id'])
            db.session.add(portfolio)
            db.session.commit()
        except (DBAPIError, IntegrityError):
            flash('Something went terribly wrong.')
            return render_template('stocks/stocks.html', form=form)

        return redirect(url_for('.search_form'))

    companies = Company.query.filter_by(user_id=session['user_id']).all()

    return render_template('./stocks/stocks.html', companies=companies, form=form), 200
adc84381bf6397023fc943f9c2edb1e34879400d
11,569
def svn_client_get_simple_provider(*args):
    """svn_client_get_simple_provider(svn_auth_provider_object_t provider, apr_pool_t pool)"""
    return _client.svn_client_get_simple_provider(*args)
dcdaaa1b448443e7b3cdb8984dc31d8a009c5606
11,570
def svn_client_invoke_get_commit_log(*args):
    """
    svn_client_invoke_get_commit_log(svn_client_get_commit_log_t _obj, char log_msg, char tmp_file,
        apr_array_header_t commit_items, void baton, apr_pool_t pool) -> svn_error_t
    """
    return _client.svn_client_invoke_get_commit_log(*args)
5c4e8f30309037eabb74c99e77f1b0f4f3172428
11,571
import random def find_valid_nodes(node_ids, tree_1, tree_2): """ Recursive function for finding a subtree in the second tree with the same output type of a random subtree in the first tree Args: node_ids: List of node ids to search tree_1: Node containing full tree tree_2: Node containing full tree Returns: Random subtree of the first tree AND a valid node id of the second tree The output_type of the subtree will match the output_type of the valid node of the second tree """ # Randomly choose a node in the first tree node_id = random.choice(node_ids) # Get output_type of the random node in first tree output_type = tree_1.get_id_outputs()[node_id] # Find nodes with the same output_type in the second tree valid_node_ids = [] for n in tree_2.get_id_outputs(): if tree_2.get_id_outputs()[n] == output_type: valid_node_ids.append(n) if len(valid_node_ids) == 0: # Rerun function without invalid output_type return find_valid_nodes([i for i in node_ids if tree_1.get_id_outputs()[i] != output_type], tree_1, tree_2) # Take off root id node_id = node_id[1:] # Get subtree object from tree_1 subtree_1 = find_subtree(tree_1, node_id) # Randomly choose a node in the second valid_node_id = random.choice(valid_node_ids) # Take off root id valid_node_id = valid_node_id[1:] # Get subtree object from tree_2 subtree_2 = find_subtree(tree_2, valid_node_id) return subtree_1, valid_node_id, subtree_2, node_id
88cfd535487ba460e3c212c9c85269abd68f6ef2
11,572
import numpy as np
import torch


def pytorch_local_average(n, local_lookup, local_tensors):
    """Average the neighborhood tensors.

    Parameters
    ----------
    n : {int}
        Size of tensor
    local_lookup : {dict: int->float}
        A dictionary from rank of neighborhood to the weight between two processes
    local_tensors : {dict: int->tensor}
        A dictionary from rank to tensors to be aggregated.

    Returns
    -------
    tensor
        An averaged tensor
    """
    averaged = torch.DoubleTensor(np.zeros(n))
    for node_id, node_weight in local_lookup.items():
        averaged += node_weight * local_tensors[node_id]
    return averaged
294a4d63ce5eff42ccd3abe1354640f6f934e96f
11,573
def get_rr_Ux(N, Fmat, psd, x): """ Given a rank-reduced decomposition of the Cholesky factor L, calculate L^{T}x where x is some vector. This way, we don't have to built L, which saves memory and computational time. @param N: Vector with the elements of the diagonal matrix N @param Fmat: (n x m) matrix consisting of the reduced rank basis @param psd: PSD of the rank-reduced approximation @param x: Vector we want to process as Lx @return Ux """ n = N.shape[0] m = Fmat.shape[1] r = np.zeros(n) t = np.zeros(m) Z, B, D = get_rr_cholesky_rep(N, Fmat, psd) BD = (B.T * np.sqrt(D)).T for ii in range(n-1, -1, -1): r[ii] = x[ii]*np.sqrt(D[ii]) + np.dot(BD[ii,:].T, t) t += x[ii] * Z[ii,:] return r
9a0aaa95d904b9bc993d295a49d95a48d0f6245f
11,574
def get_poll_options(message: str) -> list:
    """
    Turns string into a list of poll options

    :param message:
    :return:
    """
    parts = message.split(CREATE_POLL_EVENT_PATTERN)
    if len(parts) > 1:
        votes = parts[-1].split(",")
        if len(votes) == 1 and votes[0] == ' ':
            return []
        else:
            return votes
    return []
fd1209403038d5b1ca75d7abd8567bb47fd6bd9a
11,575
def get_avg_percent_bonds(bond_list, num_opts, adj_lists, num_trials, break_co_bonds=False): """ Given adj_list for a set of options, with repeats for each option, find the avg and std dev of percent of each bond type :param bond_list: list of strings representing each bond type :param num_opts: number of options specified (should be length of adj_lists) :param adj_lists: list of lists of adjs: outer is for each option, inner is for each repeat :param num_trials: number of repeats (should be length of inner adj_lists list) :param break_co_bonds: Boolean, to determine whether determine oligomers and remaining bonds after removing C-O bonds to simulate RCF :return: avg_bonds, std_bonds: list of floats, list of floats: for each option tested, the average and std dev of bond distributions (percentages) """ analysis = [] for i in range(num_opts): cur_adjs = adj_lists[i] analysis.append([analyze_adj_matrix(cur_adjs[j], break_co_bonds=break_co_bonds) for j in range(num_trials)]) bond_percents = {} avg_bonds = {} std_bonds = {} for bond_type in bond_list: bond_percents[bond_type] = [[analysis[j][i][BONDS][bond_type]/sum(analysis[j][i][BONDS].values()) for i in range(num_trials)] for j in range(num_opts)] avg_bonds[bond_type] = [np.mean(bond_pcts) for bond_pcts in bond_percents[bond_type]] std_bonds[bond_type] = [np.sqrt(np.var(bond_pcts)) for bond_pcts in bond_percents[bond_type]] return avg_bonds, std_bonds
74c34afea07ab98941c70b571197fe0ea43bcb88
11,576
from io import BytesIO


def current_fig_image():
    """Takes current figure of matplotlib and returns it as a PIL image.
    Also clears the current plot."""
    plt.axis('off')
    fig = plt.gcf()
    buff = BytesIO()  # savefig writes binary image data, which PIL reads back
    fig.savefig(buff)
    buff.seek(0)
    img = Image.open(buff).convert('RGB')
    plt.clf()
    return img
6da91a7157db0cd0df8ebc1e4f3d6007f35f2621
11,577
def get_bgp_peer( api_client, endpoint_id, bgp_peer_id, verbose=False, **kwargs ): # noqa: E501 """Get eBGP peer # noqa: E501 Get eBGP peer details # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> response = api.get_bgp_peer(client, endpoint_id, bgp_peer_id, async_req=True) :param int endpoint_id: ID for IPsec endpoint (required) :param int bgp_peer_id: ID for BGP peer (required) :param async_req bool: execute request asynchronously :param bool verbose: True for verbose output :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: APIResponse or awaitable if async """ local_var_params = locals() request_params = ["verbose"] # noqa: E501 collection_formats = {} query_params = [] for param in [p for p in request_params if local_var_params.get(p) is not None]: query_params.append((param, local_var_params[param])) # noqa: E501 path_params = {"endpoint_id": endpoint_id, "bgp_peer_id": bgp_peer_id} header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params["Accept"] = api_client.select_header_accept( ["application/json"] ) # noqa: E501 # Authentication setting auth_settings = ["ApiTokenAuth", "basicAuth"] # noqa: E501 return api_client.call_api( "/ipsec/endpoints/{endpoint_id}/ebgp_peers/{bgp_peer_id}", "GET", path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type="object", # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get("async_req"), _return_http_data_only=local_var_params.get( "_return_http_data_only" ), # noqa: E501 _preload_content=local_var_params.get("_preload_content", True), _request_timeout=local_var_params.get("_request_timeout"), collection_formats=collection_formats, )
3a9fa41b2918c1537402d8894fe3315ff90241e8
11,578
def rewrite_elife_funding_awards(json_content, doi): """ rewrite elife funding awards """ # remove a funding award if doi == "10.7554/eLife.00801": for i, award in enumerate(json_content): if "id" in award and award["id"] == "par-2": del json_content[i] # add funding award recipient if doi == "10.7554/eLife.04250": recipients_for_04250 = [ { "type": "person", "name": {"preferred": "Eric Jonas", "index": "Jonas, Eric"}, } ] for i, award in enumerate(json_content): if "id" in award and award["id"] in ["par-2", "par-3", "par-4"]: if "recipients" not in award: json_content[i]["recipients"] = recipients_for_04250 # add funding award recipient if doi == "10.7554/eLife.06412": recipients_for_06412 = [ { "type": "person", "name": {"preferred": "Adam J Granger", "index": "Granger, Adam J"}, } ] for i, award in enumerate(json_content): if "id" in award and award["id"] == "par-1": if "recipients" not in award: json_content[i]["recipients"] = recipients_for_06412 return json_content
aba819589e50bc847d56a0f5a122b2474425d39c
11,579
def remove_subnet_from_router(router_id, subnet_id):
    """Remove a subnet from the router.

    Args:
        router_id (str): The router ID.
        subnet_id (str): The subnet ID.
    """
    return neutron().remove_interface_router(router_id, {
        'subnet_id': subnet_id
    })
4b7e9123f148fddaa3d53bcabe1f37b35c974162
11,580
def direct_to_template(request, template):
    """Generic template direction view."""
    return render_to_response(template, {}, request)
5a030f302450829d397fbc27f73bd24470bfe50b
11,581
from datetime import datetime


def now():
    """Return the current time as a datetime object."""
    return datetime.now()
79ddcf3c2e22ff57626520e0b41af8b0f58972d6
11,582
def is_valid_node_name(name):
    """
    Determine if a name is valid for a node.

    A node name:
        - Cannot be empty
        - Cannot start with a number
        - Cannot match any blacklisted pattern

    :param str name: The name to check.
    :return: True if the name is valid. False otherwise.
    :rtype: bool
    """
    return name and name not in BLACKLISTED_NODE_NAMES
6a935f7172e96fd418084543da0fe81d6bb77be5
11,583
def trajCalc(setup): """ Creates trajectory between point A and the ground (B) based off of the initial position and the angle of travel Arguments: setup: [Object] ini file parameters Returns: A [list] lat/lon/elev of the tail of the trajectory B [list] lat/lon/elev of the head of the trajectory """ B = np.array([0, 0, 0]) # convert angles to radians ze = np.radians(setup.zangle) az = np.radians(setup.azim) # Create trajectory vector traj = np.array([np.sin(az)*np.sin(ze), np.cos(az)*np.sin(ze), -np.cos(ze)]) # backwards propegate the trajectory until it reaches 100000 m up n = 85920/traj[2] # B is the intersection between the trajectory vector and the ground A = n*traj # Convert back to geo coordinates B = np.array(loc2Geo(setup.lat_centre, setup.lon_centre, 0, B)) A = np.array(loc2Geo(setup.lat_centre, setup.lon_centre, 0, A)) # print("Created Trajectory between A and B:") # print(" A = {:10.4f}N {:10.4f}E {:10.2f}m".format(A[0], A[1], A[2])) # print(" B = {:10.4f}N {:10.4f}E {:10.2f}m".format(B[0], B[1], B[2])) A[2] /= 1000 B[2] /= 1000 setup.lat_i = A[0] setup.lon_i = A[1] setup.elev_i = A[2] return A, B
30614d0193aacdd594400fbbda396bca40a75156
11,584
from typing import Dict


async def health() -> Dict[str, str]:
    """Health check function

    :return: Health check dict
    :rtype: Dict[str, str]
    """
    health_response = schemas.Health(name=settings.PROJECT_NAME, api_version=__version__)
    return health_response.dict()
ffda9fa5795c02bd197ed8715d44b384781c866f
11,585
def projection_v3(v, w):
    """Return the signed length of the projection of vector v on vector w.

    For the full vector result, use projection_as_vec_v3().

    Since the resulting vector is along the 1st vector, you can get the full
    vector result by scaling the 1st vector to the length of the result of
    this function.
    """
    return dot_v3(v, w) / w.length()
3aeb8783a5eb680f0e2085d6a0f3b8b80511b10f
11,586
import math
import collections


def bleu(pred_seq, label_seq, k):
    """Compute the BLEU score."""
    pred_tokens, label_tokens = pred_seq.split(' '), label_seq.split(' ')
    len_pred, len_label = len(pred_tokens), len(label_tokens)
    score = math.exp(min(0, 1 - len_label / len_pred))
    for n in range(1, k + 1):
        num_matches, label_subs = 0, collections.defaultdict(int)
        for i in range(len_label - n + 1):
            label_subs[''.join(label_tokens[i: i + n])] += 1
        for i in range(len_pred - n + 1):
            if label_subs[''.join(pred_tokens[i: i + n])] > 0:
                num_matches += 1
                label_subs[''.join(pred_tokens[i: i + n])] -= 1
        score *= math.pow(num_matches / (len_pred - n + 1), math.pow(0.5, n))
    return score
ae7485687a44afc9ced6f2f4ed5ac8fe0d67b295
11,588
def report_by_name(http_request, agent_name):
    """
    A version of report that can look up an agent by its name. This will
    generally be slower but it also doesn't expose how the data is stored
    and might be easier in some cases.
    """
    agent = get_list_or_404(Agent, name=agent_name)[0]
    return report(http_request, agent.id)
ee9daf5b7e7e5f15af3e507f55e0a5e2a1388aca
11,589
def create_application(global_config=None, **local_conf):
    """
    Create a configured instance of the WSGI application.
    """
    sites, types = load_config(local_conf.get("config"))
    return ImageProxy(sites, types)
c8d3c7158902df00d88512a23bb852bb226b5ba4
11,590
def dimensionState(moons, dimension):
    """returns the state for the given dimension"""
    result = list()
    for moon in moons:
        result.append((moon.position[dimension], moon.velocity[dimension]))
    return result
e67a37e4a1556d637be74992fc3801ee56f0e6f9
11,591
def login():
    """Log in current user."""
    user = get_user()
    if user.system_wide_role != 'No Access':
        flask_login.login_user(user)
        return flask.redirect(common.get_next_url(
            flask.request, default_url=flask.url_for('dashboard')))

    flask.flash(u'You do not have access. Please contact your administrator.',
                'alert alert-info')
    return flask.redirect('/')
f229ebbd0f77789784b2e8d05bb643d9b5cb1da1
11,592
def init_module():
    """
    Initialize user's module handler.

    :return: wrapper handler.
    """
    original_module, module_path, handler_name = import_original_module()
    try:
        handler = original_module
        for name in module_path.split('.')[1:] + [handler_name]:
            handler = getattr(handler, name)
        return handler
    except AttributeError:
        raise AttributeError(
            'No handler {} in module {}'.format(handler_name, module_path)
        )
148ed89ea8a9f67c9cef5bc209a803840ff9de56
11,593
from typing import List def process(lines: List[str]) -> str: """ Preprocess a Fortran source file. Args: inputLines The input Fortran file. Returns: Preprocessed lines of Fortran. """ # remove lines that are entirely comments and partial-line comments lines = [ rm_trailing_comment(line) for line in lines if not line_is_comment(line) ] # merge continuation lines chg = True while chg: chg = False i = 0 while i < len(lines): line = lines[i] llstr = line.lstrip() if len(llstr) > 0 and llstr[0] == "&": # continuation character prevline = lines[i - 1] line = llstr[1:].lstrip() prevline = prevline.rstrip() + line lines[i - 1] = prevline lines.pop(i) chg = True i += 1 return "".join(lines)
f3fd3bc75be544cd507dae87b80364c9b1c12f7c
11,594
def _summary(function):
    """
    Derive summary information from a function's docstring or name.

    The summary is the first sentence of the docstring, ending in a period,
    or if no docstring is present, the function's name capitalized.
    """
    if not function.__doc__:
        return f"{function.__name__.capitalize()}."

    result = []
    for word in function.__doc__.split():
        result.append(word)
        if word.endswith("."):
            break
    return " ".join(result)
a3e3e45c3004e135c2810a5ec009aa78ef7e7a04
11,595
def findrun(base, dim, boxsize):
    """
    Find all files associated with a run, given the base directory,
    the resolution size and the box length.
    """
    if not os.path.isdir(base):
        print(base, 'is not a valid directory')
        sys.exit(1)

    # retrieve all files that match tag and box size
    # note this will include the initialisation boxes, which
    # are independent of redshift
    searchstr = '_' + str(dim) + '_' + str(boxsize) + 'Mpc'
    filenames = os.listdir(base)
    box_files = []
    for filename in filenames:
        if filename.find(searchstr) >= 0:
            box_files.append(os.path.join(base, filename))

    return box_files
c14f885943a33df96cda28a4a84fdce332149167
11,596
def demand_mass_balance_c(host_odemand, class_odemand, avail, host_recapture): """Solve Demand Mass Balance equation for class-level Parameters ---------- host_odemand: int Observerd host demand class_odemand: int Observed class demand avail: dict Availability of demand open during period considered host_recapture: float Estimated host level recapture Returns ------- tuple Estimated demand, spill and recapture """ # if observed demand of a class is 0 demand mass balance can't # estimate demand and spill alone without additioanl information demand = spill = recapture = 0 if class_odemand: recapture = host_recapture * class_odemand / host_odemand # availability of demand closed during period considered k = 1 - avail A = np.array([[1, -1], [-k, 1]]) B = np.array([class_odemand - recapture, 0]) demand, spill = solve(A, B) return demand, spill, recapture
7fda78ce632f1a26ec875c37abe5db40615aa351
11,597
from typing import Optional def serve_buffer( data: bytes, offered_filename: str = None, content_type: str = None, as_attachment: bool = True, as_inline: bool = False, default_content_type: Optional[str] = MimeType.FORCE_DOWNLOAD) \ -> HttpResponse: """ Serve up binary data from a buffer. Options as for :func:`serve_file`. """ response = HttpResponse(data) add_http_headers_for_attachment( response, offered_filename=offered_filename, content_type=content_type, as_attachment=as_attachment, as_inline=as_inline, content_length=len(data), default_content_type=default_content_type) return response
aa41df168e3f84468293ca6653c06402e6c395fc
11,598
def get_single_image_results(gt_boxes, pred_boxes, iou_thr): """Calculates number of true_pos, false_pos, false_neg from single batch of boxes. Args: gt_boxes (list of list of floats): list of locations of ground truth objects as [xmin, ymin, xmax, ymax] pred_boxes (dict): dict of dicts of 'boxes' (formatted like `gt_boxes`) and 'scores' iou_thr (float): value of IoU to consider as threshold for a true prediction. Returns: dict: true positives (int), false positives (int), false negatives (int) """ all_pred_indices = range(len(pred_boxes)) all_gt_indices = range(len(gt_boxes)) if len(all_pred_indices) == 0: tp = 0 fp = 0 fn = len(gt_boxes) return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn} if len(all_gt_indices) == 0: tp = 0 fp = len(pred_boxes) fn = 0 return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn} gt_idx_thr = [] pred_idx_thr = [] ious = [] for ipb, pred_box in enumerate(pred_boxes): for igb, gt_box in enumerate(gt_boxes): iou = calc_iou_individual(pred_box, gt_box) if iou > iou_thr: gt_idx_thr.append(igb) pred_idx_thr.append(ipb) ious.append(iou) args_desc = np.argsort(ious)[::-1] if len(args_desc) == 0: # No matches tp = 0 fp = len(pred_boxes) fn = len(gt_boxes) else: gt_match_idx = [] pred_match_idx = [] for idx in args_desc: gt_idx = gt_idx_thr[idx] pr_idx = pred_idx_thr[idx] # If the boxes are unmatched, add them to matches if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx): gt_match_idx.append(gt_idx) pred_match_idx.append(pr_idx) tp = len(gt_match_idx) fp = len(pred_boxes) - len(pred_match_idx) fn = len(gt_boxes) - len(gt_match_idx) return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
ec97189c8c75686aa172292179228621e244982f
11,599
def partition(arr, left, right):
    """[summary]
    The point of a pivot value is to select a value, find out where it belongs
    in the array while moving everything lower than that value to the left,
    and everything higher to the right.

    Args:
        arr ([array]): [Unordered array]
        left ([int]): [Left index of the array]
        right ([int]): [Right index of the array]

    Returns:
        [int]: [the value of the lowest element]
    """
    pivot = arr[right]
    low = left - 1
    for current in range(left, right):
        if arr[current] <= pivot:
            low += 1
            swap(arr, current, low)
    swap(arr, right, low + 1)
    return low + 1
30f1448861a7a9fa2f119e31482f1f715c9e1ce0
11,600
def graphatbottleneck(g,m,shallfp=True): """handles the bottleneck transformations for a pure graph ae, return g, compressed, new input, shallfp=True=>convert vector in matrix (with gfromparam), can use redense to add a couple dense layers around the bottleneck (defined by m.redense*)""" comp=ggoparam(gs=g.s.gs,param=g.s.param)([g.X]) if m.shallredense: for e in m.redenseladder: comp=Dense(e,activation=m.redenseactivation,kernel_initializer=m.redenseinit)(comp) inn2=Input(m.redenseladder[-1]) use=inn2 for i in range(len(m.redenseladder)-1,-1,-1): use=Dense(m.redenseladder[i],activation=m.redenseactivation,kernel_initializer=m.redenseinit)(use) use=Dense(g.s.gs*g.s.param,activation=m.redenseactivation,kernel_initializer=m.redenseinit)(use) else: inn2=Input(g.s.gs*g.s.param) use=inn2 if shallfp: taef1=gfromparam(gs=g.s.gs,param=g.s.param)([use]) else: taef1=inn2 g.X=taef1 g.A=None return g,comp,inn2
b46967b40fce669c3e74d52e31f814bbf96ce8c0
11,601
def categorical_onehot_binarizer(feature, feature_scale=None, prefix='columns', dtype='int8'): """Transform between iterable of iterables and a multilabel format, sample is simple categories. Args: feature: pd.Series, sample feature. feature_scale: list, feature categories list. prefix: String to append DataFrame column names. dtype: default np.uint8. Data type for new columns. Only a single dtype is allowed. Returns: Dataframe for onehot binarizer. """ assert not any(feature.isnull()), "`feature' should be not contains NaN" scale = feature.drop_duplicates().tolist() if feature_scale is not None: t = pd.get_dummies(feature.replace({i:'temp_str' for i in set.difference(set(scale), set(feature_scale))}), prefix=prefix, dtype=dtype) if prefix+'_temp_str' in t.columns: t = t.drop([prefix+'_temp_str'], axis=1) for i in set.difference(set(feature_scale), set(scale)): if prefix+'_'+str(i) not in t.columns: t[prefix+'_'+str(i)] = 0 scale = feature_scale t = t[[prefix+'_'+str(i) for i in feature_scale]] else: t = pd.get_dummies(feature, prefix=prefix, dtype=dtype) t = t[[prefix+'_'+str(i) for i in scale]] return t, scale
eb3a2b38d323c72bb298b64ebbd6567d143471fc
11,602
def add_selfloops(adj_matrix: sp.csr_matrix, fill_weight=1.0): """add selfloops for adjacency matrix. >>>add_selfloops(adj, fill_weight=1.0) # return an adjacency matrix with selfloops # return a list of adjacency matrices with selfloops >>>add_selfloops(adj, adj, fill_weight=[1.0, 2.0]) Parameters ---------- adj_matrix: Scipy matrix or Numpy array or a list of them Single or a list of Scipy sparse matrices or Numpy arrays. fill_weight: float scalar, optional. weight of self loops for the adjacency matrix. Returns ------- Single or a list of Scipy sparse matrix or Numpy matrices. See also ---------- graphgallery.functional.AddSelfloops """ def _add_selfloops(adj, w): adj = eliminate_selfloops(adj) if w: return adj + w * sp.eye(adj.shape[0], dtype=adj.dtype, format='csr') else: return adj if gg.is_listlike(fill_weight): return tuple(_add_selfloops(adj_matrix, w) for w in fill_weight) else: return _add_selfloops(adj_matrix, fill_weight)
867bdf380995b6ff48aac9741facd09066ad03bd
11,604
def handle(event, _ctxt): """ Handle the Lambda Invocation """ response = { 'message': '', 'event': event } ssm = boto3.client('ssm') vpc_ids = ssm.get_parameter(Name=f'{PARAM_BASE}/vpc_ids')['Parameter']['Value'] vpc_ids = vpc_ids.split(',') args = { 'vpc_ids': vpc_ids } try: sg_name = ssm.get_parameter(Name=f'{PARAM_BASE}/secgrp_name')['Parameter']['Value'] args['managed_sg_name'] = sg_name except botocore.exceptions.ClientError as ex: if ex.response['Error']['Code'] == 'ParameterNotFound': pass else: print(ex) return response run(**args) return response
8cf0dc52b641bd28b002caef1d97c7e3a60be647
11,605
def _get_indice_map(chisqr_set):
    """Find element with lowest chisqr at each voxel"""
    # make chisqr array of dims [x, y, z, 0, rcvr, chisqr]
    chisqr_arr = np.stack(chisqr_set, axis=5)
    indice_arr = np.argmin(chisqr_arr, axis=5)
    return indice_arr
9ac00310628d3f45f72542dbfff5345845053acd
11,606
def noct_synthesis(spectrum, freqs, fmin, fmax, n=3, G=10, fr=1000): """Adapt input spectrum to nth-octave band spectrum Convert the input spectrum to third-octave band spectrum between "fc_min" and "fc_max". Parameters ---------- spectrum : numpy.ndarray amplitude rms of the one-sided spectrum of the signal, size (nperseg, nseg). freqs : list List of input frequency , size (nperseg) or (nperseg, nseg). fmin : float Min frequency band [Hz]. fmax : float Max frequency band [Hz]. n : int Number of bands pr octave. G : int System for specifying the exact geometric mean frequencies. Can be base 2 or base 10. fr : int Reference frequency. Shall be set to 1 kHz for audible frequency range, to 1 Hz for infrasonic range (f < 20 Hz) and to 1 MHz for ultrasonic range (f > 31.5 kHz). Outputs ------- spec : numpy.ndarray Third octave band spectrum of signal sig [dB re.2e-5 Pa], size (nbands, nseg). fpref : numpy.ndarray Corresponding preferred third octave band center frequencies, size (nbands). """ # Get filters center frequencies fc_vec, fpref = _center_freq(fmin=fmin, fmax=fmax, n=n, G=G, fr=fr) nband = len(fpref) if len(spectrum.shape) > 1: nseg = spectrum.shape[1] spec = np.zeros((nband, nseg)) if len(freqs.shape) == 1: freqs = np.tile(freqs, (nseg, 1)).T else: nseg = 1 spec = np.zeros((nband)) # Frequency resolution # df = freqs[1:] - freqs[:-1] # df = np.concatenate((df, [df[-1]])) # Get upper and lower frequencies fu = fc_vec * 2**(1/(2*n)) fl = fc_vec / 2**(1/(2*n)) for s in range(nseg): for i in range(nband): if len(spectrum.shape) > 1: # index of the frequencies within the band idx = np.where((freqs[:, s] >= fl[i]) & (freqs[:, s] < fu[i])) spec[i, s] = np.sqrt( np.sum(np.power(np.abs(spectrum[idx,s]), 2))) else: # index of the frequencies within the band idx = np.where((freqs >= fl[i]) & (freqs < fu[i])) spec[i] = np.sqrt(np.sum(np.abs(spectrum[idx])**2)) return spec, fpref
89c6be2be262b153bd63ecf498cf92cf93de9e31
11,608
def get_model_prediction(model_input, stub, model_name='amazon_review',
                         signature_name='serving_default'):
    """ no error handling at all, just poc"""
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.model_spec.signature_name = signature_name
    request.inputs['input_input'].CopyFrom(tf.make_tensor_proto(model_input))
    response = stub.Predict.future(request, 5.0)  # 5 seconds
    return response.result().outputs["output"].float_val
6de7e35305e0d9fe9fe0b03e3b0ab0c82937778c
11,609
def _make_feature_stats_proto( common_stats, feature_name, q_combiner, num_values_histogram_buckets, is_categorical, has_weights ): """Convert the partial common stats into a FeatureNameStatistics proto. Args: common_stats: The partial common stats associated with a feature. feature_name: The name of the feature. q_combiner: The quantiles combiner used to construct the quantiles histogram for the number of values in the feature. num_values_histogram_buckets: Number of buckets in the quantiles histogram for the number of values per feature. is_categorical: A boolean indicating whether the feature is categorical. has_weights: A boolean indicating whether a weight feature is specified. Returns: A statistics_pb2.FeatureNameStatistics proto. """ common_stats_proto = statistics_pb2.CommonStatistics() common_stats_proto.num_non_missing = common_stats.num_non_missing common_stats_proto.num_missing = common_stats.num_missing common_stats_proto.tot_num_values = common_stats.total_num_values if common_stats.num_non_missing > 0: common_stats_proto.min_num_values = common_stats.min_num_values common_stats_proto.max_num_values = common_stats.max_num_values common_stats_proto.avg_num_values = ( common_stats.total_num_values / common_stats.num_non_missing) # Add num_values_histogram to the common stats proto. num_values_quantiles = q_combiner.extract_output( common_stats.num_values_summary) histogram = quantiles_util.generate_quantiles_histogram( num_values_quantiles, common_stats.min_num_values, common_stats.max_num_values, common_stats.num_non_missing, num_values_histogram_buckets) common_stats_proto.num_values_histogram.CopyFrom(histogram) # Add weighted common stats to the proto. if has_weights: weighted_common_stats_proto = statistics_pb2.WeightedCommonStatistics( num_non_missing=common_stats.weighted_num_non_missing, num_missing=common_stats.weighted_num_missing, tot_num_values=common_stats.weighted_total_num_values) if common_stats.weighted_num_non_missing > 0: weighted_common_stats_proto.avg_num_values = ( common_stats.weighted_total_num_values / common_stats.weighted_num_non_missing) common_stats_proto.weighted_common_stats.CopyFrom( weighted_common_stats_proto) # Create a new FeatureNameStatistics proto. result = statistics_pb2.FeatureNameStatistics() result.name = feature_name # Set the feature type. # If we have a categorical feature, we preserve the type to be the original # INT type. Currently we don't set the type if we cannot infer it, which # happens when all the values are missing. We need to add an UNKNOWN type # to the stats proto to handle this case. if is_categorical: result.type = statistics_pb2.FeatureNameStatistics.INT elif common_stats.type is None: # If a feature is completely missing, we assume the type to be STRING. result.type = statistics_pb2.FeatureNameStatistics.STRING else: result.type = common_stats.type # Copy the common stats into appropriate numeric/string stats. # If the type is not set, we currently wrap the common stats # within numeric stats. if (result.type == statistics_pb2.FeatureNameStatistics.STRING or is_categorical): # Add the common stats into string stats. string_stats_proto = statistics_pb2.StringStatistics() string_stats_proto.common_stats.CopyFrom(common_stats_proto) result.string_stats.CopyFrom(string_stats_proto) else: # Add the common stats into numeric stats. numeric_stats_proto = statistics_pb2.NumericStatistics() numeric_stats_proto.common_stats.CopyFrom(common_stats_proto) result.num_stats.CopyFrom(numeric_stats_proto) return result
16b55556d76f5d5cb01f2dc3142b42a86f85bcb8
11,610
def get_device():
    """
    Returns the id of the current device.
    """
    c_dev = c_int_t(0)
    safe_call(backend.get().af_get_device(c_pointer(c_dev)))
    return c_dev.value
4be37aa83bf822aac794680d9f30fe24edb38231
11,612
def plotLatentsSweep(yhat,nmodels=1): """plotLatentsSweep(yhat): plots model latents and a subset of the corresponding stimuli, generated from sweepCircleLatents() ---e.g.,--- yhat, x = sweepCircleLatents(vae) plotCircleSweep(yhat,x) alternatively, plotLatentsSweep(sweepCircleLatents(vae)) """ # Initialization if type(yhat) is tuple: yhat = yhat[0] # Start a-plottin' fig, ax = plt.subplots(nmodels,4,figsize=(9, 15), dpi= 80, facecolor='w', edgecolor='k', sharey='row',sharex='col') for latentdim in range(4): if nmodels > 1: for imodel in range(nmodels): plt.sca(ax[imodel,latentdim]) plt.plot(yhat[imodel][latentdim*16+np.arange(0,16),:].detach().numpy()) # ax[imodel,latentdim].set_aspect(1./ax[imodel,latentdim].get_data_ratio()) ax[imodel,latentdim].spines['top'].set_visible(False) ax[imodel,latentdim].spines['right'].set_visible(False) if latentdim>0: ax[imodel,latentdim].spines['left'].set_visible(False) # ax[imodel,latentdim].set_yticklabels([]) ax[imodel,latentdim].tick_params(axis='y', length=0) # if imodel<nmodels-1 or latentdim>0: ax[imodel,latentdim].spines['bottom'].set_visible(False) ax[imodel,latentdim].set_xticklabels([]) ax[imodel,latentdim].tick_params(axis='x', length=0) else: imodel=0 plt.sca(ax[latentdim]) plt.plot(yhat[latentdim*16+np.arange(0,16),:].detach().numpy()) ax[latentdim].set_aspect(1./ax[latentdim].get_data_ratio()) ax[latentdim].spines['top'].set_visible(False) ax[latentdim].spines['right'].set_visible(False) if latentdim>0: ax[latentdim].spines['left'].set_visible(False) ax[latentdim].tick_params(axis='y', length=0) # if imodel<nmodels-1 or latentdim>0: ax[latentdim].spines['bottom'].set_visible(False) ax[latentdim].set_xticklabels([]) ax[latentdim].tick_params(axis='x', length=0) return fig, ax
f43ffd9b45981254a550c8b187da649522522dd0
11,613
def calc_lipophilicity(seq, method="mean"): """ Calculates the average hydrophobicity of a sequence according to the Hessa biological scale. Hessa T, Kim H, Bihlmaier K, Lundin C, Boekel J, Andersson H, Nilsson I, White SH, von Heijne G. Nature. 2005 Jan 27;433(7024):377-81 The Hessa scale has been calculated empirically, using the glycosylation assay of TMD insertion. Negative values indicate hydrophobic amino acids with favourable membrane insertion. Other hydrophobicity scales are in the settings folder. They can be generated as follows. hydrophob_scale_path = r"D:\korbinian\korbinian\settings\hydrophobicity_scales.xlsx" df_hs = pd.read_excel(hydrophob_scale_path, skiprows=2) df_hs.set_index("1aa", inplace=True) dict_hs = df_hs.Hessa.to_dict() hessa_scale = np.array([value for (key, value) in sorted(dict_hs.items())]) ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y'] Parameters: ----------- seq : string Sequence to be analysed. Gaps (-) and unknown amino acids (x) should be ignored. method : string Method to be used to average the hydrophobicity values over the whole sequence. The hydrophobicity score is positive for polar/charged aa, negative for hydrophobic aa. "sum" will return the sum of the hydrophobicity scores over the sequence "mean" will return the mean of the hydrophobicity scores over the sequence Returns: -------- mean hydrophobicity value for the sequence entered Usage: ------ from korbinian.utils import calc_lipophilicity # for a single sequence s = "SAESVGEVYIKSTETGQYLAG" calc_lipophilicity(s) # for a series of sequences TMD_ser = df2.TM01_SW_match_seq.dropna() hydro = TMD_ser.apply(lambda x : calc_lipophilicity(x)) Notes: ------ %timeit results: for a 20aa seq: 136 µs per loop for a pandas series with 852 tmds: 118 ms per loop """ # hydrophobicity scale hessa_scale = np.array([0.11, -0.13, 3.49, 2.68, -0.32, 0.74, 2.06, -0.6, 2.71, -0.55, -0.1, 2.05, 2.23, 2.36, 2.58, 0.84, 0.52, -0.31, 0.3, 0.68]) # convert to biopython analysis object analysed_seq = ProteinAnalysis(seq) # biopython count_amino_acids returns a dictionary. aa_counts_dict = analysed_seq.count_amino_acids() # get the number of AA residues used to calculated the hydrophobicity # this is not simply the sequence length, as the sequence could include gaps or non-natural AA aa_counts_excluding_gaps = np.array(list(aa_counts_dict.values())) number_of_residues = aa_counts_excluding_gaps.sum() # if there are no residues, don't attempt to calculate a mean. Return np.nan. if number_of_residues == 0: return np.nan # convert dictionary to array, sorted by aa aa_counts_arr = np.array([value for (key, value) in sorted(aa_counts_dict.items())]) multiplied = aa_counts_arr * hessa_scale sum_of_multiplied = multiplied.sum() if method == "mean": return sum_of_multiplied / number_of_residues if method == "sum": return sum_of_multiplied
a8858a62b3c76d466b510507b1ce9f158b5c8c9c
11,614
def get_enrollments(username, include_inactive=False):
    """Retrieves all the courses a user is enrolled in.

    Takes a user and retrieves all relative enrollments. Includes information
    regarding how the user is enrolled in the course.

    Args:
        username: The username of the user we want to retrieve course
            enrollment information for.
        include_inactive (bool): Determines whether inactive enrollments will
            be included.

    Returns:
        A list of enrollment information for the given user.

    Examples:
        >>> get_enrollments("Bob")
        [
            {
                "created": "2014-10-20T20:18:00Z",
                "mode": "honor",
                "is_active": True,
                "user": "Bob",
                "course_details": {
                    "course_id": "edX/DemoX/2014T2",
                    "course_name": "edX Demonstration Course",
                    "enrollment_end": "2014-12-20T20:18:00Z",
                    "enrollment_start": "2014-10-15T20:18:00Z",
                    "course_start": "2015-02-03T00:00:00Z",
                    "course_end": "2015-05-06T00:00:00Z",
                    "course_modes": [
                        {
                            "slug": "honor",
                            "name": "Honor Code Certificate",
                            "min_price": 0,
                            "suggested_prices": "",
                            "currency": "usd",
                            "expiration_datetime": null,
                            "description": null,
                            "sku": null,
                            "bulk_sku": null
                        }
                    ],
                    "invite_only": False
                }
            },
            {
                "created": "2014-10-25T20:18:00Z",
                "mode": "verified",
                "is_active": True,
                "user": "Bob",
                "course_details": {
                    "course_id": "edX/edX-Insider/2014T2",
                    "course_name": "edX Insider Course",
                    "enrollment_end": "2014-12-20T20:18:00Z",
                    "enrollment_start": "2014-10-15T20:18:00Z",
                    "course_start": "2015-02-03T00:00:00Z",
                    "course_end": "2015-05-06T00:00:00Z",
                    "course_modes": [
                        {
                            "slug": "honor",
                            "name": "Honor Code Certificate",
                            "min_price": 0,
                            "suggested_prices": "",
                            "currency": "usd",
                            "expiration_datetime": null,
                            "description": null,
                            "sku": null,
                            "bulk_sku": null
                        }
                    ],
                    "invite_only": True
                }
            }
        ]

    """
    return _data_api().get_course_enrollments(username, include_inactive)
0cbc9a60929fd06f8f5ca90d6c2458867ae474e7
11,615
async def connections_accept_request(request: web.BaseRequest):
    """
    Request handler for accepting a stored connection request.

    Args:
        request: aiohttp request object

    Returns:
        The resulting connection record details

    """
    context = request.app["request_context"]
    outbound_handler = request.app["outbound_message_router"]
    connection_id = request.match_info["id"]
    try:
        connection = await ConnectionRecord.retrieve_by_id(context, connection_id)
    except StorageNotFoundError:
        raise web.HTTPNotFound()
    connection_mgr = ConnectionManager(context)
    my_endpoint = request.query.get("my_endpoint") or None
    request = await connection_mgr.create_response(connection, my_endpoint)
    await outbound_handler(request, connection_id=connection.connection_id)
    return web.json_response(connection.serialize())
78a469d306c3306f8b9a0ba1a3364f7b30a36f85
11,616
import numpy as np


def nearest(x, base=1.):
    """
    Round the inputs to the nearest base. Beware, due to the nature of
    floating point arithmetic, this may not work as you expect.

    INPUTS
        x : input value or array

    OPTIONS
        base : number to which x should be rounded
    """
    return np.round(x/base)*base
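
# Hedged usage sketch (not from the source): rounding scalars or arrays to an
# arbitrary step size.
# print(nearest(7.3))                              # -> 7.0
# print(nearest(7.3, base=0.5))                    # -> 7.5
# print(nearest(np.array([1.2, 2.7]), base=0.25))  # -> [1.25 2.75]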
ca1ddcd75c20ea82c18368b548c36ef5207ab77f
11,617
def sort_nesting(list1, list2):
    """Takes a list of start points and end points and sorts the second list according to nesting"""
    temp_list = []
    while list2 != temp_list:
        temp_list = list2[:]  # Make a copy of list2 instead of reference
        for i in range(1, len(list1)):
            if list2[i] > list2[i-1] and list1[i] < list2[i-1]:
                list2[i-1], list2[i] = list2[i], list2[i-1]
    return list2
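
# Hedged usage sketch (values are illustrative, not from the source): with
# start points [0, 2, 10] and end points [5, 8, 12], the second span starts
# inside the first but ends after it, so the end points are swapped to make
# the spans properly nested.
# starts = [0, 2, 10]
# ends = [5, 8, 12]
# sort_nesting(starts, ends)  # -> [8, 5, 12]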
11693e54eeba2016d21c0c23450008e823bdf1c1
11,618
import numpy as np


def confusion_matrix(Y_hat, Y, norm=None):
    """
    Calculate confusion matrix.

    Parameters
    ----------
    Y_hat : array-like
        List of data labels.
    Y : array-like
        List of target truth labels.
    norm : {'label', 'target', 'all', None}, default=None
        Normalization on resulting matrix. Must be one of:
        - 'label' : normalize on labels (columns).
        - 'target' : normalize on targets (rows).
        - 'all' : normalize on the entire matrix.
        - None : No normalization.

    Returns
    -------
    matrix : ndarray, shape=(target_classes, label_classes)
        Confusion matrix with target classes as rows and label classes
        as columns. Classes are in sorted order.
    """
    target_classes = sorted(set(Y))
    label_classes = sorted(set(Y_hat))
    target_dict = {target_classes[k]: k for k in range(len(target_classes))}
    label_dict = {label_classes[k]: k for k in range(len(label_classes))}
    matrix = np.zeros((len(target_classes), len(label_classes)))
    for label, target in zip(Y_hat, Y):
        matrix[target_dict[target], label_dict[label]] += 1

    if norm == 'label':
        matrix /= np.max(matrix, axis=0).reshape((1, matrix.shape[1]))
    elif norm == 'target':
        matrix /= np.max(matrix, axis=1).reshape((matrix.shape[0], 1))
    elif norm == 'all':
        matrix /= np.max(matrix)
    elif norm is not None:
        raise ValueError("Norm must be one of {'label', 'target', 'all', None}")

    # Only cast back to integer counts when no normalization was applied;
    # normalized matrices contain fractional values.
    return matrix.astype(int) if norm is None else matrix
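
# Hedged usage sketch (labels are illustrative, not from the source): rows are
# true classes, columns are predicted classes, both in sorted order.
# y_true = ['cat', 'cat', 'dog', 'dog', 'dog']
# y_pred = ['cat', 'dog', 'dog', 'dog', 'cat']
# confusion_matrix(y_pred, y_true)
# -> [[1 1]
#     [1 2]]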
b1ed79b71cef8cdcaa2cfe06435a3b2a56c659dd
11,619
def reroot(original_node: Tree, new_node: Tree):
    """
    :param original_node: the node in the original tree
    :param new_node: the new node to give children

    new_node should have as children the relations of original_node,
    except for new_node's parent
    """
    new_node.children = [
        Tree(relation.label)
        for relation in original_node.relations
        if relation.label != new_node.parent_label
    ]
    for relation in new_node.children:
        reroot(
            original_node.find(relation.label),
            relation,
        )
    return new_node
f37141fc7645dfbab401eb2be3255d917372ef11
11,620
import uuid
import time


def update_users():
    """Sync LDAP users with local users in the DB."""
    log_uuid = str(uuid.uuid4())
    start_time = time.time()

    patron_cls = current_app_ils.patron_cls
    patron_indexer = PatronBaseIndexer()
    invenio_users_updated_count = 0
    invenio_users_added_count = 0

    # get all CERN users from LDAP
    ldap_client = LdapClient()
    ldap_users = ldap_client.get_primary_accounts()
    _log_info(log_uuid, "users_fetched_from_ldap", dict(users_fetched=len(ldap_users)))

    if not ldap_users:
        return 0, 0, 0

    # create a map by employeeID for fast lookup
    ldap_users_emails = set()
    ldap_users_map = {}
    for ldap_user in ldap_users:
        if "mail" not in ldap_user:
            _log_info(
                log_uuid,
                "missing_email",
                dict(employee_id=ldap_user_get(ldap_user, "employeeID")),
                is_error=True,
            )
            continue
        email = ldap_user_get_email(ldap_user)
        if email not in ldap_users_emails:
            ldap_person_id = ldap_user_get(ldap_user, "employeeID")
            ldap_users_map[ldap_person_id] = ldap_user
            ldap_users_emails.add(email)
    _log_info(log_uuid, "users_cached")

    remote_accounts = RemoteAccount.query.all()
    _log_info(log_uuid, "users_fetched_from_invenio", dict(users_fetched=len(remote_accounts)))

    # get all Invenio remote accounts and prepare a list with needed info
    invenio_users = []
    for remote_account in remote_accounts:
        invenio_users.append(
            dict(
                remote_account_id=remote_account.id,
                remote_account_person_id=remote_account.extra_data["person_id"],
                remote_account_department=remote_account.extra_data.get("department"),
                user_id=remote_account.user_id,
            )
        )
    _log_info(log_uuid, "invenio_users_prepared")

    # STEP 1
    # iterate on all Invenio users first, to update outdated info from LDAP
    # or delete users if not found in LDAP.
    #
    # Note: cannot iterate on the db query here, because when a user is
    # deleted, db session will expire, causing a DetachedInstanceError when
    # fetching the user on the next iteration
    for invenio_user in invenio_users:
        # use `dict.pop` to remove from `ldap_users_map` the users found
        # in Invenio, so the remaining will be the ones to be added later on
        ldap_user = ldap_users_map.pop(invenio_user["remote_account_person_id"], None)
        if ldap_user:
            # the imported LDAP user is already in the Invenio db
            ldap_user_display_name = ldap_user_get(ldap_user, "displayName")
            user_id = invenio_user["user_id"]
            user_profile = UserProfile.query.filter_by(user_id=user_id).one()
            invenio_full_name = user_profile.full_name

            ldap_user_department = ldap_user_get(ldap_user, "department")
            invenio_user_department = invenio_user["remote_account_department"]

            user = User.query.filter_by(id=user_id).one()
            ldap_user_email = ldap_user_get_email(ldap_user)
            invenio_user_email = user.email

            has_changed = (
                ldap_user_display_name != invenio_full_name
                or ldap_user_department != invenio_user_department
                or ldap_user_email != invenio_user_email
            )
            if has_changed:
                _update_invenio_user(
                    invenio_remote_account_id=invenio_user["remote_account_id"],
                    invenio_user_profile=user_profile,
                    invenio_user=user,
                    ldap_user=ldap_user,
                )
                _log_info(
                    log_uuid,
                    "department_updated",
                    dict(
                        user_id=invenio_user["user_id"],
                        previous_department=invenio_user_department,
                        new_department=ldap_user_department,
                    ),
                )
                # re-index modified patron
                patron_indexer.index(patron_cls(invenio_user["user_id"]))
                invenio_users_updated_count += 1

    db.session.commit()
    _log_info(log_uuid, "invenio_users_updated_and_deleted")

    # STEP 2
    # Import any new LDAP user not in Invenio yet, the remaining
    new_ldap_users = ldap_users_map.values()
    if new_ldap_users:
        importer = LdapUserImporter()
        for ldap_user in new_ldap_users:
            user_id = importer.import_user(ldap_user)
            email = ldap_user_get_email(ldap_user)
            employee_id = ldap_user_get(ldap_user, "employeeID")
            _log_info(log_uuid, "user_added", dict(email=email, employee_id=employee_id))
            # index newly added patron
            patron_indexer.index(patron_cls(user_id))
            invenio_users_added_count += 1

        db.session.commit()
        _log_info(log_uuid, "invenio_users_created")

    total_time = time.time() - start_time
    _log_info(log_uuid, "task_completed", dict(time=total_time))

    return (
        len(ldap_users),
        invenio_users_updated_count,
        invenio_users_added_count,
    )
8aef4e258629dd6e36b0a8b7b722031316df1154
11,621
def opt(dfs, col='new', a=1, b=3, rlprior=None, clprior=None):
    """Returns maximum likelihood estimates of the model parameters `r` and `c`.

    The optimised parameters `r` and `c` refer to the failure count of the
    model's negative binomial likelihood function and the variance factor
    introduced by each predictive prior, respectively.

    Args:
        dfs: a data frame or list/tuple of data frames containing counts.
        col: the column containing daily new infection counts.
        a, b: parameters of the initial predictive beta prime prior.
        rlprior, clprior: log density functions to be used as priors
            on `r` and `c` (uniform by default).
    """
    def f(r):
        return _optc(dfs, r, col, a, b, rlprior, clprior, copy=False)[1]

    if not isinstance(dfs, list) and not isinstance(dfs, tuple):
        dfs = [dfs]
    dfs = [df.copy() for df in dfs]  # create copies once, before optimising.

    # We double r until we pass a local minimum, and then optimize the two
    # regions that might contain that minimum separately.
    p, r = 1, 2
    while f(p) > f(r):
        p, r = r, 2*r
    r1, l1 = _cvxsearch(f, p//2, p)
    r2, l2 = _cvxsearch(f, p, r)
    if l1 <= l2:
        return r1, _optc(dfs, r1, col, a, b, rlprior, clprior, copy=False)[0]
    else:
        return r2, _optc(dfs, r2, col, a, b, rlprior, clprior, copy=False)[0]
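
# Hedged usage sketch (not from the source): `dfs` is one or more pandas
# DataFrames with a column of daily new infection counts ('new' by default);
# `_optc` and `_cvxsearch` are module-level helpers not shown here.
# r_hat, c_hat = opt(df)                                        # single data frame
# r_hat, c_hat = opt([df_region1, df_region2], col='new_cases')  # several regions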
d40e63892676f18734d3b4789656606e898f69d9
11,622
import json
from os.path import join


def unpackage_datasets(dirname, dataobject_format=False):
    """
    This function unpackages all sub-packages (i.e. train, valid, test).
    You should use this function if you want everything.

    args:
        dirname: directory path that has the train, valid, test folders in it
        dataobject_format: used for dataobject format
    """
    with open(join(dirname, 'room-data.json')) as f:
        lm = json.load(f)['Landmarks']

    res = {s: unpackage_dataset(join(dirname, s), dataobject_format)
           for s in ['train', 'valid', 'test']}
    res['landmarks'] = lm
    return res
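
# Hedged usage sketch: assumes `dirname` contains train/, valid/ and test/
# folders plus a room-data.json file with a "Landmarks" key (the path below is
# illustrative, not from the source).
# data = unpackage_datasets('datasets/house-1')
# sorted(data.keys())  # -> ['landmarks', 'test', 'train', 'valid']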
d1748b3729b4177315553eab5075d14ea2edf3a7
11,623
from typing import Sequence
from typing import Tuple
from typing import OrderedDict

import cmd


def get_command_view(
    is_running: bool = False,
    stop_requested: bool = False,
    commands_by_id: Sequence[Tuple[str, cmd.Command]] = (),
) -> CommandView:
    """Get a command view test subject."""
    state = CommandState(
        is_running=is_running,
        stop_requested=stop_requested,
        commands_by_id=OrderedDict(commands_by_id),
    )
    return CommandView(state=state)
3eaf1b8845d87c7eb6fef086bd2af3b2dd65409a
11,624
def get_node_ip_addresses(ipkind):
    """
    Gets a dictionary of required IP addresses for all nodes

    Args:
        ipkind: ExternalIP or InternalIP or Hostname

    Returns:
        dict: Internal or External IP addresses keyed off of node name

    """
    ocp = OCP(kind=constants.NODE)
    masternodes = ocp.get(selector=constants.MASTER_LABEL).get("items")
    workernodes = ocp.get(selector=constants.WORKER_LABEL).get("items")
    nodes = masternodes + workernodes

    return {
        node["metadata"]["name"]: each["address"]
        for node in nodes
        for each in node["status"]["addresses"]
        if each["type"] == ipkind
    }
622217c12b763c6dbf5c520d90811bdfe374e876
11,626
import numpy as np


def house_filter(size, low, high):
    """
    Function that returns the "gold standard" filter.
    This window is designed to produce low sidelobes for Fourier filters.
    In essence it resembles a sigmoid function that smoothly goes between
    zero and one, from short to long time.
    """
    filt = np.zeros(size)

    def eval_filter(rf, c1, c2, c3, c4):
        r1 = 1. - rf**2.
        r2 = r1**2.
        r3 = r2 * r1
        filt = c1 + c2*r1 + c3*r2 + c4*r3
        return filt

    coefficients = {
        "c1": 0.074,
        "c2": 0.302,
        "c3": 0.233,
        "c4": 0.390
    }
    denom = (high - low + 1.0) / 2.
    if denom < 0.:
        raise ZeroDivisionError
    for i in range(int(low), int(high)):
        rf = (i + 1) / denom
        if rf > 1.5:
            filt[i] = 1.
        else:
            temp = eval_filter(rf, **coefficients)
            if temp < 0.:
                filt[i] = 1.
            else:
                filt[i] = 1. - temp
    filt[int(high):] = 1.
    return filt
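
# Hedged usage sketch: build a 512-point window that turns on between bins 20
# and 100 (the bin values and signal below are illustrative, not from the
# source), then apply it to a spectrum before an inverse FFT.
# window = house_filter(512, low=20, high=100)
# spectrum = np.fft.rfft(np.random.randn(1022))   # 512 frequency bins
# filtered = np.fft.irfft(spectrum * window[:spectrum.size])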
ef7f3fe3bb4410ce81fcd061e24d6f34a36f3a04
11,628
def PToData(inFA, data, err):
    """
    Copy host array to data

    Copies data from GPUFArray locked host array to data
    * inFA = input Python GPUFArray
    * data = FArray containing data array
    * err = Obit error/message stack
    """
    ################################################################
    return Obit.GPUFArrayToData(inFA.me, data.me, err.me)
0f943a8340c2587c75f7ba6783f160d5d3bede76
11,629
def maker(sql_connection, echo=False):
    """
    Get a sessionmaker object from a sql_connection.
    """
    engine = get_engine(sql_connection, echo=echo)
    m = orm.sessionmaker(bind=engine,
                         autocommit=True,
                         expire_on_commit=False)
    return m
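
# Hedged usage sketch: assumes the module-level get_engine() accepts an
# SQLAlchemy-style connection URL (the URL below is illustrative).
# Session = maker("sqlite:///:memory:")
# session = Session()  # sessions share the configured engine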
1296fa49058c8a583cf442355534d22b00bdaeea
11,631
def test_auth(request):
    """Tests authentication worked successfully."""
    return Response({"message": "You successfully authenticated!"})
59e065687333a4dd612e514e0f8ea459062c7cb3
11,632
def streak_condition_block() -> Block:
    """
    Create block with 'streak' condition, when rotation probability is low and
    target orientation repeats continuously in 1-8 trials.

    :return: 'Streak' condition block.
    """
    return Block(configuration.STREAK_CONDITION_NAME, streak_rotations_generator)
769b0f7b9ce8549f4bea75da066814b3e1f8a103
11,633
def resolve_appinstance(request, appinstanceid, permission='base.change_resourcebase',
                        msg=_PERMISSION_MSG_GENERIC, **kwargs):
    """
    Resolve the AppInstance by the provided primary key
    and check the optional permission.
    """
    return resolve_object(
        request,
        AppInstance,
        {'pk': appinstanceid},
        permission=permission,
        permission_msg=msg,
        **kwargs)
bb17c2a842c4f2fced1bce46bd1d05293a7b0edf
11,634
def statementTVM(pReact):
    """Use this function to produce the TVM statement"""
    T, V, mass = pReact.T, pReact.volume, pReact.mass
    statement = "\n{}: T: {:0.2f} K, V: {:0.2f} m^3, mass: {:0.2f} kg".format(
        pReact.name, T, V, mass)
    return statement
cda356678d914f90d14905bdcadf2079c9ebfbea
11,635
import numpy as np
from scipy import interpolate


def fill_NaNs_with_nearest_neighbour(data, lons, lats):
    """At each depth level and time, fill in NaN values with the nearest lateral
    neighbour. If the entire depth level is NaN, fill with values from the level
    above. The last two dimensions of data are the lateral dimensions.
    lons.shape and lats.shape = (data.shape[-2], data.shape[-1])

    :arg data: the data to be filled
    :type data: 4D numpy array

    :arg lons: longitude points
    :type lons: 2D numpy array

    :arg lats: latitude points
    :type lats: 2D numpy array

    :returns: a 4D numpy array
    """
    filled = data.copy()
    for t in range(data.shape[0]):
        for k in range(data.shape[1]):
            subdata = data[t, k, :, :]
            mask = np.isnan(subdata)
            points = np.array([lons[~mask], lats[~mask]]).T
            valid_data = subdata[~mask]
            try:
                filled[t, k, mask] = interpolate.griddata(
                    points, valid_data, (lons[mask], lats[mask]),
                    method='nearest'
                )
            except ValueError:
                # if the whole depth level is NaN,
                # set it equal to the level above
                filled[t, k, :, :] = filled[t, k - 1, :, :]
    return filled
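
# Hedged usage sketch with a tiny 1 x 1 x 2 x 2 field (values are illustrative,
# not from the source): the single NaN cell is replaced by its nearest valid
# lateral neighbour.
# lons = np.array([[0.0, 1.0], [0.0, 1.0]])
# lats = np.array([[0.0, 0.0], [1.0, 1.0]])
# field = np.array([[[[1.0, np.nan], [2.0, 3.0]]]])  # shape (1, 1, 2, 2)
# fill_NaNs_with_nearest_neighbour(field, lons, lats)[0, 0]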
cacde1f5a7e52535f08cd1154f504fb24293182e
11,636
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.version import compare_versions
import json


def check_stack_feature(stack_feature, stack_version):
    """
    Given a stack_feature and a specific stack_version, it validates that the feature is supported by the stack_version.
    :param stack_feature: Feature name to check if it is supported by the stack. For example: "rolling_upgrade"
    :param stack_version: Version of the stack
    :return: Will return True if successful, otherwise, False.
    """
    stack_features_config = default("/configurations/cluster-env/stack_features", None)
    data = _DEFAULT_STACK_FEATURES

    if not stack_version:
        return False

    if stack_features_config:
        data = json.loads(stack_features_config)

    for feature in data["stack_features"]:
        if feature["name"] == stack_feature:
            if "min_version" in feature:
                min_version = feature["min_version"]
                if compare_versions(stack_version, min_version, format=True) < 0:
                    return False
            if "max_version" in feature:
                max_version = feature["max_version"]
                if compare_versions(stack_version, max_version, format=True) >= 0:
                    return False
            return True

    return False
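
# Hedged usage sketch: the cluster-env property is expected to hold JSON of the
# form below (feature names and versions here are illustrative only).
# stack_features JSON:
#   {"stack_features": [
#       {"name": "rolling_upgrade", "min_version": "2.2.0.0"},
#       {"name": "config_versioning", "min_version": "2.3.0.0", "max_version": "2.5.0.0"}
#   ]}
# With that config in place:
#   check_stack_feature("rolling_upgrade", "2.4.0.0")    -> True
#   check_stack_feature("config_versioning", "2.6.0.0")  -> False (past max_version)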
e7417738f285d94d666ac8b72d1b8c5079469f02
11,638
def get_random_action_weights():
    """Get random weights for each action. e.g. [0.23, 0.57, 0.19, 0.92]"""
    return np.random.random((1, NUM_ACTIONS))
da929c6a64c87ddf9af22ab17636d0db011c8a45
11,639
import time
import datetime
from datetime import timezone


def rpg_radar2nc(data, path, larda_git_path, **kwargs):
    """
    This routine generates a daily NetCDF4 file for the RPG 94 GHz FMCW radar 'LIMRAD94'.

    Args:
        data (dict): dictionary of larda containers
        path (string): path where the NetCDF file is stored
    """
    dt_start = h.ts_to_dt(data['Ze']['ts'][0])

    h.make_dir(path)
    site_name = kwargs['site'] if 'site' in kwargs else 'no-site'
    cn_version = kwargs['version'] if 'version' in kwargs else 'python'
    ds_name = f'{path}/{h.ts_to_dt(data["Ze"]["ts"][0]):%Y%m%d}-{site_name}-limrad94.nc'
    ncvers = '4'

    repo = git.Repo(larda_git_path)
    sha = repo.head.object.hexsha

    with netCDF4.Dataset(ds_name, 'w', format=f'NETCDF{ncvers}') as ds:

        ds.Convention = 'CF-1.0'
        ds.location = data['Ze']['paraminfo']['location']
        ds.system = data['Ze']['paraminfo']['system']
        ds.version = f'Variable names and dimensions prepared for Cloudnet {kwargs["version"]} version'
        ds.title = 'LIMRAD94 (SLDR) Doppler Cloud Radar, calibrated Input for Cloudnet'
        ds.institution = 'Leipzig Institute for Meteorology (LIM), Leipzig, Germany'
        ds.source = '94 GHz Cloud Radar LIMRAD94\nRadar type: Frequency Modulated Continuous Wave,\n' \
                    'Transmitter power 1.5 W typical (solid state amplifier)\n' \
                    'Antenna Type: Bi-static Cassegrain with 500 mm aperture\nBeam width: 0.48deg FWHM'
        ds.reference = 'W Band Cloud Radar LIMRAD94\n' \
                       'Documentation and User Manual provided by manufacturer RPG Radiometer Physics GmbH\n' \
                       'Information about system also available at https://www.radiometer-physics.de/'
        ds.calibrations = f'remove Precip. ghost: {kwargs["ghost_echo_1"]}\n, ' \
                          f'remove curtain ghost: {kwargs["ghost_echo_2"]}\n' \
                          f'despeckle: {kwargs["despeckle"]}\n, ' \
                          f'number of standard deviations above noise: {kwargs["NF"]}\n'
        ds.git_description = f'pyLARDA commit ID {sha}'
        ds.description = 'Concatenated data files of LIMRAD 94GHz - FMCW Radar, used as input for Cloudnet processing, ' \
                         'filters applied: ghost-echo, despeckle, use only main peak'
        ds.history = 'Created ' + time.ctime(time.time())
        ds._FillValue = data['Ze']['paraminfo']['fill_value']

        ds.day = dt_start.day
        ds.month = dt_start.month
        ds.year = dt_start.year

        # ds.commit_id = subprocess.check_output(["git", "describe", "--always"]).rstrip()
        ds.history = 'Created ' + time.ctime(time.time()) + '\nfilters applied: ghost-echo, despeckle, main peak only'

        Ze_str = 'Zh' if cn_version == 'python' else 'Ze'
        vel_str = 'v' if cn_version == 'python' else 'vm'
        width_str = 'width' if cn_version == 'python' else 'sigma'
        dim_tupel = ('time', 'range') if cn_version == 'python' else ('range', 'time')

        n_chirps = len(data['no_av'])
        ds.createDimension('chirp', n_chirps)
        ds.createDimension('time', data['Ze']['ts'].size)
        ds.createDimension('range', data['Ze']['rg'].size)

        if cn_version == 'matlab':
            for ivar in ['Ze', 'VEL', 'sw', 'ldr', 'kurt', 'skew']:
                data[ivar]['var'] = data[ivar]['var'].T

        # coordinates
        nc_add_variable(
            ds, val=94.0, dimension=(),
            var_name='frequency', type=np.float32, long_name='Radar frequency', units='GHz'
        )
        nc_add_variable(
            ds, val=256, dimension=(),
            var_name='Numfft', type=np.float32, long_name='Number of points in FFT', units=''
        )
        nc_add_variable(
            ds, val=np.mean(data['MaxVel']['var']), dimension=(),
            var_name='NyquistVelocity', type=np.float32,
            long_name='Mean (over all chirps) Unambiguous Doppler velocity (+/-)', units='m s-1'
        )
        nc_add_variable(
            ds, val=data['Ze']['paraminfo']['altitude'], dimension=(),
            var_name='altitude', type=np.float32, long_name='Height of instrument above mean sea level', units='m'
        )
        nc_add_variable(
            ds, val=data['Ze']['paraminfo']['coordinates'][0], dimension=(),
            var_name='latitude', type=np.float32, long_name='latitude', units='degrees_north'
        )
        nc_add_variable(
            ds, val=data['Ze']['paraminfo']['coordinates'][1], dimension=(),
            var_name='longitude', type=np.float32, long_name='longitude', units='degrees_east'
        )

        if 'version' in kwargs and cn_version == 'python':
            nc_add_variable(
                ds, val=data['no_av'], dimension=('chirp',),
                var_name='NumSpectraAveraged', type=np.float32, long_name='Number of spectral averages', units=''
            )

        # time and range variable
        # convert to time since midnight
        if cn_version == 'python':
            ts = np.subtract(data['Ze']['ts'],
                             datetime.datetime(dt_start.year, dt_start.month, dt_start.day, 0, 0, 0,
                                               tzinfo=timezone.utc).timestamp()) / 3600
            ts_str = 'Decimal hours from midnight UTC to the middle of each day'
            ts_unit = f'hours since {dt_start:%Y-%m-%d} 00:00:00 +00:00 (UTC)'
            rg = data['Ze']['rg'] / 1000.0
        elif cn_version == 'matlab':
            ts = np.subtract(data['Ze']['ts'],
                             datetime.datetime(2001, 1, 1, 0, 0, 0, tzinfo=timezone.utc).timestamp())
            ts_str = 'Seconds since 1st January 2001 00:00 UTC'
            ts_unit = 'sec'
            rg = data['Ze']['rg']
        else:
            raise ValueError('Wrong version selected! Set version to "matlab" or "python"!')

        nc_add_variable(ds, val=ts, dimension=('time',), var_name='time', type=np.float64,
                        long_name=ts_str, units=ts_unit)
        nc_add_variable(ds, val=rg, dimension=('range',), var_name='range', type=np.float32,
                        long_name='Range from antenna to the centre of each range gate', units='km')
        nc_add_variable(ds, val=data['Azm']['var'], dimension=('time',), var_name='azimuth', type=np.float32,
                        long_name='Azimuth angle from north', units='degree')
        nc_add_variable(ds, val=data['Elv']['var'], dimension=('time',), var_name='elevation', type=np.float32,
                        long_name='elevation angle. 90 degree is vertical direction.', units='degree')

        # chirp dependent variables
        nc_add_variable(ds, val=data['MaxVel']['var'][0], dimension=('chirp',), var_name='DoppMax', type=np.float32,
                        long_name='Unambiguous Doppler velocity (+/-)', units='m s-1')

        # index plus (1 to n) for Matlab indexing
        nc_add_variable(ds, val=data['rg_offsets'], dimension=('chirp',), var_name='range_offsets', type=np.int32,
                        long_name='chirp sequences start index array in altitude layer array', units='-')

        # 1D variables
        nc_add_variable(ds, val=data['bt']['var'], dimension=('time',), var_name='bt', type=np.float32,
                        long_name='Direct detection brightness temperature', units='K')
        nc_add_variable(ds, val=data['LWP']['var'], dimension=('time',), var_name='lwp', type=np.float32,
                        long_name='Liquid water path', units='g m-2')
        nc_add_variable(ds, val=data['rr']['var'], dimension=('time',), var_name='rain', type=np.float32,
                        long_name='Rain rate from weather station', units='mm h-1')
        nc_add_variable(ds, val=data['SurfRelHum']['var'], dimension=('time',), var_name='SurfRelHum', type=np.float32,
                        long_name='Relative humidity from weather station', units='%')

        # 2D variables
        nc_add_variable(ds, val=data['Ze']['var'], dimension=dim_tupel, var_name=Ze_str, type=np.float32,
                        long_name='Radar reflectivity factor', units='mm6 m-3',
                        plot_range=data['Ze']['var_lims'], plot_scale='linear',
                        comment='Calibrated reflectivity. Calibration convention: in the absence of attenuation, '
                                'a cloud at 273 K containing one million 100-micron droplets per cubic metre will '
                                'have a reflectivity of 0 dBZ at all frequencies.')
        nc_add_variable(ds, val=data['VEL']['var'], dimension=dim_tupel, plot_range=data['VEL']['var_lims'],
                        plot_scale='linear', var_name=vel_str, type=np.float32, long_name='Doppler velocity',
                        units='m s-1', unit_html='m s<sup>-1</sup>',
                        comment='This parameter is the radial component of the velocity, with positive velocities '
                                'directed away from the radar.',
                        folding_velocity=data['MaxVel']['var'].max())
        nc_add_variable(ds, val=data['sw']['var'], dimension=dim_tupel, plot_range=data['sw']['var_lims'],
                        plot_scale='logarithmic', var_name=width_str, type=np.float32, long_name='Spectral width',
                        units='m s-1', unit_html='m s<sup>-1</sup>',
                        comment='This parameter is the standard deviation of the reflectivity-weighted velocities '
                                'in the radar pulse volume.')
        nc_add_variable(ds, val=data['ldr']['var'], dimension=dim_tupel, plot_range=[-30.0, 0.0], var_name='ldr',
                        type=np.float32, long_name='Linear depolarisation ratio', units='dB',
                        comment='This parameter is the ratio of cross-polar to co-polar reflectivity.')
        nc_add_variable(ds, val=data['kurt']['var'], dimension=dim_tupel, plot_range=data['kurt']['var_lims'],
                        var_name='kurt', type=np.float32, long_name='Kurtosis', units='linear')
        nc_add_variable(ds, val=data['skew']['var'], dimension=dim_tupel, plot_range=data['skew']['var_lims'],
                        var_name='Skew', type=np.float32, long_name='Skewness', units='linear')

    print('save calibrated to :: ', ds_name)

    return 0
d59a46c2872b6f82e81b54cfca953ebc0bc18a90
11,640
def load_ssl_user_from_request(request):
    """
    Loads SSL user from current request.

    SSL_CLIENT_VERIFY and SSL_CLIENT_S_DN need to be set in
    request.environ. This is set by the frontend httpd mod_ssl module.
    """
    ssl_client_verify = request.environ.get('SSL_CLIENT_VERIFY')
    if ssl_client_verify != 'SUCCESS':
        raise Unauthorized('Cannot verify client: %s' % ssl_client_verify)

    username = request.environ.get('SSL_CLIENT_S_DN')
    if not username:
        raise Unauthorized('Unable to get user information (DN) from client certificate')

    user = User.find_user_by_name(username)
    if not user:
        user = User.create_user(username=username)

    g.groups = []
    g.user = user
    return user
ff716c139f57f00345d622a849b714c67468d7bb
11,641
def get_all_users():
    """Gets all users"""
    response = user_info.get_all_users()
    return jsonify({'Users': response}), 200
f178c509afdae44831c1ede0cdfee39ca7ea6cec
11,642
def open_mailbox_maildir(directory, create=False):
    """
    There is a mailbox here.
    """
    return lazyMaildir(directory, create=create)
c19e5bf97da7adfe9a37e515f1b46047ced75108
11,643
def TANH(*args) -> Function:
    """
    Returns the hyperbolic tangent of any real number.

    Learn more: https://support.google.com/docs/answer/3093755
    """
    return Function("TANH", args)
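
# Hedged usage sketch: TANH builds a formula node that renders as the Sheets
# function call; the exact composition depends on this library's Function
# class, and the `Cell` helper below is purely illustrative.
#   formula = TANH(Cell("A1"))
#   str(formula)  # -> something like "TANH(A1)"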
1bb3dbe8147f366415cf78c39f5cf6df3b80ffca
11,644
def value_frequencies_chart_from_blocking_rules(
    blocking_rules: list, df: DataFrame, spark: SparkSession, top_n=20, bottom_n=10
):
    """Produce value frequency charts for the provided blocking rules

    Args:
        blocking_rules (list): A list of blocking rules as specified in a Splink
            settings dictionary
        df (DataFrame): Dataframe to profile
        spark (SparkSession): SparkSession object
        top_n (int, optional): Number of values with the highest frequencies to display.
            Defaults to 20.
        bottom_n (int, optional): Number of values with the lowest frequencies to display.
            Defaults to 10.

    Returns:
        Chart: If Altair is installed, return a chart. If not, then it returns the
        vega lite chart spec as a dictionary
    """
    col_combinations = blocking_rules_to_column_combinations(blocking_rules)

    return column_combination_value_frequencies_chart(
        col_combinations, df, spark, top_n, bottom_n
    )
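
# Hedged usage sketch: blocking rules are SQL-like equality conditions on the
# left/right records, as used in Splink settings. The column names below are
# illustrative, and `df`/`spark` are assumed to exist already.
# blocking_rules = [
#     "l.first_name = r.first_name and l.surname = r.surname",
#     "l.dob = r.dob",
# ]
# chart = value_frequencies_chart_from_blocking_rules(blocking_rules, df, spark)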
e9160c9cd14ced1b904fad67c72c10a027179f7f
11,645
def getOfflineStockDataManifest():
    """Returns manifest for the available offline data.
    If manifest is not found, creates an empty one.

    Returns:
        A dict with the manifest. For example:

        {'STOCK_1': {'first_available_date': datetime(2016, 1, 1),
                     'last_available_date': datetime(2017, 2, 28)},
         'STOCK_2': {'first_available_date': datetime(2014, 2, 4),
                     'last_available_date': datetime(2016, 6, 15)}}
    """
    if exists(offlineStockDataManifestPath):
        with open(offlineStockDataManifestPath) as manifest_file:
            return JSON.openJson(manifest_file)
    else:
        manifest = {}
        updateOfflineStockDataManifest(manifest)
        return manifest
40482daa96bf18a843f91bb4af00f37917e51340
11,646
def align_buf(buf: bytes, sample_width: int):
    """In case of buffer size not aligned to sample_width, pad it with 0s"""
    remainder = len(buf) % sample_width
    if remainder != 0:
        buf += b'\0' * (sample_width - remainder)
    return buf
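
# Hedged usage sketch (values are illustrative, not from the source): pad a
# 5-byte buffer to a whole number of 16-bit samples.
# padded = align_buf(b'\x01\x02\x03\x04\x05', 2)
# padded == b'\x01\x02\x03\x04\x05\x00'  # True, length is now a multiple of 2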
9d4996a8338fe532701ee82843d055d6d747f591
11,647