Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def treynor(rp: np.ndarray, rb: np.ndarray, rf: np.ndarray) -> np.ndarray: """Returns the treynor ratios for all pairs of p portfolios and b benchmarks Args: rp (np.ndarray): p-by-n matrix where the (i, j) entry corresponds to the j-th return of the i-th portfolio rb (np.ndarray): b-by-n matrix where the (i, j) entry corresponds to the j-th return of the i-th benchmark rf (np.ndarray): Scalar risk-free rate (as a 0-D tensor) Returns: np.ndarray: p-by-b matrix where the (i, j) entry corresponds to the treynor ratio for the i-th portfolio and j-th benchmark """ __expect_rp_rb_rf(rp, rb, rf) return kernels.treynor(rp, rb, rf)
dd247f7ea1c710939ac27e626b3f863f49818fec
12,800
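The row above delegates the actual computation to kernels.treynor, which is not shown. Below is a minimal NumPy sketch of the standard Treynor formula, (E[r_p] - r_f) / beta_{p,b} with beta estimated from sample covariances; the function name and the ddof choice are assumptions, not the library's API.

```python
import numpy as np

def treynor_sketch(rp: np.ndarray, rb: np.ndarray, rf: float) -> np.ndarray:
    """Hypothetical reference: (mean(rp) - rf) / beta for every portfolio/benchmark pair."""
    excess = rp.mean(axis=1) - rf                 # (p,) mean excess return per portfolio
    rp_c = rp - rp.mean(axis=1, keepdims=True)    # (p, n) centered portfolio returns
    rb_c = rb - rb.mean(axis=1, keepdims=True)    # (b, n) centered benchmark returns
    cov = rp_c @ rb_c.T / (rp.shape[1] - 1)       # (p, b) sample covariances
    beta = cov / rb_c.var(axis=1, ddof=1)         # beta[i, j] = cov(rp_i, rb_j) / var(rb_j)
    return excess[:, None] / beta                 # (p, b) Treynor ratios
```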
def virus_tsne_list(tsne_df, virus_df): """ return data dic """ tsne_df.rename(columns={"Unnamed: 0": "barcode"}, inplace=True) df = pd.merge(tsne_df, virus_df, on="barcode", how="left") df["UMI"] = df["UMI"].fillna(0) tSNE_1 = list(df.tSNE_1) tSNE_2 = list(df.tSNE_2) virus_UMI = list(df.UMI) res = {"tSNE_1": tSNE_1, "tSNE_2": tSNE_2, "virus_UMI": virus_UMI} return res
b8265f3f3d6b602d045a890434322727d8e1adc5
12,801
def sometimes(aug): """ Return a shortcut for iaa.Sometimes :param aug: augmentation method :type aug: iaa.meta.Augmenter :return: wrapped augmentation method :rtype: iaa.meta.Augmenter """ return iaa.Sometimes(0.5, aug)
95f7ece0b1da30c5a4e3be4d1a21e089e11d9036
12,802
def rev_to_b10(letters): """Convert an alphabet number to its decimal representation""" return sum( (ord(letter) - A_UPPERCASE + 1) * ALPHABET_SIZE**i for i, letter in enumerate(reversed(letters.upper())) )
b4850e97754f0404894673a51c1cce930e437f6c
12,803
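rev_to_b10 above relies on two module-level constants that the snippet does not define. Assuming the usual spreadsheet-style column numbering (A_UPPERCASE = ord('A'), ALPHABET_SIZE = 26), it behaves like this:

```python
A_UPPERCASE = ord('A')   # assumed constants, not shown in the snippet above
ALPHABET_SIZE = 26

def rev_to_b10(letters):
    """Convert an alphabet number to its decimal representation."""
    return sum(
        (ord(letter) - A_UPPERCASE + 1) * ALPHABET_SIZE**i
        for i, letter in enumerate(reversed(letters.upper()))
    )

print(rev_to_b10("A"))   # 1
print(rev_to_b10("Z"))   # 26
print(rev_to_b10("AB"))  # 28  (1 * 26 + 2, i.e. spreadsheet column "AB")
```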
import os import fnmatch def AddPath(match): """Helper for adding file path for WebRTC header files, ignoring other.""" file_to_examine = match.group(1) + '.h' # TODO(mflodman) Use current directory and find webrtc/. for path, _, files in os.walk('./webrtc'): for filename in files: if fnmatch.fnmatch(filename, file_to_examine): path_name = os.path.join(path, filename).replace('./', '') return '#include "%s"\n' % path_name # No path found, return original string. return '#include "'+ file_to_examine + '"\n'
8afe9abcd29a2a92ed142f743bd1469f823b5b14
12,804
def test_from_rsid(rsids, start_rsid): """Continue collecting publications for rsids in list, beginning with start_rsid Args: rsids (list): list of rsids to collect publications on start_rsid (str): rsid identifier to resume collecting publications on Returns: runtime_rsids (list): [start_rsid, onward...] start_rsid (str): starting rsid start_idx (str): starting rsid index rsids (list): [original list of ALL rsids] """ start_idx = rsids.index(start_rsid) # start_rsid index print(f"STARTING POINT SET TO: | INDEX: {start_idx} / {len(rsids)} | RSID: {rsids[start_idx]}") runtime_rsids = rsids[start_idx:] # runtime rsids return runtime_rsids, start_rsid, start_idx, rsids
bf2be86f28645addc08737e64f08695cd6b3a6d3
12,805
def _average_scada(times, values, nvalues): """ Function which down-samples SCADA values. :param times: Unix times of the data points. :param values: Corresponding sensor values. :param nvalues: Number of samples we average over. :return: new time values and the averaged sensor values. """ if len(times) % nvalues: nsamples = (len(times) // nvalues) - 1 else: nsamples = (len(times) // nvalues) res = np.zeros(nsamples, dtype=np.float32) new_times = np.zeros(nsamples, dtype=np.int64) for ind in range(nsamples): res[ind] = np.mean(values[ind * nvalues:(ind + 1) * nvalues]) new_times[ind] = np.mean(times[ind * nvalues:(ind + 1) * nvalues]) return new_times, res
8743e5065741299befe37b230a22512c65001a09
12,806
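A small worked example of the down-sampling above. Note that when len(times) is not a multiple of nvalues, nsamples becomes len(times) // nvalues - 1, so the trailing samples are dropped:

```python
import numpy as np

times = np.arange(10, dtype=np.int64)       # 10 fake unix timestamps
values = np.arange(10, dtype=np.float32)    # matching sensor readings

new_times, res = _average_scada(times, values, nvalues=3)
print(new_times)  # [1 4]   -> mean of timestamps 0-2 and 3-5
print(res)        # [1. 4.] -> mean of values 0-2 and 3-5; the remaining samples are dropped
```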
def main(): """ Test harness """ def game_factory(): """ Creates the game we need """ return Maze(Layout.from_string(Layout.MEDIUM_STR)) bot_factory = PlannedBot trainer = BotTrainer(game_factory, bot_factory, 16, 2, goal_score=13) start_time = time() generations, result = trainer.breed_best_bot() end_time = time() msg = 'After {} generations, the bot {} the game'.format( generations, 'won' if result.finished else 'lost') print(msg) print('Elapsed time:', int(end_time - start_time + 0.5), 'seconds') print('Bot score:', result.score) print('Bot plan:', result.player.moves)
0528a4a4c51a4b9491314555d2ccd5c5b9baf328
12,807
def behavior_of(classname): """ Finds and loads the behavior class for C++ (decoded) classname or returns None if there isn't one. Behaviors do not have a required base class, and they may be used with Awkward Array's ``ak.behavior``. The search strategy for finding behavior classes is: 1. Translate the ROOT class name from C++ to Python with :py:func:`~uproot4.model.classname_encode`. For example, ``"ROOT::RThing"`` becomes ``"Model_ROOT_3a3a_RThing"``. 2. Look for a submodule of ``uproot4.behaviors`` without the ``"Model_"`` prefix. For example, ``"ROOT_3a3a_RThing"``. 3. Look for a class in that submodule with the fully encoded name. For example, ``"Model_ROOT_3a3a_RThing"``. See :py:mod:`uproot4.behaviors` for details. """ name = classname_encode(classname) assert name.startswith("Model_") name = name[6:] if name not in globals(): if name in behavior_of._module_names: exec( compile( "import uproot4.behaviors.{0}".format(name), "<dynamic>", "exec" ), globals(), ) module = eval("uproot4.behaviors.{0}".format(name)) behavior_cls = getattr(module, name, None) if behavior_cls is not None: globals()[name] = behavior_cls return globals().get(name)
ce588881e283f53755c7e468de298e6bc360cecc
12,808
import os def _abs_user_path(fpath): """don't overload the ap type""" return os.path.abspath(os.path.expanduser(fpath))
c5ee29b13783afcd5c6ad99d9e751f7ed5db58be
12,809
def adjust(data): """Calculate the mean of a list of values and subtract it from every element, making a new list. Returns tuple of mean, list of adjusted values """ mu = mean(data) return mu, [x - mu for x in data]
c0ddf7140dee90903452c16afb2625ded34c4d73
12,810
def clear(): """ Clears the world, and then returns the cleared representation """ myWorld.clear() return jsonify(myWorld.world())
4d999388696986ad9a0a954f3791f0a4795ef69a
12,811
from typing import Optional from typing import TextIO def _create_terminal_writer_factory(output: Optional[TextIO]): """ A factory method for creating a `create_terminal_writer` function. :param output: The receiver of all original pytest output. """ def _create_terminal_writer(config: Config, _file: Optional[TextIO] = None) -> TerminalWriter: file = output if output is not None else get_sink_io() return create_terminal_writer(config, file) return _create_terminal_writer
9c06bd4b10eb5b1dc0e3e4f4f9bdb20074cacf6e
12,812
import typing def filter( f: typing.Callable, stage: Stage = pypeln_utils.UNDEFINED, workers: int = 1, maxsize: int = 0, timeout: float = 0, on_start: typing.Callable = None, on_done: typing.Callable = None, ) -> Stage: """ Creates a stage that filter the data given a predicate function `f`. exactly like python's built-in `filter` function. ```python import pypeln as pl import time from random import random def slow_gt3(x): time.sleep(random()) # <= some slow computation return x > 3 data = range(10) # [0, 1, 2, ..., 9] stage = pl.sync.filter(slow_gt3, data, workers=3, maxsize=4) data = list(stage) # [3, 4, 5, ..., 9] ``` Arguments: f: A function with signature `f(x, **kwargs) -> bool`, where `kwargs` is the return of `on_start` if present. stage: A stage or iterable. workers: This parameter is not used and only kept for API compatibility with the other modules. maxsize: This parameter is not used and only kept for API compatibility with the other modules. timeout: Seconds before stoping the worker if its current task is not yet completed. Defaults to `0` which means its unbounded. on_start: A function with signature `on_start(worker_info?) -> kwargs`, where `kwargs` can be a `dict` of keyword arguments that will be passed to `f` and `on_done`. If you define a `worker_info` argument an object with information about the worker will be passed. This function is executed once per worker at the beggining. on_done: A function with signature `on_done(stage_status?, **kwargs)`, where `kwargs` is the return of `on_start` if present. If you define a `stage_status` argument an object with information about the stage will be passed. This function is executed once per worker when the worker finishes. !!! warning To implement `timeout` we use `stopit.async_raise` which has some limitations for stoping threads. Returns: If the `stage` parameters is given then this function returns a new stage, else it returns a `Partial`. """ if pypeln_utils.is_undefined(stage): return pypeln_utils.Partial( lambda stage: filter( f, stage=stage, workers=workers, maxsize=maxsize, timeout=timeout, on_start=on_start, on_done=on_done, ) ) stage = to_stage(stage) return Filter( f=f, on_start=on_start, on_done=on_done, timeout=timeout, dependencies=[stage], )
741a1d4f941a293b41c98872c59c8bf7e451bba5
12,813
import numpy def function_factory(model, loss, dataset): """A factory to create a function required by tfp.optimizer.lbfgs_minimize. Args: model [in]: an instance of `tf.keras.Model` or its subclasses. loss [in]: a function with signature loss_value = loss(pred_y, true_y). train_x [in]: the input part of training data. train_y [in]: the output part of training data. Returns: A function that has a signature of: loss_value, gradients = f(model_parameters). """ # obtain the shapes of all trainable parameters in the model shapes = tf.shape_n(model.trainable_variables) n_tensors = len(shapes) # we'll use tf.dynamic_stitch and tf.dynamic_partition later, so we need to # prepare required information first count = 0 idx = [] # stitch indices part = [] # partition indices for i, shape in enumerate(shapes): n = numpy.product(shape) idx.append(tf.reshape(tf.range(count, count+n, dtype=tf.int32), shape)) part.extend([i]*n) count += n part = tf.constant(part) @tf.function @tf.autograph.experimental.do_not_convert def assign_new_model_parameters(params_1d): """A function updating the model's parameters with a 1D tf.Tensor. Args: params_1d [in]: a 1D tf.Tensor representing the model's trainable parameters. """ params = tf.dynamic_partition(params_1d, part, n_tensors) for i, (shape, param) in enumerate(zip(shapes, params)): model.trainable_variables[i].assign(tf.reshape(param, shape)) #tf.print(model.trainable_variables[i]) @tf.function def volume_form(x, Omega_Omegabar, mass, restriction): kahler_metric = complex_math.complex_hessian(tf.math.real(model(x)), x) volume_form = tf.math.real(tf.linalg.det(tf.matmul(restriction, tf.matmul(kahler_metric, restriction, adjoint_b=True)))) weights = mass / tf.reduce_sum(mass) factor = tf.reduce_sum(weights * volume_form / Omega_Omegabar) #factor = tf.constant(35.1774, dtype=tf.complex64) return volume_form / factor # now create a function that will be returned by this factory def f(params_1d): """A function that can be used by tfp.optimizer.lbfgs_minimize. This function is created by function_factory. Args: params_1d [in]: a 1D tf.Tensor. Returns: A scalar loss and the gradients w.r.t. the `params_1d`. """ # use GradientTape so that we can calculate the gradient of loss w.r.t. parameters for step, (points, Omega_Omegabar, mass, restriction) in enumerate(dataset): with tf.GradientTape() as tape: # update the parameters in the model assign_new_model_parameters(params_1d) # calculate the loss det_omega = volume_form(points, Omega_Omegabar, mass, restriction) loss_value = loss(Omega_Omegabar, det_omega, mass) # calculate gradients and convert to 1D tf.Tensor grads = tape.gradient(loss_value, model.trainable_variables) grads = tf.dynamic_stitch(idx, grads) # reweight the loss and grads mass_sum = tf.reduce_sum(mass) try: total_loss += loss_value * mass_sum total_grads += grads * mass_sum total_mass += mass_sum except NameError: total_loss = loss_value * mass_sum total_grads = grads * mass_sum total_mass = mass_sum total_loss = total_loss / total_mass total_grads = total_grads / total_mass # print out iteration & loss f.iter.assign_add(1) tf.print("Iter:", f.iter, "loss:", total_loss) # store loss value so we can retrieve later tf.py_function(f.history.append, inp=[total_loss], Tout=[]) return total_loss, total_grads # store these information as members so we can use them outside the scope f.iter = tf.Variable(0) f.idx = idx f.part = part f.shapes = shapes f.assign_new_model_parameters = assign_new_model_parameters f.history = [] return f
2e50b3e085d2a88d76de31ba0fccb49f4f38dd1e
12,814
import copy import time def nn_CPRAND(tensor,rank,n_samples,n_samples_err,factors=None,exact_err=False,it_max=100,err_it_max=20,tol=1e-7,list_factors=False,time_rec=False): """ Add argument n_samples_err CPRAND for CP-decomposition in non negative case, with err_rand return also exact error Parameters ---------- tensor : tensor rank : int n_samples : int sample size n_samples_err : int sample size used for error estimation. The default is 400. factors : list of matrices, optional initial non negative factor matrices. The default is None. exact_err : boolean, optional whether use err or err_rand_fast for terminaison criterion. The default is False. (not useful for this version) it_max : int, optional maximal number of iteration. The default is 100. err_it_max : int, optional maximal of iteration if terminaison critirion is not improved. The default is 20. tol : float, optional error tolerance. The default is 1e-7. list_factors : boolean, optional If true, then return factor matrices of each iteration. The default is False. time_rec : boolean, optional If true, return computation time of each iteration. The default is False. Returns ------- the CP decomposition, number of iteration and exact / estimated termination criterion. list_fac and list_time are optional. """ N=tl.ndim(tensor) # order of tensor norm_tensor=tl.norm(tensor) # norm of tensor if list_factors==True : list_fac=[] if time_rec == True : list_time=[] if (factors==None): factors=svd_init_fac(tensor,rank) if list_factors==True : list_fac.append(copy.deepcopy(factors)) weights=None it=0 err_it=0 ######################################## ######### error initialization ######### ######################################## temp,ind_err=err_rand(tensor,weights,factors,n_samples_err) error=[temp/norm_tensor] min_err=error[len(error)-1] rng = tl.check_random_state(None) while (min_err>tol and it<it_max and err_it<err_it_max): if time_rec == True : tic=time.time() for n in range(N): Zs,indices=sample_khatri_rao(factors,n_samples,skip_matrix=n,random_state=rng) indices_list = [i.tolist() for i in indices] indices_list.insert(n, slice(None, None, None)) indices_list = tuple(indices_list) if (n==0) :sampled_unfolding = tensor[indices_list] else : sampled_unfolding =tl.transpose(tensor[indices_list]) V=tl.dot(tl.transpose(Zs),Zs) W=tl.dot(sampled_unfolding,Zs) # update fac, _, _, _ = hals_nnls(tl.transpose(W), V,tl.transpose(factors[n])) factors[n]=tl.transpose(fac) if list_factors==True : list_fac.append(copy.deepcopy(factors)) it=it+1 ################################ ######### error update ######### ################################ error.append(err_rand(tensor,weights,factors,n_samples_err,ind_err)[0]/norm_tensor) # same indices used as for Random Lesat Square Calculation if (error[len(error)-1]<min_err) : min_err=error[len(error)-1] # err update else : err_it=err_it+1 if time_rec == True : toc=time.time() list_time.append(toc-tic) if time_rec == True and list_factors==True: return(weights,factors,it,error,list_fac,list_time) if list_factors==True : return(weights,factors,it,error,list_fac) if time_rec==True : return(weights,factors,it,error,list_time) return(weights,factors,it,error)
8cd3402407a54579ef279efd8b3459c34933c9cb
12,815
def get_base_url(host_name, customer_id): """ :arg host_name: the host name of the IDNow gateway server :arg customer_id: your customer id :returns the base url of the IDNow API and the selected customer """ return 'https://{0}/api/v1/{1}'.format(host_name, customer_id)
5a24a87f597cf01c61ab6a01202b2e01e3b00bf8
12,816
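Usage of the helper above is a one-liner; the host name and customer id below are placeholders:

```python
print(get_base_url("gateway.example.com", "ACME"))
# https://gateway.example.com/api/v1/ACME
```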
def sample_category(user, **params): """Create and return a sample category""" defaults = { 'name': 'Sample category', 'persian_title': 'persian', 'parent_category': None } defaults.update(params) return Category.objects.create(user=user, **defaults)
ec013f1b699c4ae76acb0c78819da875b2453846
12,817
from typing import Dict def lindbladian_average_infid_set( propagators: dict, instructions: Dict[str, Instruction], index, dims, n_eval ): """ Mean average fidelity over all gates in propagators. Parameters ---------- propagators : dict Contains unitary representations of the gates, identified by a key. index : int Index of the qubit(s) in the Hilbert space to be evaluated dims : list List of dimensions of qubits proj : boolean Project to computational subspace Returns ------- tf.float64 Mean average fidelity """ infids = [] for gate, propagator in propagators.items(): perfect_gate = instructions[gate].get_ideal_gate(dims) infid = lindbladian_average_infid(perfect_gate, propagator, index, dims) infids.append(infid) return tf.reduce_mean(infids)
71fcc97afc80bae0e53aea2fafd30b8279f76d08
12,818
def edit(request, course_id): """ Teacher form for editing a course """ course = get_object_or_404(Course, id=course_id) courseForm = CourseForm(request.POST or None, instance=course) if request.method == 'POST': # Form was submitted if courseForm.is_valid(): courseForm.save() messages.add_message(request, messages.SUCCESS, f'The course {course.code} - {course.title} was altered!') return redirect('course:index') return render(request, 'course/edit.html', {'form': courseForm})
d4f39a26598108d9d5f03ad18fa6de26d88d849d
12,819
def _build_ontology_embedded_list(): """ Helper function intended to be used to create the embedded list for ontology. All types should implement a function like this going forward. """ synonym_terms_embed = DependencyEmbedder.embed_defaults_for_type(base_path='synonym_terms', t='ontology_term') definition_terms_embed = DependencyEmbedder.embed_defaults_for_type(base_path='definition_terms', t='ontology_term') return synonym_terms_embed + definition_terms_embed
2245b82313e26ba741200e24d323f6aa6b9741e0
12,820
def interp1d(x,y,xi,axis=None,extrap=True): """ Args: x (uniformly sampled vector/array): sampled x values y (array): sampled y values xi (array): x values to interpolate onto axis (int): axis along which to interpolate. extrap (bool): if True, use linear extrapolation based on the extreme values. If false, nearest neighbour is used for extrapolation instead. """ x=np.asarray(x) if axis is None: axis=get_axis(x) return mathx.interp1d_lin_reg(zero(x,axis),delta(x,axis),y,xi,axis,extrap)
081c4f5156cc653804cbd770edaf01ecdb426a51
12,821
import threading def _back_operate( servicer, callback, work_pool, transmission_pool, utility_pool, termination_action, ticket, default_timeout, maximum_timeout): """Constructs objects necessary for back-side operation management. Also begins back-side operation by feeding the first received ticket into the constructed _interfaces.ReceptionManager. Args: servicer: An interfaces.Servicer for servicing operations. callback: A callable that accepts packets.BackToFrontPackets and delivers them to the other side of the operation. Execution of this callable may take any arbitrary length of time. work_pool: A thread pool in which to execute customer code. transmission_pool: A thread pool to use for transmitting to the other side of the operation. utility_pool: A thread pool for utility tasks. termination_action: A no-arg behavior to be called upon operation completion. ticket: The first packets.FrontToBackPacket received for the operation. default_timeout: A length of time in seconds to be used as the default time alloted for a single operation. maximum_timeout: A length of time in seconds to be used as the maximum time alloted for a single operation. Returns: The _interfaces.ReceptionManager to be used for the operation. """ lock = threading.Lock() with lock: termination_manager = _termination.back_termination_manager( work_pool, utility_pool, termination_action, ticket.subscription) transmission_manager = _transmission.back_transmission_manager( lock, transmission_pool, callback, ticket.operation_id, termination_manager, ticket.subscription) operation_context = _context.OperationContext( lock, ticket.operation_id, packets.Kind.SERVICER_FAILURE, termination_manager, transmission_manager) emission_manager = _emission.back_emission_manager( lock, termination_manager, transmission_manager) ingestion_manager = _ingestion.back_ingestion_manager( lock, work_pool, servicer, termination_manager, transmission_manager, operation_context, emission_manager) expiration_manager = _expiration.back_expiration_manager( lock, termination_manager, transmission_manager, ingestion_manager, ticket.timeout, default_timeout, maximum_timeout) reception_manager = _reception.back_reception_manager( lock, termination_manager, transmission_manager, ingestion_manager, expiration_manager) termination_manager.set_expiration_manager(expiration_manager) transmission_manager.set_ingestion_and_expiration_managers( ingestion_manager, expiration_manager) operation_context.set_ingestion_and_expiration_managers( ingestion_manager, expiration_manager) emission_manager.set_ingestion_manager_and_expiration_manager( ingestion_manager, expiration_manager) ingestion_manager.set_expiration_manager(expiration_manager) reception_manager.receive_packet(ticket) return reception_manager
46999af151338d0d8b15704e913801d9f2c80696
12,822
from typing import Tuple import datasets def load_train_val_data( data_dir: str, batch_size: int, training_fraction: float) -> Tuple[DataLoader, DataLoader]: """ Returns two DataLoader objects that wrap training and validation data. Training and validation data are extracted from the full original training data, split according to training_fraction. """ full_train_data = datasets.FashionMNIST(data_dir, train=True, download=False, transform=ToTensor()) full_train_len = len(full_train_data) train_len = int(full_train_len * training_fraction) val_len = full_train_len - train_len (train_data, val_data) = random_split(dataset=full_train_data, lengths=[train_len, val_len]) train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True) val_loader = DataLoader(val_data, batch_size=batch_size, shuffle=True) return (train_loader, val_loader)
9cc0c67532e5d77fa8653d43c4f537137905767c
12,823
def match(input_string, start_node): """Match a string against the NFA. input_string :: the string to match start_node :: the NFA start node return :: True | False """ # Initialize the running state set: the start node plus all nodes reachable via epsilon transitions current_state_set = [start_node] next_state_set = closure(current_state_set) # Read characters in a loop, building the next state set each time for i, ch in enumerate(input_string): # State set after consuming one character, plus nodes reachable via epsilon transitions current_state_set = move(next_state_set, ch) next_state_set = closure(current_state_set) # If the state set is empty, return False if next_state_set is None: return False # Return True once the last character has been read and an accepting state is present if has_accepted_state(next_state_set) and i == len(input_string) - 1: return True return False
fdc7c971cfeb3d0b13716ca1017c6557889d3f52
12,824
def _H_to_h(H): """Converts CIECAM02/CAM02-UCS hue composition (H) to raw hue angle (h).""" x0 = H % 400 * 360 / 400 h, _, _ = fmin_l_bfgs_b(lambda x: abs(h_to_H(x) - H), x0, approx_grad=True) return h % 360
a9ccf1ec14b467b8a5e05b5e71141a4113cf0c07
12,825
def filter_df_merge(cpu_df, filter_column=None): """ process cpu data frame, merge by 'model_name', 'batch_size' Args: cpu_df ([type]): [description] """ if not filter_column: raise Exception( "please assign filter_column for filter_df_merge function") df_lists = [] filter_column_lists = [] for k, v in cpu_df.groupby(filter_column, dropna=True): filter_column_lists.append(k) df_lists.append(v) final_output_df = df_lists[-1] # merge same model for i in range(len(df_lists) - 1): left_suffix = cpu_df[filter_column].unique()[0] right_suffix = df_lists[i][filter_column].unique()[0] print(left_suffix, right_suffix) if not pd.isnull(right_suffix): final_output_df = pd.merge( final_output_df, df_lists[i], how='left', left_on=['model_name', 'batch_size'], right_on=['model_name', 'batch_size'], suffixes=('', '_{0}_{1}'.format(filter_column, right_suffix))) # rename default df columns origin_column_names = list(cpu_df.columns.values) origin_column_names.remove(filter_column) suffix = final_output_df[filter_column].unique()[0] for name in origin_column_names: final_output_df.rename( columns={name: "{0}_{1}_{2}".format(name, filter_column, suffix)}, inplace=True) final_output_df.rename( columns={ filter_column: "{0}_{1}_{2}".format(filter_column, filter_column, suffix) }, inplace=True) final_output_df.sort_values( by=[ "model_name_{0}_{1}".format(filter_column, suffix), "batch_size_{0}_{1}".format(filter_column, suffix) ], inplace=True) return final_output_df
bc0e147ada18cbbb3e8450f8764d80be3ca32315
12,826
def MRP2Euler121(q): """ MRP2Euler121(Q) E = MRP2Euler121(Q) translates the MRP vector Q into the (1-2-1) euler angle vector E. """ return EP2Euler121(MRP2EP(q))
9cd8da8d38ad668b928ed896004611e85571be0d
12,827
def nlayer(depth=64): """Constructs an NLayer_D model. Args: depth (int): depth passed through to NLayer_D. Defaults to 64. """ model = NLayer_D(depth=depth) return model
b8e57716e6b9576de9524cf730309885a79d0bfa
12,828
import typing def deserialize( value: ElementTree.Element, cipher: PSCryptoProvider, **kwargs: typing.Any, ) -> typing.Optional[typing.Union[bool, PSObject]]: """Deserialize CLIXML to a Python object. Deserializes a CLIXML XML Element from .NET to a Python object. Args: value: The CLIXML XML Element to deserialize to a Python object. cipher: The Runspace Pool cipher to use for SecureStrings. kwargs: Optional parameters to send to the FromPSObjectForRemoting method on classes that use that. Returns: Optional[Union[bool, PSObject]]: The Python object deserialized from the CLIXML element. """ return _Serializer(cipher, **kwargs).deserialize(value)
96b53a08c5c8273f29e108e020c40ab806c0949c
12,829
import requests def is_url_ok(url: str) -> bool: """Check whether the given URL is reachable (responds with HTTP 200).""" try: r = requests.get(url) return r.status_code == 200 except Exception: return False
97e0ba4b609282ef0dc166f0f0407e4aacdf30b2
12,830
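A hedged variant of the check above: a HEAD request with a timeout avoids downloading the response body and keeps a hung server from blocking the caller. This is a sketch of an alternative, not the snippet's original behavior.

```python
import requests

def is_url_ok_head(url: str, timeout: float = 5.0) -> bool:
    """Variant sketch: HEAD request with a timeout instead of a full GET."""
    try:
        r = requests.head(url, allow_redirects=True, timeout=timeout)
        return r.status_code == 200
    except requests.RequestException:
        return False
```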
def calculate_pair_energy_np(coordinates, i_particle, box_length, cutoff): """ Calculate the interaction energy of a particle with its environment (all other particles in the system) Parameters ---------------- coordinates : list the coordinates for all particles in the system i_particle : int particle number for which to calculate energy box_length : float length of the simulation box. Assumes a cubic box cutoff : float simulation cutoff. Beyond this distance, interactions aren't calculated Returns --------------- float pairwise interaction energy of the i-th particle with all other particles in the system """ particle_distances = calculate_distance_np(coordinates[i_particle], coordinates[i_particle+1:], box_length) particle_distances_filtered = particle_distances[particle_distances < cutoff] return calculate_LJ_np(particle_distances_filtered).sum()
c626d1398312e42fd72d70c9b23d397fce5070fd
12,831
def lwhere(mappings, **cond): """Selects mappings containing all pairs in cond.""" return list(where(mappings, **cond))
ade55be28f75ae082833948306c43e4070525f7e
12,832
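lwhere above is a thin wrapper over a where generator that is not shown; assuming the funcy-style semantics the docstring implies (keep only mappings whose items include every key/value pair in cond), it behaves like this:

```python
posts = [
    {"title": "A", "draft": True},
    {"title": "B", "draft": False},
]
print(lwhere(posts, draft=False))              # [{'title': 'B', 'draft': False}]
print(lwhere(posts, draft=False, title="A"))   # []  -- every pair in cond must match
```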
import re def get_number(message, limit=4): """ convert Chinese to pinyin and extract useful numbers attention: 1. only for integer 2. before apply this method, the message should be preprocessed input: message: the message you want to extract numbers from. limit: limit the length of number sequence """ words = pinyin.get_pinyin(message).split('-') numbers = [] tmp = '' count = 0 for w in words: if re.search(r'\W', w, re.A): for s in list(w): if s in special_char.keys(): count += 1 tmp += special_char[s] else: if count >= limit: numbers.append(tmp) count = 0 tmp = '' elif w in pinyin2number.keys(): count += 1 tmp += pinyin2number[w] else: if count >= limit: numbers.append(tmp) count = 0 tmp = '' if count >= limit: numbers.append(tmp) return numbers
ae1cc6886a4a2931baa61fcb201ffa67f70aecf6
12,833
import traceback import time import sqlite3 import os def get_db_connection(path, timeout=30, okay_to_create=False): """ Returns a properly configured SQLite database connection. :param path: path to DB :param timeout: timeout for connection :param okay_to_create: if True, create the DB if it doesn't exist :returns: DB connection object """ try: connect_time = time.time() conn = sqlite3.connect(path, check_same_thread=False, factory=GreenDBConnection, timeout=timeout) if path != ':memory:' and not okay_to_create: # attempt to detect and fail when connect creates the db file stat = os.stat(path) if stat.st_size == 0 and stat.st_ctime >= connect_time: os.unlink(path) raise DatabaseConnectionError(path, 'DB file created by connect?') conn.row_factory = sqlite3.Row conn.text_factory = str with closing(conn.cursor()) as cur: cur.execute('PRAGMA synchronous = NORMAL') cur.execute('PRAGMA count_changes = OFF') cur.execute('PRAGMA temp_store = MEMORY') cur.execute('PRAGMA journal_mode = DELETE') conn.create_function('chexor', 3, chexor) except sqlite3.DatabaseError: raise DatabaseConnectionError(path, traceback.format_exc(), timeout=timeout) return conn
ad15cb2352a6d92edfe71fd741494fcacb723a50
12,834
import sqlite3 def create_connection(db_file): """ Creates a database connection to the SQLite database specified by the db_file :param db_file: database file :return: Connection object or None """ conn = None try: conn = sqlite3.connect(db_file) except sqlite3.Error as e: print(e) return conn
37571690b5e970fc4344ee1d5d449b16cfc15896
12,835
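Typical usage of the helper above; the file and table names are placeholders. Using the connection as a context manager commits the transaction on success and rolls back on an exception.

```python
conn = create_connection("example.db")
if conn is not None:
    with conn:  # commits on success, rolls back on exception
        conn.execute("CREATE TABLE IF NOT EXISTS notes (id INTEGER PRIMARY KEY, body TEXT)")
        conn.execute("INSERT INTO notes (body) VALUES (?)", ("hello",))
    conn.close()
```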
def convex_hull(poly): """ ratio of the convex hull area to the area of the shape itself Altman's A_3 measure, from Neimi et al 1991. """ chull = to_shapely_geom(poly).convex_hull return poly.area / chull.area
0ed9b4803b87b4138cb5490b153376aae6e71e99
12,836
from typing import Dict from typing import List from typing import Tuple import torch def create_scifact_annotations( claims, corpus, tokenizer, class_to_id: Dict[str, int], neutral_class: str ) -> List[SciFactAnnotation]: """Create a SciFactAnnotation for each claim - evidence/cited document pair.""" def get_abstract_and_encoding( doc_id, ) -> Tuple[List[List[str]], List[torch.IntTensor]]: doc = [d for d in corpus if d["doc_id"] == int(doc_id)] assert len(doc) == 1 abstract = doc[0]["abstract"] encoding = [ torch.IntTensor(tokenizer.encode(sentence, add_special_tokens=False)) for sentence in abstract ] return abstract, encoding annotations = [] for c in claims: # Convert Interventions, Comparator, and Outcomes tokens to encodings intervention = torch.IntTensor(tokenizer.convert_tokens_to_ids(c["i_tokens"])) comparator = torch.IntTensor(tokenizer.convert_tokens_to_ids(c["c_tokens"])) outcome = torch.IntTensor(tokenizer.convert_tokens_to_ids(c["o_tokens"])) evidence = c["evidence"] # Handle claims with no evidence (label is NOT_ENOUGH_INFO) if not evidence: cited_doc_id = c["cited_doc_ids"][0] abstract, encoded_abstract = get_abstract_and_encoding(cited_doc_id) rationale_id = class_to_id[neutral_class] s_ann = SciFactAnnotation( claim_id=int(c["id"]), doc_id=int(cited_doc_id), sentences=abstract, encoded_sentences=encoded_abstract, rationale_sentences=[], i=intervention, c=comparator, o=outcome, rationale_class=neutral_class, rationale_id=rationale_id, ) annotations.append(s_ann) # Create a SciFact Annotation for each evidence document else: for doc_id, doc_rationales in evidence.items(): abstract, encoded_abstract = get_abstract_and_encoding(doc_id) rationale_class = doc_rationales[0]["label"] rationale_id = class_to_id[rationale_class] # extract all rationale sentence indices from the document rationale_sentences = [] for rationale in doc_rationales: rationale_sentences.extend(rationale["sentences"]) s_ann = SciFactAnnotation( claim_id=int(c["id"]), doc_id=int(doc_id), sentences=abstract, encoded_sentences=encoded_abstract, rationale_sentences=rationale_sentences, i=intervention, c=comparator, o=outcome, rationale_class=rationale_class, rationale_id=rationale_id, ) annotations.append(s_ann) return annotations
0a38b572bac113d6aa0a47e7628a5cc9fec85f16
12,837
import os def epsilon(tagfile): """Compute the total epsilon factor for each event Compute the flatfield correction from the P-flat and L-flat reference files (PFLTFILE and LFLTFILE respectively). Parameters ---------- tagfile, str input STIS time-tag data file Returns ------- epsilon, np.ndarray array of epsilons """ print("Calculating Epsilon") with fits.open(tagfile) as hdu: epsilon_out = np.ones(hdu[1].data['time'].shape) #-- Flatfield correction for ref_flat in ['PFLTFILE', 'LFLTFILE']: reffile = expand_refname(hdu[0].header[ref_flat]) print('FLATFIELD CORRECTION {}: {}'.format(ref_flat, reffile)) if not os.path.exists(reffile): print("{} not found, correction not performed".format(reffile)) return np.ones(len(hdu[1].data)) with fits.open(reffile) as image_hdu: image = image_hdu[1].data if not image.shape == (2048, 2048): x_factor = 2048 // image.shape[1] y_factor = 2048 // image.shape[0] print('Enlarging by {},{}'.format(x_factor, y_factor)) image = enlarge(image, x_factor, y_factor) #--indexing is 1 off if 'AXIS1' in hdu[1].data.names: epsilon_out *= map_image(image, hdu[1].data['AXIS1'] - 1, hdu[1].data['AXIS2'] - 1) else: epsilon_out *= map_image(image, hdu[1].data['XCORR'].astype(np.int32) - 1, hdu[1].data['YCORR'].astype(np.int32) - 1) return epsilon_out
a2170036c9047d0137fcd5a2564eeb404c88ad35
12,838
import math def sort_by_value(front, values): """ This function sorts the front list according to the values :param front: List of indexes of elements in the value :param values: List of values. Can be longer than the front list :return: """ copied_values = values.copy() # Copy so we can modify it sorted_list = [] while len(sorted_list) != len(front): min_value = copied_values.index(min(copied_values)) if min_value in front: sorted_list.append(min_value) copied_values[min_value] = math.inf return sorted_list
2d259ebbc0117f9aa043d78394b6423e596f176e
12,839
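A quick worked example of the front-sorting helper above (values may be longer than front; indices outside the front are skipped):

```python
front = [0, 2, 3]
values = [5.0, 1.0, 3.0, 2.0]        # index 1 belongs to a different front
print(sort_by_value(front, values))  # [3, 2, 0] -- front indices ordered by their value
```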
import os def save_exposure(fitstbl, frame, spectrograph, science_path, par, caliBrate, all_spec2d, all_specobjs): """ Save the outputs from extraction for a given exposure Args: frame (:obj:`int`): 0-indexed row in the metadata table with the frame that has been reduced. all_spec2d(:class:`pypeit.spec2dobj.AllSpec2DObj`): sci_dict (:obj:`dict`): Dictionary containing the primary outputs of extraction basename (:obj:`str`): The root name for the output file. Returns: None or SpecObjs: All of the objects saved to disk """ # TODO: Need some checks here that the exposure has been reduced? # Get the basename basename = fitstbl.construct_basename(frame) # Determine the headers row_fitstbl = fitstbl[frame] # Need raw file header information rawfile = fitstbl.frame_paths(frame) head2d = fits.getheader(rawfile, ext=spectrograph.primary_hdrext) # Check for the directory if not os.path.isdir(science_path): os.makedirs(science_path) subheader = spectrograph.subheader_for_spec(row_fitstbl, head2d) # 1D spectra if all_specobjs.nobj > 0: # Spectra outfile1d = os.path.join(science_path, 'spec1d_{:s}.fits'.format(basename)) all_specobjs.write_to_fits(subheader, outfile1d, update_det=par['rdx']['detnum'], slitspatnum=par['rdx']['slitspatnum']) # Info outfiletxt = os.path.join(science_path, 'spec1d_{:s}.txt'.format(basename)) all_specobjs.write_info(outfiletxt, spectrograph.pypeline) else: outfile1d = None # 2D spectra outfile2d = os.path.join(science_path, 'spec2d_{:s}.fits'.format(basename)) # Build header pri_hdr = all_spec2d.build_primary_hdr(head2d, spectrograph, redux_path=par['rdx']['redux_path'], master_key_dict=caliBrate.master_key_dict, master_dir=caliBrate.master_dir, subheader=subheader) # Write all_spec2d.write_to_fits(outfile2d, pri_hdr=pri_hdr, update_det=par['rdx']['detnum']) return outfile2d, outfile1d
2e4ee240fc5d2e5607966aa3d77d50777f300e81
12,840
def get_cell_area(self, indices=[]): """Return the area of the cells on the outer surface. Parameters ---------- self : MeshVTK a MeshVTK object indices : list list of the points to extract (optional) Returns ------- areas: ndarray Area of the cells """ surf = self.get_surf(indices) return surf.compute_cell_sizes(area=True)["Area"]
518416acfae67f1b6e1280d5fd903d311d57f4d8
12,841
def _to_dataarray(origins, sources, values): """ Converts grid_search inputs to DataArray """ origin_dims = ('origin_idx',) origin_coords = [np.arange(len(origins))] origin_shape = (len(origins),) source_dims = sources.dims source_coords = sources.coords source_shape = sources.shape return MTUQDataArray(**{ 'data': np.reshape(values, source_shape + origin_shape), 'coords': source_coords + origin_coords, 'dims': source_dims + origin_dims, })
d7ac153f4e872e55ab55ddb76dfcf994e4523443
12,842
from typing import Optional from typing import List from pathlib import Path import inspect import pprint import tempfile import warnings import json def package(metadata: Metadata, requirements: Optional[List[str]] = None, path: Optional[str] = None): """Packages the chatbot into a single archive for deployment. Performs some preliminary checks on the metadata. Creates a _package.zip file in the directory containing the file that contains the bot class unless a path is provided. :param metadata: :param requirements: :param path: :return: """ bot_file = Path(inspect.getfile(metadata.input_class)) print("Running verification checks on metadata.") metadata.verify(bot_file) metadata_dict = { 'name': metadata.name, 'imageUrl': metadata.image_url, 'color': metadata.color, 'developerUid': metadata.developer_uid, 'description': metadata.description, 'inputFile': bot_file.stem, 'inputClass': metadata.input_class.__name__, 'memory': metadata.memory, } print("Prepared metadata:") pprint.pprint(metadata_dict) print("Preparing temporary directory...") with tempfile.TemporaryDirectory() as temp_dir: # Copy files in bot directory def ignore(src, names): ignore_list = [] for name in names: # e.g .git folder is not wanted if name.startswith('.') or name.startswith('_package.zip'): warnings.warn( f"Ignoring files which start with '.': {name}.", RuntimeWarning ) ignore_list.append(name) if name == "main.py": raise RuntimeError("Bot root directory cannot contain a main.py file.") return ignore_list copytree(bot_file.parent, temp_dir, ignore=ignore) # Write metadata.json with (Path(temp_dir) / "metadata.json").open("w") as f: json.dump(metadata_dict, f) # Write requirements.txt if requirements: write_valid_requirements_file(Path(temp_dir) / "requirements.txt", requirements) # Create zip if path is None: path = bot_file.parent / "_package.zip" else: path = Path(path) with path.open("wb") as f: zipfile_from_folder(temp_dir, f) print(f"Created zip package at {path}.")
0fb974eef4c36bc5fa0e5366eb1bf4634585025a
12,843
def warp_grid(grid: tf.Tensor, theta: tf.Tensor) -> tf.Tensor: """ Perform transformation on the grid. - grid_padded[i,j,k,:] = [i j k 1] - grid_warped[b,i,j,k,p] = sum_over_q (grid_padded[i,j,k,q] * theta[b,q,p]) :param grid: shape = (dim1, dim2, dim3, 3), grid[i,j,k,:] = [i j k] :param theta: parameters of transformation, shape = (batch, 4, 3) :return: shape = (batch, dim1, dim2, dim3, 3) """ grid_size = grid.get_shape().as_list() # grid_padded[i,j,k,:] = [i j k 1], shape = (dim1, dim2, dim3, 4) grid_padded = tf.concat([grid, tf.ones(grid_size[:3] + [1])], axis=3) # grid_warped[b,i,j,k,p] = sum_over_q (grid_padded[i,j,k,q] * theta[b,q,p]) # shape = (batch, dim1, dim2, dim3, 3) grid_warped = tf.einsum("ijkq,bqp->bijkp", grid_padded, theta) return grid_warped
570c3acb6c57aff18b27deaa2ab5401e0fac23b6
12,844
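A minimal sanity check for the einsum above: with theta set to the identity plus a translation, every warped coordinate is just the grid coordinate shifted. The grid construction below is an assumption about how the (dim1, dim2, dim3, 3) grid is laid out.

```python
import tensorflow as tf

grid = tf.stack(
    tf.meshgrid(tf.range(2.0), tf.range(3.0), tf.range(4.0), indexing="ij"), axis=-1
)  # shape (2, 3, 4, 3), grid[i, j, k] == [i, j, k]
theta = tf.constant([[[1.0, 0.0, 0.0],
                      [0.0, 1.0, 0.0],
                      [0.0, 0.0, 1.0],
                      [5.0, 0.0, 0.0]]])  # identity rotation plus a shift of 5 along the first axis
warped = warp_grid(grid, theta)           # shape (1, 2, 3, 4, 3)
# warped[0, ..., 0] == grid[..., 0] + 5, the other components are unchanged
```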
async def makenotifyrole(guild): """Make the notify role in the given guild. :type guild: discord.Guild :rtype: None | discord.Role :param guild: Guild instance to create the role in. :return: The created role, possibly None if the creation failed. """ userrole = None try: # The bot should have the ping any role perm, so the role doesn't need to be mentionable userrole = await guild.create_role(reason="Role created for notification", name=notifyrolename) except discord.Forbidden: # May not have permission pass # This should leave userrole as none return userrole
1da1eea0a1d510abdf21bc532f6c1d4ab6d41140
12,845
def mape(forecast: Forecast, target: Target) -> np.ndarray: """ Calculate MAPE. This method accepts one or many timeseries. For multiple timeseries pass a matrix (N, M) where N is the number of timeseries and M is the number of time steps. :param forecast: Predicted values. :param target: Target values. :return: Same-shape array with MAPE calculated for each time step of each timeseries. """ return 100 * np.abs(forecast - target) / target
47d68499aa351b70d466940d7f3722566cf67568
12,846
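A tiny numeric check of the formula above, using the same expression as the return statement (the values are arbitrary):

```python
import numpy as np

forecast = np.array([[9.0, 21.0]])
target = np.array([[10.0, 20.0]])
print(100 * np.abs(forecast - target) / target)   # [[10.  5.]] -- percent error per time step
```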
def reverse_weighted_graph(graph): """ Function for reverting direction of the graph (weights still the same) Args: graph: graph representation as Example: {1: {2: 1, 3: 5}, 2: {3: 2}, 4: {1: 2}} Returns: reversed graph Examples: >>> reverse_weighted_graph({1: {2: 1, 3: 5}, 2: {3: 2}, 4: {1: 2}}) defaultdict(<class 'dict'>, {2: {1: 1}, 3: {1: 5, 2: 2}, 1: {4: 2}}) """ rev_graph = defaultdict(dict) for node, neighborhood in graph.items(): for adj, weight in neighborhood.items(): rev_graph[adj].update(({node: weight})) return rev_graph
100e05bf3b5e937133321673531103c7abd94bdb
12,847
def clean_bin(): """permanently deletes entries - crud delete""" mongo.db.bin.remove() mongo.db.bin.insert({'_id': ObjectId()}) return redirect(url_for('get_bin', data_requested="teams"))
bb1cb957112826710572bb5930dd1683d4295997
12,848
def correct_by_threshold(img, threshold): """ correct the fMRI RSA results by threshold Parameters ---------- img : array A 3-D array of the fMRI RSA results. The shape of img should be [nx, ny, nz]. nx, ny, nz represent the shape of the fMRI-img. threshold : int The number of voxels used in correction. If threshold=n, only the similarity clusters consisting more than n voxels will be visualized. Returns ------- img : array A 3-D array of the fMRI RSA results after correction. The shape of img should be [nx, ny, nz]. nx, ny, nz represent the shape of the fMRI-img. """ if len(np.shape(img)) != 3: return "Invalid input" sx = np.shape(img)[0] sy = np.shape(img)[1] sz = np.shape(img)[2] nsmall = 1 while nsmall*nsmall*nsmall < threshold: nsmall = nsmall + 1 nlarge = nsmall + 2 for i in range(sx-nlarge+1): for j in range(sy-nlarge+1): for k in range(sz-nlarge+1): listlarge = list(np.reshape(img[i:i+nlarge, j:j+nlarge, k:k+nlarge], [nlarge*nlarge*nlarge])) if listlarge.count(0) < nlarge*nlarge*nlarge: index1 = 0 for l in range(nlarge): for m in range(nlarge): if img[i + l, j + m, k] == 0: index1 = index1 + 1 if img[i + l, j + m, k + nlarge - 1] == 0: index1 = index1 + 1 for l in range(nlarge-1): for m in range(nlarge-2): if img[i + l, j, k + m] == 0: index1 = index1 + 1 if img[i, j + l + 1, k + m] == 0: index1 = index1 + 1 if img[i + nlarge - 1, j + l, k + m] == 0: index1 = index1 + 1 if img[i + l + 1, j + nlarge - 1, k + m] == 0: index1 = index1 + 1 nex = nlarge * nlarge * nlarge - nsmall * nsmall * nsmall if index1 == nex: unit = img[i+1:i+1+nsmall, j+1:j+1+nsmall, k+1:k+1+nsmall] unit = np.reshape(unit, [nsmall*nsmall*nsmall]) list_internal = list(unit) index2 = nsmall*nsmall*nsmall-list_internal.count(0) if index2 < threshold: img[i+1:i+1+nsmall, j] for l in range(nsmall): for m in range(nsmall): for p in range(nsmall): img[i+1:i+1+nsmall, j+1:j+1+nsmall, k+1:k+1+nsmall] = np.zeros([nsmall, nsmall, nsmall]) print("finished correction") return img
67750aba6d03d82d9e41d2d53a82550e5a68a3e2
12,849
def config_date(dut, date): """ :param dut: :param date: :return: """ st.log("config date") command = "date --set='{}'".format(date) st.config(dut, command) return True
055db1a0ddb4d640d154aae4dec29e3845d7dfb8
12,850
def read_dicom(): """Read in DICOM series""" dicomPath = join(expanduser('~'), 'Documents', 'SlicerDICOMDatabase', 'TCIALocal', '0', 'images', '') reader = sitk.ImageSeriesReader() seriesIDread = reader.GetGDCMSeriesIDs(dicomPath)[1] dicomFilenames = reader.GetGDCMSeriesFileNames(dicomPath, seriesIDread) reader.SetFileNames(dicomFilenames) return reader.Execute()
64c4aae3c1cc0e31d6db46e741a3ecc52be580cc
12,851
import codecs import os def read_file(*file_paths): """Read text file.""" with codecs.open(os.path.join(ROOT_DIR, *file_paths), 'r') as fp: return fp.read()
485c69428bbff5fc0aaf3c1a9bafe2b6863a7bcd
12,852
import os import toml def load_config(): """Load the config file and validate contents.""" filename = os.path.join(user_config_dir("timetagger_cli"), config_fname) if not os.path.isfile(filename): raise RuntimeError("Config not set, run 'timetagger setup' first.") with open(filename, "rb") as f: config = toml.loads(f.read().decode()) if "api_url" not in config: raise RuntimeError("No api_url set in config. Run 'timetagger setup' to fix.") if not config["api_url"].startswith(("http://", "https://")): raise RuntimeError( "The api_url must start with 'http://' or 'https://'. Run 'timetagger setup' to fix." ) if "api_token" not in config: raise RuntimeError("No api_token set in config. Run 'timetagger setup' to fix.") return config
7440fb2a96fe360bcf99bef0085ecfa8a07f2aaa
12,853
import subprocess def zfs_upgrade_list(supported: bool = False) -> str: """ zfs upgrade [-v] Displays a list of file systems that are not the most recent version. -v Displays ZFS filesystem versions supported by the current software. The current ZFS filesystem version and all previous supported versions are displayed, along with an explanation of the features provided with each version. """ call_args = [] if supported: call_args.append("-v") command = _Command("upgrade", call_args) try: return command.run() except subprocess.CalledProcessError as e: raise RuntimeError(f"Failed to list upgradeable filesystems\n{e.output}\n")
a9665900425a9d6c9398762999e8c76193d9ee85
12,854
def L_model_backward(AL, Y, caches): """ Perform backward propagation for an L-layer neural network model Arguments: AL -- output of the model Y -- true labels caches -- every cache from linear_activation_forward() for the ReLU and Sigmoid activations Returns: grads -- dictionary containing all gradients grads["dA" + str(l)] = ... grads["dW" + str(l)] = ... grads["db" + str(l)] = ... """ grads = {} L = len(caches) # the number of layers m = AL.shape[1] Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL # Initialize the backward propagation dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # Gradients for the L-th layer. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"] current_cache = caches[L - 1] grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = \ linear_activation_backward(dAL, current_cache, activation="sigmoid") for l in reversed(range(L - 1)): # l-th layer: (RELU -> LINEAR) gradients current_cache = caches[l] dA_prev_temp, dW_temp, db_temp = \ linear_activation_backward(grads["dA" + str(l + 2)], current_cache, activation="relu") grads["dA" + str(l + 1)] = dA_prev_temp grads["dW" + str(l + 1)] = dW_temp grads["db" + str(l + 1)] = db_temp return grads
ef296179d51e8c4b8be474414f65f812b6f8ffb0
12,855
from sys import path def findbps(reads, output, bowtie_options, motif, length, threshold, strand): """ Input: reads: str of name of file where single-end, stranded RNA-seq reads in fastq format are located output:str of desired basename of output files bowtie_options: str of bowtie options you wish to be used for alignment of reads after splitting. See the bowtie manual. Recommend "-y -p 2 -v 0 -X 5000 -m 1 <index>" motif: list of dictionaries representing 5'ss motif position weight matrix. Each dictionary has a key for each nucleotide, with a float of the probability as keys. length:int of the lowest acceptable number of bases used to align a fragment of a read. threshold: float of the lowest acceptable probability that a sequence would be sampled from the given martrix in order to attempt mapping. Recommend 0.0 unless many false positives strand:str either 'first' if reads are first-stranded or 'second' if reads are second-stranded Output: output + '.bed': A file in paired-end bed format with information about the reads with a valid alignment. output + '_no_alignment.fastq': Reads with no valid alignment in the paired-end tab-delimited format described in the bowtie manual split as they were attempted to be aligned. """ #gets the name of the directory of this file directory = path.dirname(path.realpath(__file__)) #make these arguments into strings so they can be passed to fp_checker.py motif = '"' + dumps(motif) + '"' length = str(length) threshold = str(threshold) #this process splits each read at the most likely 5'SS based on the # given weight matrix and sends them to bowtie to be mapped # see fp_checker.py for further details fp_checker = Popen('python ' + directory + '/fp_checker.py ' + motif +' '+ length +' '+ threshold +' '+ strand, stdin = open(reads,'r'), stdout = PIPE, shell = True) #this process maps each split read to the given genome bowtie = Popen('bowtie --ff ' + bowtie_options + ' --12 - --un ' + output+'_no_alignment.fastq', stdin = fp_checker.stdout, stdout = PIPE, shell = True) fp_checker.stdout.close() #this process converts the bowtie output into a bed file # see make_bed.py for further details make_bed = Popen('python ' + directory + '/make_bed.py', stdin = bowtie.stdout, stdout = open(output + ".bed",'w'), shell = True) bowtie.stdout.close() make_bed.wait() return 0
eee9d313359b9ce00d67e27682038e9ec0c2f630
12,856
def Cnot(idx0: int = 0, idx1: int = 1) -> Operator: """Controlled Not between idx0 and idx1, controlled by |1>.""" return ControlledU(idx0, idx1, PauliX())
a087aa4d7fb22343523a8b6114a7b50eea971e21
12,857
def init_sql_references(conn): """ Utility function to get references from SQL. The returned objects conveniently identify users based on kb_name or user hashkey """ # get kb_names to kb_id kb_ref = pds.read_sql("""SELECT id, kb_name, directory_id FROM dbo.kb_raw""", conn) get_kb_dir_id = kb_ref.loc[:,['kb_name', 'directory_id']].set_index('kb_name').to_dict()['directory_id'] get_kb_raw_id = kb_ref.loc[:,['kb_name', 'id']].set_index('kb_name').to_dict()['id'] # get kb permissions permissions = pds.read_sql("SELECT hashkey, kb_name, user_id FROM dbo.users \ LEFT JOIN dbo.kb_directory ON dbo.users.id = dbo.kb_directory.user_id \ LEFT JOIN kb_raw ON dbo.kb_directory.id = dbo.kb_raw.directory_id \ ", conn) permissions = pd.DataFrame(np.array(permissions), columns = ['hashkey', 'kb_name', 'user_id']).set_index('hashkey') return get_kb_dir_id, get_kb_raw_id, permissions
3f9874632d50cd8a483d75573cc1d63561f253d2
12,858
def inoptimal_truncation_square_root(A, B, C, k, check_stability=False): """Use scipy to perform balanced truncation Use scipy to perform balanced truncation on a linear state-space system. This method is the natural application of scipy and inoptimal performance wise compared to `truncation_square_root_trans_matrix` See also ----- truncation_square_root_trans_matrix """ if check_stability and not isStable(A): raise ValueError("This doesn't seem to be a stable system!") AH = A.transpose().conj() P = linalg.solve_lyapunov(A, -np.dot(B, B.transpose().conj())) Q = linalg.solve_lyapunov(AH, -np.dot(C.transpose().conj(), C)) U = linalg.cholesky(P).transpose().conj() L = linalg.cholesky(Q) W, Sigma, V = linalg.svd(np.dot(U.transpose().conj(), L), full_matrices=False, overwrite_a=True, check_finite=False) W1 = W[:, :k] Sigma1 = Sigma[:k] V1 = V[:, :k] Sigma1_pow_neg_half = np.diag(Sigma1**-.5) T1 = np.dot(Sigma1_pow_neg_half, np.dot(V1.transpose().conj(), L.transpose().conj())) Ti1 = np.dot(np.dot(U, W1), Sigma1_pow_neg_half) return k, np.dot(T1, np.dot(A, Ti1)), np.dot(T1, B), np.dot(C, Ti1), \ Sigma, Ti1, T1
3c4fa1ac73f22f5e07d49314e1cf3d3b022349e8
12,859
def _tessellate_bed(chrom: str, chromStart: int, chromEnd: int, window_size: int) -> pd.DataFrame: """Return tessellated pandas dataframe splitting given window. Parameters ----------------------- chrom: str, Chromosome containing given window. chromStart: int, Position where the window starts. chromEnd: int, Position where the window ends. window_size: int Target window size. Returns ----------------------- Returns a pandas DataFrame in bed-like format containing the tessellated windows. """ return pd.DataFrame([ { "chrom": chrom, "chromStart": chromStart + window_size*i, "chromEnd": chromStart + window_size*(i+1), } for i in range((chromEnd - chromStart)//window_size) ])
706b031069dd334bc6f364e077398ced56b152a8
12,860
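Worked example of the tessellation above: a 900 bp region cut into 200 bp tiles yields four complete tiles, and the 100 bp remainder is dropped.

```python
df = _tessellate_bed("chr1", 1000, 1900, window_size=200)
print(df)
#   chrom  chromStart  chromEnd
# 0  chr1        1000      1200
# 1  chr1        1200      1400
# 2  chr1        1400      1600
# 3  chr1        1600      1800
```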
import os import pathlib def _replace_variables(dictionary): """Replace environment variables in a nested dict.""" for path in _walk(dictionary): value = path.pop() if isinstance(value, str) and _ispath(value): value = os.path.expandvars(value) value = pathlib.Path(value) last_key = path.pop() sub_dict = dictionary for key in path: sub_dict = sub_dict[key] sub_dict[last_key] = value return dictionary
0e753dad54a14d931a1b8075f5e14c0670245090
12,861
def compute_locksroot(locks: PendingLocksState) -> Locksroot: """Compute the hash representing all pending locks The hash is submitted in TokenNetwork.settleChannel() call. """ return Locksroot(keccak(b"".join(locks.locks)))
05c4996a9cc837939c662ef419e36421cb00033d
12,862
from typing import Union from typing import List def flatten(text: Union[str, List[str]], separator: str = None) -> str: """ Flattens the text item to a string. If the input is a string, that same string is returned. Otherwise, the text is joined together with the separator. Parameters ---------- text : Union[str, List[str]] The text to flatten separator : str, default=None The separator to join the list with. If `None`, the separator will be " " Returns ------- str The flattened text """ separator = separator or " " if isinstance(text, list): return separator.join(text) return text
3980e0d0d14ac5764c4c5844ab3a943d1971d0ad
12,863
def convert_byte32_arr_to_hex_arr(byte32_arr): """ This function takes in an array of byte32 strings and returns an array of hex strings. Parameters: byte32_arr Strings to convert from a byte32 array to a hex array """ hex_ids = [] for byte32_str in byte32_arr: hex_ids = hex_ids + [byte32_str.hex()] return hex_ids
9185c1e98b6eb10a42714e1fc53ebaed88997a82
12,864
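The conversion above only relies on bytes.hex(); for example (plain bytes objects standing in for web3-style byte32 values):

```python
raw = [bytes.fromhex("00ff"), b"\x12\x34"]
print(convert_byte32_arr_to_hex_arr(raw))   # ['00ff', '1234']
```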
import sys from tqdm import tqdm def process_ring(cat, ref, pairs, ringpairs, area, radius, sigma, sigma_init=None, gamma=None, niter=10, nextr=100, mid=True, printprogress=True, printerror=False): """ Estimate omega with robust algorithm in rings. Obtain optimal estimate from best ring. Internal function to process pairs that are already split into rings. :param cat: Input catalog dataframe with (x,y,z) coordinates. :param ref: Reference catalog dataframe with (x,y,z) coordinates. :param area: Area of the footprint, units in steradians. :param radius: Separation radius threshold. :param sigma: True value of sigma, the astrometric uncertainty of the catalog. :param sigma_init: If not None, assign a large initial value for sigma. :param gamma: Fraction of good matches among all pairs. If None, will be computed in estimation. :param niter: Min number of iterations for the convergence. :param nextr: Max number of additional iterations for the convergence. :param mid: Boolean value, indicate if reference as midpoints of the two catalogs :param printprogress: Boolean value, if true shows progress bar. :param printerror: Boolean value, indicate if track error. :type cat: pandas.DataFrame :type ref: pandas.DataFrame :type area: float :type radius: float :type sigma: float :type sigma_init: None or float :type gamma: None or float :type niter: int :type nextr: int :type mid: bool :type printprogress: bool :type printerror: bool :returns: (bestomega, bestpairs, bestwt) omega: 3D transformation vector estimated in the optimal ring by robust algorithm, bestpairs: pairs in the optimal ring, bestwt: robust weights for bestpairs. """ sigma_init = sigma_init or 25 * sigma # heuristic estimate for convergence parameter nrings = len(ringpairs) if printprogress: print(f"Split {pairs.shape[0]} pairs into {nrings} overlapping rings") print(f"process_ring: sigma {sigma} sigma_init {sigma_init}") # gamma = gamma or min(cat.shape[0],ref.shape[0]) / pairs.shape[0] if not gamma: # count just sources actually included in pairs # this makes a difference when search radius is small and many sources don't match n1 = (np.bincount(pairs[:,0])!=0).sum() n2 = (np.bincount(pairs[:,1])!=0).sum() gamma = min(n1,n2) / pairs.shape[0] # increase gamma because expected match is higher in the correct ring #gfac = pairs.shape[0] / np.mean([x.shape[0] for x in ringpairs]) #gamma = gamma * gfac #if printprogress and gfac != 1: # print(f"Increased gamma by factor {gfac:.2f} to {gamma}") # Initial best sum(weight)=0 bestwtsum = 0.0 bestomega = None bestring = nrings if printprogress: # print progress bar (but disable on non-TTY output) # disable = None # print progress bar disable = False else: # do not print progress bar disable = True sys.stdout.flush() loop = tqdm(total=nrings, position=0, leave=False, disable=disable) # loop over all pairs to find optimal omega estimate for iring in range(nrings): rpairs = ringpairs[iring] # paired catalog and reference in ring r,c = getRC(cat, ref, rpairs, mid) # estimate omega using robust algorithm try: omega, w = rob_est(r, c, sigma, gamma, area, sigma_init=sigma_init, niter=niter, nextr=nextr, printerror=printerror, verbose=printprogress>1) except SingularMatrixError as e: if printerror: print(e) print('continuing to next ring') continue # Sum of weights is the number of good pairs wtsum = w.sum() if wtsum > bestwtsum: bestring = iring bestpairs = rpairs bestomega = omega bestwtsum = wtsum bestwt = w if not printerror: loop.set_description("Computing...".format(iring)) loop.update(1) loop.close() if bestomega is None: if printerror: print("process_ring: no solution found") return np.zeros(3), np.zeros((0,2),dtype=int), np.zeros(0,dtype=float) return bestomega, bestpairs, bestwt
871488f467825d5226a19dc6840e9c145f7754d8
12,865
from typing import Union
from typing import Tuple


def backtest_loop(
    start_time: Union[pd.Timestamp, str],
    end_time: Union[pd.Timestamp, str],
    trade_strategy: BaseStrategy,
    trade_executor: BaseExecutor,
) -> Tuple[PortfolioMetrics, Indicator]:
    """backtest function for the interaction of the outermost strategy and executor in the nested decision execution

    please refer to the docs of `collect_data_loop`

    Returns
    -------
    portfolio_metrics: PortfolioMetrics
        it records the trading portfolio_metrics information
    indicator: Indicator
        it computes the trading indicator
    """
    return_value = {}
    for _decision in collect_data_loop(start_time, end_time, trade_strategy, trade_executor, return_value):
        pass

    return return_value.get("portfolio_metrics"), return_value.get("indicator")
74620671f0e37b7439d15d76e0e3e92b8984a608
12,866
import functools


def failOnNonTransient(func):
    """Only allow function execution when immutable is transient."""
    @functools.wraps(func)
    def wrapper(inst, *args, **kwargs):
        # make the call fail if the object is not transient
        if inst.__im_state__ != interfaces.IM_STATE_TRANSIENT:
            raise AttributeError('Cannot update locked immutable object.')
        return func(inst, *args, **kwargs)
    return wrapper
46b94385084a6b7dae9149cfe8864b94df3ed5ea
12,867
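A minimal, hypothetical usage sketch for the decorator above: it assumes the `interfaces` module and its `IM_STATE_TRANSIENT` constant from the surrounding immutable-object framework are importable; the `Record` class is illustrative only.

# Guard a mutating method so it only works while the object is transient.
class Record:
    def __init__(self):
        self.__im_state__ = interfaces.IM_STATE_TRANSIENT
        self.items = []

    @failOnNonTransient
    def append(self, value):
        self.items.append(value)

r = Record()
r.append(1)            # allowed while transient
r.__im_state__ = None  # simulate the object being locked
r.append(2)            # raises AttributeError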
import emoji


def text_has_emoji(text):
    """Return True if the text contains at least one emoji character."""
    for character in text:
        if character in emoji.UNICODE_EMOJI:
            return True
    return False
8fd0cfb2aed42a6b149f29ffea5d65bc901c5353
12,868
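A quick usage sketch, assuming an emoji package version that still exposes a flat `emoji.UNICODE_EMOJI` mapping keyed by emoji character:

print(text_has_emoji("hello"))      # False
print(text_has_emoji("hello 😀"))   # True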
def rod_faces(n1, n2, xform, dim1, dim2):  # validated
    """
    defines points in a circle with triangle based end caps
    """
    # 4,8,12,16,... becomes 5,9,13,17,...
    thetas = np.radians(np.linspace(0., 360., 17))
    ntheta = len(thetas)

    nfaces = 0
    all_faces = []
    points_list = []
    x = np.zeros(ntheta)
    for nid, dim in [(n1, dim1), (n2, dim2)]:
        radius, = dim
        y = radius * np.cos(thetas)
        z = radius * np.sin(thetas)
        xyz = np.vstack([x, y, z]).T
        assert xyz.shape == (ntheta, 3), xyz.shape

        pointsi = np.dot(xyz, xform) + nid
        points_list.append(pointsi)

        # the tri_cap is made from points that aren't defined yet
        # (the n1/n2 end points)
        tris = tri_cap(ntheta)

        # we need to use the tolist because we're going to
        # combine quads and tris (the elements have different
        # lengths)
        all_faces += (nfaces + tris).tolist()
        nfaces += tris.shape[0]

    # the main cylinder uses the points defined independent
    # of the points n1/n2
    faces = elements_from_quad(2, ntheta)
    all_faces += faces.tolist()

    # used by the tri_caps
    points_list.append(n1)
    points_list.append(n2)
    points = np.vstack(points_list)
    return all_faces, points, points.shape[0]
306fdde57121f497d6ef263c2caea187bfc7af10
12,869
from os import fork


def xfork():
    """
    xfork() is similar to fork but doesn't throw an OSError exception.

    Returns -1 on error, otherwise it returns the same value as fork() does.
    """
    try:
        ret = fork()
    except OSError:
        ret = -1
    return ret
1bc0c16a2d71e4e1607d45af485a7c2999fbe631
12,870
import re


def cigar_segment_bounds(cigar, start):
    """
    Determine the start and end positions on a chromosome of a non-no-matching part of an
    RNA-seq read based on a read's cigar string.

    cigar string meaning: http://bioinformatics.cvr.ac.uk/blog/tag/cigar-string/

    Example:
        '50M25N50M' with start = 100 -> [100, 149, 175, 224]. Note that start and end integers
        are inclusive, i.e. all positions at or between 100 and 149 and at or between 175 and 224
        are covered by reads.

    :param cigar: str a read's cigar string, e.g. "49M165N51M"
    :param start: int a read's start position on a chromosome
    :return: list of integers representing cigar match start, end points, in order of matching subsequences
    """
    # if CIGAR string is a single full match (i.e. "<positive integer>M")
    # extract length of the match, return match segment.
    full_match = re.match(r'(\d+)M$', cigar)
    if full_match is not None:
        extension = int(cigar[:(full_match.span()[-1] - 1)]) - 1

        return [start, start + extension]

    # break up cigar string into list of 2-tuples (letter indicative of match/no match, run length integer).
    cigar_split = [(v, int(k)) for k, v in re.findall(r'(\d+)([A-Z]?)', cigar)]

    # initialize parse params.
    # Allow for "hard clipping" where aligned read can start with non-matching region (https://bit.ly/2K6TJ5Y)
    augment = False
    any_match = False

    # output storage.
    match_idx_list = list()

    for idx in range(len(cigar_split)):
        segment = cigar_split[idx]

        if segment[0] == 'M':
            any_match = True
            extension = segment[1] - 1  # end of a match run is inclusive.
            augment = True
            match_idx_list += [start, start + extension]  # append a match run to output.

        else:
            if augment:
                extension = segment[1] + 1
                augment = False
            else:
                extension = segment[1]

        start += extension

    # if no matching regions found, throw error.
    if not any_match:
        raise ValueError('CIGAR string {0} has no matching region.'.format(cigar))

    return match_idx_list
c870dfb9b11e2fd1df9fb347528252f114b8d70f
12,871
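A worked call that reproduces the example from the docstring above:

bounds = cigar_segment_bounds('50M25N50M', start=100)
print(bounds)  # [100, 149, 175, 224]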
def augument(data_dir, img_path, steering_angle, range_x=100, range_y=10):
    """
    Generate an augmented image and adjust the steering angle.
    (The steering angle is associated with the image.)
    """
    image, steering_angle = choose_image(data_dir, img_path, steering_angle)
    image, steering_angle = random_flip(image, steering_angle)
    image, steering_angle = random_translate(image, steering_angle, range_x, range_y)
    image = random_shadow(image)
    image = random_brightness(image)
    return image, steering_angle
1eafb5ea4ed024e6bab4008155c8364e8a480b8f
12,872
def ldns_buffer_limit(*args):
    """Wrapper around _ldns.ldns_buffer_limit(); returns the LDNS buffer's limit."""
    return _ldns.ldns_buffer_limit(*args)
d7a4c3c50ffd6db98d78a6a092c256bd1e0e3c11
12,873
import os
import sqlite3
import sys


def language():
    """
    Loads languages.

    :return: None
    """
    if os.path.isfile(omw_db):
        omw_connection = sqlite3.connect(omw_db)
        cursor = omw_connection.cursor()

        known = dict()
        cursor.execute("""SELECT id, iso639 from lang""")
        for (lid, l3) in cursor:
            known[l3] = lid

        for l3 in "eng cmn".split():
            # for l3 in "eng als arb bul cmn dan ell fas fin fra heb hrv ita jpn cat eus glg spa ind zsm nno nob pol por slv swe tha aar afr aka amh asm aze bam bel ben bod bos bre ces cor cym deu dzo epo est ewe fao ful gla gle glv guj hau hin hun hye ibo iii ina isl kal kan kat kaz khm kik kin kir kor lao lav lin lit lub lug mal mar mkd mlg mlt mon mya nbl nde nep nld oci ori orm pan pus roh ron run rus sag sin slk sme sna som sot srp ssw swa tam tel tgk tir ton tsn tso tur ukr urd uzb ven vie xho yor zul ang arz ast chr fry fur grc hat hbs ido kur lat ltg ltz mri nan nav rup san scn srd tat tgl tuk vol yid yue".split():
            if l3 in known:  ### already in
                continue
            l = languages.get(part3=l3)
            if l.part1:  ### use the two letter code if it exists
                bcp47 = l.part1
            else:
                bcp47 = l3

            # INSERT LANG DATA (CODES AND NAMES)
            u = 'omw'
            cursor.execute("""INSERT INTO lang (bcp47, iso639, u)
                              VALUES (?,?,?)""", (bcp47, l3, u))
            cursor.execute("""SELECT MAX(id) FROM lang""")
            lang_id = cursor.fetchone()[0]
            cursor.execute("""INSERT INTO lang_name (lang_id, in_lang_id, name, u)
                              VALUES (?,?,?,?)""", (lang_id, known['eng'], l.name, u))

        omw_connection.commit()
        omw_connection.close()
        sys.stdout.write('Loading languages finished\n')
    else:
        sys.stdout.write('Unable to find database (%s) file\n' % omw_db)
    return None
486971b6357b472833a3b71e6935d986dee9629e
12,874
import os
import ntpath
import urllib.request


def test_data_folder():
    """
    This fixture returns path to folder with shared test resources among all tests
    """
    data_dir = os.path.join(script_dir, "testdata")
    if not os.path.exists(data_dir):
        os.mkdir(data_dir)

    files_to_download = ["https://raw.githubusercontent.com/opencv/opencv/4.0.0/samples/data/messi5.jpg",
                         "https://raw.githubusercontent.com/opencv/opencv/4.0.0/samples/data/basketball1.png",
                         "https://raw.githubusercontent.com/opencv/opencv/4.0.0/samples/data/Megamind.avi",
                         "https://github.com/ARM-software/ML-zoo/raw/master/models/object_detection/ssd_mobilenet_v1/tflite_uint8/ssd_mobilenet_v1.tflite",
                         "https://git.mlplatform.org/ml/ethos-u/ml-embedded-evaluation-kit.git/plain/resources/kws/samples/yes.wav",
                         "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-speech-sdk/master/sampledata/audiofiles/myVoiceIsMyPassportVerifyMe04.wav"
                         ]

    for file in files_to_download:
        path, filename = ntpath.split(file)
        file_path = os.path.join(data_dir, filename)
        if not os.path.exists(file_path):
            print("\nDownloading test file: " + file_path + "\n")
            urllib.request.urlretrieve(file, file_path)

    return data_dir
df590eecb07fe15ed88635e0b307739868b082d5
12,875
def _call_godot(environment, source, arguments, target):
    """Runs the Godot executable with the specified command line arguments

    @param environment Environment in which the Godot executable will be run
    @param source Input files that will be involved
    @param arguments Arguments that will be passed to the Godot executable
    @param target Output files that should result from the call"""

    if 'GODOT_EXECUTABLE' in environment:
        godot_executable = environment['GODOT_EXECUTABLE']
    else:
        if 'GODOT_VERSION' in environment:
            godot_version = environment['GODOT_VERSION']
        else:
            godot_version = _default_godot_version

        godot_executable = _find_godot_executable(godot_version)
        #environment['GODOT_EXECUTABLE'] = godot_executable

    #if source is None:
    #    source = godot_executable

    return environment.Command(
        target, source,
        '"' + godot_executable + '" ' + arguments
    )
4320a6af9d2d1f8e8a06494df201c9c4a6f2416b
12,876
from functools import wraps


def random_seeded(func):
    """
    Decorator that uses the `random_seed` parameter from functions to seed the RNG.
    """
    @wraps(func)
    def wrapper(*args, random_seed: int = None, **kwargs):
        _RNG.seed(random_seed)
        return func(*args, **kwargs)

    return wrapper
1bf572625092680fb996b34469a9a990627acd59
12,877
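A minimal usage sketch for the decorator above, assuming `_RNG` is a module-level `random.Random()` instance as the implementation implies:

import random

_RNG = random.Random()  # assumed module-level RNG used by the decorator

@random_seeded
def draw(n):
    return [_RNG.randint(0, 9) for _ in range(n)]

print(draw(3, random_seed=42))  # reproducible
print(draw(3, random_seed=42))  # same output as above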
def getCRS(station_name=None, crs=None, autoCreate=True):
    """
    Method to get CRS code for the given station name. This method may not scale
    nicely for a production environment. Use a proper DB instead.

    @param station_name: Some characters for the station name.
    @param crs: CRS code if known
    @param autoCreate: Boolean to indicate if the sqlite DB should be created if not exist.
    """
    # Create the SQLite DB of CRS if not found already. This can be turned off
    # by passing autoCreate = False.
    if not os.path.exists(CRS_SQLITE_DB) and autoCreate:
        print "Attempting to create CRS DB for first run ..."
        recreateDB()
        fetchFromUrl()

    conn = sqlite3.connect(CRS_SQLITE_DB)
    c = conn.cursor()
    if station_name:
        c.execute('SELECT * from crstab where station_name like "%%%s%%"' % station_name.lower())
    elif crs:
        c.execute('SELECT * from crstab where crs = "%s"' % crs.lower())
    else:
        return None
    ret = c.fetchall()
    c.close()
    conn.close()
    return ret
e44cda3f0299cc5cc57c2574debe011809e716e6
12,878
def _initialize_object_from_dict(object_dict, parent=None):
    """Initialize a python object from dict."""
    provider = object_dict['provider']
    args = object_dict.get('args') or []
    kwargs = object_dict.get('kwargs') or {}
    obj = _get_object_by_referance(provider)
    if parent is not None:
        kwargs.update({'parent': parent})
    return obj(*args, **kwargs)
a6fb19c0db1e839514d19df50e223bf98a2241f8
12,879
def from_hdf(in_path, index=None, keypoints=True, descriptors=True):
    """
    For a given node, load the keypoints and descriptors from a hdf5 file. The
    keypoints and descriptors kwargs support returning only keypoints or descriptors.
    The index kwarg supports returning a subset of the data.

    Parameters
    ----------
    in_path : str
              handle to the file

    key : str
          An optional path into the HDF5. For example key='image_name', will
          search /image_name/descriptors for the descriptors.

    index : iterable
            an h5py accepted indexer to pull only a subset of the keypoints
            off disk. Default is None to pull all keypoints.

    keypoints : bool
                if True (default) return the keypoints

    descriptors : bool
                  if True (default) return the descriptors

    Returns
    -------
    keypoints : DataFrame
                A pandas dataframe of keypoints.

    descriptors : ndarray
                  A numpy array of descriptors
    """
    if isinstance(in_path, str):
        hdf = io_hdf.HDFDataset(in_path, mode='r')
    else:
        hdf = in_path

    outd = '/descriptors'
    outk = '/keypoints'
    if index is not None:
        index = np.asarray(index)
        # The indices into HDF have to be sorted lists. When indices get passed in
        # they are frequently ordered, so this pulls the data using the sorted
        # index and then reorders the data.
        i = np.argsort(index)
        ii = np.argsort(i)
        # It is important to use sorted() so that an in-place sort is NOT used.
        if descriptors:
            desc = hdf[outd][index[i].tolist()]
            desc = desc[ii]
        if keypoints:
            raw_kps = hdf[outk][index[i].tolist()]
            raw_kps = raw_kps[ii]
    else:
        # Unlike numpy, hdf does not handle NoneType as a proxy for `:`
        if descriptors:
            desc = hdf[outd][:]
        if keypoints:
            raw_kps = hdf[outk][:]

    if keypoints:
        index = raw_kps['index']
        clean_kps = utils.remove_field_name(raw_kps, 'index')
        columns = clean_kps.dtype.names
        allkps = pd.DataFrame(data=clean_kps, columns=columns, index=index)

    if isinstance(in_path, str):
        hdf = None

    if keypoints and descriptors:
        return allkps, desc
    elif keypoints:
        return allkps
    else:
        return desc
2ec00092e04dcd41c7a263781b8a5f7e8d888e5f
12,880
def main(cfg):
    """Solve the CVRP problem."""
    # Instantiate the data problem.
    data = create_data_model(cfg)
    print(data)
    if len(data['distance_matrix']) == 0:
        result = {
            "solution": False,
            "error-message": "unable to calculate distance matrix"
        }
        return result

    # Create the routing index manager.
    manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),
                                           data['num_vehicles'], data['depot'])

    # Create Routing Model.
    routing = pywrapcp.RoutingModel(manager)

    # Create and register a transit callback.
    def distance_callback(from_index, to_index):
        """Returns the distance between the two nodes."""
        # Convert from routing variable Index to distance matrix NodeIndex.
        from_node = manager.IndexToNode(from_index)
        to_node = manager.IndexToNode(to_index)
        return data['distance_matrix'][from_node][to_node]

    transit_callback_index = routing.RegisterTransitCallback(distance_callback)

    # Define cost of each arc.
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)

    def demand_callback(from_index):
        """Returns the demand of the node."""
        # Convert from routing variable Index to demands NodeIndex.
        from_node = manager.IndexToNode(from_index)
        return data['demands'][from_node]

    # Add Distance constraint.
    dimension_name = 'Distance'
    routing.AddDimension(
        transit_callback_index,
        0,  # no slack
        7200,  # vehicle maximum travel distance
        True,  # start cumul to zero
        dimension_name)

    demand_callback_index = routing.RegisterUnaryTransitCallback(demand_callback)
    routing.AddDimensionWithVehicleCapacity(
        demand_callback_index,
        0,  # null capacity slack
        data['vehicle_capacities'],  # vehicle maximum capacities
        True,  # start cumul to zero
        'Capacity')

    # Setting first solution heuristic.
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)

    # Solve the problem.
    solution = routing.SolveWithParameters(search_parameters)

    # Return solution dictionary
    if solution:
        return get_solution(data, manager, routing, solution)
    else:
        result = {
            "solution": False
        }
        return result
a33c1df5462e9af2eb508b7e2803dfd371609656
12,881
def get_corners(p, fov):
    """Get corners relative to DSS coordinates. xy coords anti-clockwise"""
    c = np.array([[0, 0], fov[::-1]])  # lower left, upper right xy
    # corners = np.c_[c[0], c[:, 1], c[1], c[::-1, 0]].T  # / clockwise yx
    corners = np.c_[c[0], c[::-1, 0], c[1], c[:, 1]].T    # / clockwise xy
    corners = trans.rigid(corners, p)
    return corners
e66e4dfd8eb26dc2caacd2e59c64de5d85bc7d10
12,882
from typing import Dict
from typing import Any
from typing import Tuple
from typing import List


def mixnet_m(
    num_classes: int = 1000,
    multiplier: float = 1.0,
    divisor: int = 8,
    min_depth: int = None,
    dataset: str = "IMAGENET",
) -> Dict[str, Any]:
    """Build MixNet-M."""
    if dataset == "IMAGENET":
        medium: Tuple[List[Any], ...] = (
            [24, 24, 1, 1, 1, None, False],
            [24, 32, 3, 2, 6, None, False],
            [32, 32, 1, 1, 3, None, False],
            [32, 40, 4, 2, 6, 0.5, True],
            [40, 40, 2, 1, 6, 0.5, True],
            [40, 40, 2, 1, 6, 0.5, True],
            [40, 40, 2, 1, 6, 0.5, True],
            [40, 80, 3, 2, 6, 0.25, True],
            [80, 80, 4, 1, 6, 0.25, True],
            [80, 80, 4, 1, 6, 0.25, True],
            [80, 80, 4, 1, 6, 0.25, True],
            [80, 120, 1, 1, 6, 0.5, True],
            [120, 120, 4, 1, 3, 0.5, True],
            [120, 120, 4, 1, 3, 0.5, True],
            [120, 120, 4, 1, 3, 0.5, True],
            [120, 200, 4, 2, 6, 0.5, True],
            [200, 200, 4, 1, 6, 0.5, True],
            [200, 200, 4, 1, 6, 0.5, True],
            [200, 200, 4, 1, 6, 0.5, True],
        )
        stem = round_filters(24, multiplier)
        stem_stride = 2
        last_out_channels = round_filters(200, multiplier)
        head = round_filters(1536, multiplier=1.0)
    elif dataset == "CIFAR100":
        medium = (
            [24, 24, 1, 1, 1, None, False],
            [24, 32, 3, 1, 6, None, False],
            [32, 32, 1, 1, 3, None, False],
            [32, 40, 4, 2, 6, 0.5, True],
            [40, 40, 2, 1, 6, 0.5, True],
            [40, 40, 2, 1, 6, 0.5, True],
            [40, 40, 2, 1, 6, 0.5, True],
            [40, 80, 3, 2, 6, 0.25, True],
            [80, 80, 4, 1, 6, 0.25, True],
            [80, 80, 4, 1, 6, 0.25, True],
            [80, 80, 4, 1, 6, 0.25, True],
            [80, 120, 1, 1, 6, 0.5, True],
            [120, 120, 4, 1, 3, 0.5, True],
            [120, 120, 4, 1, 3, 0.5, True],
            [120, 120, 4, 1, 3, 0.5, True],
            [120, 200, 4, 2, 6, 0.5, True],
            [200, 200, 4, 1, 6, 0.5, True],
            [200, 200, 4, 1, 6, 0.5, True],
            [200, 200, 4, 1, 6, 0.5, True],
        )
        stem = round_filters(24, multiplier)
        stem_stride = 1
        last_out_channels = round_filters(200, multiplier)
        head = round_filters(1536, multiplier=1.0)
    else:
        raise NotImplementedError

    for line in medium:
        line[0] = round_filters(line[0], multiplier)
        line[1] = round_filters(line[1], multiplier)

    return dict(
        stem=stem,
        stem_stride=stem_stride,
        head=head,
        last_out_channels=last_out_channels,
        block_args=medium,
        dropout=0.25,
        num_classes=num_classes,
    )
839852df3bc535613093c752addc6aed64e61e5b
12,883
import functools
import asyncio


def no_block(func):
    """Turns a blocking function into a non-blocking coroutine function."""

    @functools.wraps(func)
    async def no_blocking_handler(*args, **kwargs):
        partial = functools.partial(func, *args, **kwargs)
        return await asyncio.get_event_loop().run_in_executor(None, partial)

    return no_blocking_handler
5681fe7275a89c522384b28f9473fded8bba846b
12,884
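A minimal sketch of how the decorator above could be used; the blocking function here is purely illustrative:

import time

@no_block
def slow_add(a, b):
    time.sleep(1)  # simulate blocking I/O
    return a + b

async def demo():
    # runs in the default executor, so the event loop stays responsive
    print(await slow_add(2, 3))

asyncio.run(demo())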
def wgan_g_loss(scores_fake):
    """
    Input:
    - scores_fake: Tensor of shape (N,) containing scores for fake samples

    Output:
    - loss: Tensor of shape (,) giving WGAN generator loss
    """
    return -scores_fake.mean()
089561b47059a4bf07bf878012ce650cd6e34b4f
12,885
import time


def centroid_avg(stats):
    """
    Read centroid X and Y 10x and return mean of centroids.

    stats : stats method of ophyd camera object to use, e.g. cam_8.stats4

    Examples
    --------
    centroid_avg(cam_8.stats4)
    centroidY = centroid_avg(cam_8.stats4)[1]
    """
    centroidXArr = np.zeros(10)
    centroidYArr = np.zeros(10)
    for i in range(0, 10):
        centroidXArr[i] = stats.centroid.x.get()
        centroidYArr[i] = stats.centroid.y.get()
        # print('Centroid X = {:.6g} px'.format(centroidXArr[i]), ', Centroid Y = {:.6g} px'.format(centroidYArr[i]))
        time.sleep(0.2)
    CentroidX = centroidXArr.mean()
    CentroidY = centroidYArr.mean()
    print('Mean centroid X = {:.6g} px'.format(CentroidX))
    print('Mean centroid Y = {:.6g} px'.format(CentroidY))

    return CentroidX, CentroidY
5fb1715ab77858084f25400bd8c2508689b57cc1
12,886
def get_address_host_port(addr, strict=False):
    """
    Get a (host, port) tuple out of the given address.
    For definition of strict check parse_address
    ValueError is raised if the address scheme doesn't allow extracting
    the requested information.

    >>> get_address_host_port('tcp://1.2.3.4:80')
    ('1.2.3.4', 80)
    """
    scheme, loc = parse_address(addr, strict=strict)
    backend = registry.get_backend(scheme)
    try:
        return backend.get_address_host_port(loc)
    except NotImplementedError:
        raise ValueError(
            "don't know how to extract host and port for address %r" % (addr,)
        )
a0ec20c347becc6f403b9ee121d127fee41c6b0d
12,887
def get_ua_list():
    """
    Return the list of user-agent strings read from the UA list file.
    """
    with open('zhihu_spider/misc/ua_list.txt', 'r') as f:
        return [x.replace('\n', '') for x in f.readlines()]
6ebcf5d85650ad6644ccdf48aafed0160bd52ec0
12,888
import time


def measure_time(func):
    """Decorator that measures a function's execution time; the wrapper returns the elapsed seconds."""
    def func_wrapper(*args, **kwargs):
        start_time = time.time()
        a = func(*args, **kwargs)
        end_time = time.time()
        #print("time in seconds: " + str(end_time-start_time))
        return end_time - start_time
    return func_wrapper
e9fb4c1b7260cfe686204b50cbe46f27f25c467a
12,889
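A short usage sketch; note the decorated call returns the elapsed time rather than the wrapped function's own result:

@measure_time
def busy_loop(n):
    return sum(i * i for i in range(n))

elapsed = busy_loop(1_000_000)
print("busy_loop took {:.4f} s".format(elapsed))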
def generate_dummy_targets(bounds, label, n_points, field_keys=[], seed=1):
    """
    Generate dummy points with randomly generated positions. Points are
    generated on node 0 and distributed to other nodes if running in parallel.

    Parameters
    ----------
    bounds : tuple of float
        Bounding box to generate targets within, of format (xmin, ymin, xmax, ymax).
    label : str
        Label to assign generated targets.
    n_points : int
        Number of points to generate
    field_keys : list of str, optional
        List of keys to add to `fields` property.
    seed : int, optional
        Random number generator seed.

    Returns
    -------
    Targets
        A collection of randomly generated targets.
    """
    if mpiops.chunk_index == 0:
        rnd = np.random.RandomState(seed)

        def _generate_points(lower, upper, limit):
            new_points = []
            while len(new_points) < limit:
                new_point = rnd.uniform(lower, upper)
                new_points.append(new_point)
            return new_points

        new_lons = _generate_points(bounds[0], bounds[2], n_points)
        new_lats = _generate_points(bounds[1], bounds[3], n_points)
        lonlats = np.column_stack([sorted(new_lons), sorted(new_lats)])
        labels = np.full(lonlats.shape[0], label)
        if field_keys:
            fields = {k: np.zeros(n_points) for k in field_keys}
        else:
            fields = {}
        _logger.info("Generated %s dummy targets", len(lonlats))
        # Split for distribution
        lonlats = np.array_split(lonlats, mpiops.chunks)
        labels = np.array_split(labels, mpiops.chunks)
        split_fields = {k: np.array_split(v, mpiops.chunks) for k, v in fields.items()}
        fields = [{k: v[i] for k, v in split_fields.items()} for i in range(mpiops.chunks)]
    else:
        lonlats, labels, fields = None, None, None

    lonlats = mpiops.comm.scatter(lonlats, root=0)
    labels = mpiops.comm.scatter(labels, root=0)
    fields = mpiops.comm.scatter(fields, root=0)
    return Targets(lonlats, labels, fields)
6986161499aa62c3e0a9bea4367886dc51736c74
12,890
from typing import List


def load_numbers_sorted(txt: str) -> List[int]:
    """Read numbers from a file, sort them, and return them as a list.

    Args:
        txt (str): Path to the file.

    Returns:
        List[int]: Sorted list of numbers.
    """
    numbers = []
    with open(txt) as f:
        numbers = sorted(map(lambda e: int(e), f))

    return numbers
6f10badd417a2ceefefa9f28a5c40583ea077d43
12,891
import os
import sys
import logging
from logging.handlers import TimedRotatingFileHandler


def start_logger(log_directory_path):
    """Set up the log file folder and default logging configuration given a log directory path.

    Args:
        log_directory_path (str): The directory path to create a folder containing the log files.

    Returns:
        logger (object): A logger object used for the MSOrganiser software.
    """
    logdirectory = os.path.join(log_directory_path, "logfiles")

    try:
        os.makedirs(logdirectory, exist_ok=True)
    except Exception as e:
        print("Unable to create log directory in " + logdirectory + " due to this error message", flush=True)
        print(e, flush=True)
        sys.exit(-1)

    logger = logging.getLogger("MSOrganiser")
    logger.setLevel(logging.INFO)

    # create file handler (fh)
    logfilename = os.path.join(logdirectory, 'Test_Log')
    fh = TimedRotatingFileHandler(logfilename,
                                  when='midnight',
                                  interval=1,
                                  backupCount=2)

    # create a logging format
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)

    # add the handlers to the logger
    logger.addHandler(fh)
    return logger
2fb4ec745368277d3e2ee2c1b8f98abfe47ad650
12,892
def translate_pt(p, offset):
    """Translates point p=(x,y) by offset=(x,y)"""
    return (p[0] + offset[0], p[1] + offset[1])
9fdc578d461219e9e5d1b557b9fde3d7a0946815
12,893
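A one-line usage example for the helper above:

print(translate_pt((2, 3), (10, -1)))  # (12, 2)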
import os


def inference(hypes, images, train=True):
    """Build the MNIST model up to where it may be used for inference.

    Args:
      images: Images placeholder, from inputs().
      train: whether the network is used for training or inference

    Returns:
      softmax_linear: Output tensor with the computed logits.
    """
    vgg16_npy_path = os.path.join(hypes['dirs']['data_dir'], 'weights', "vgg16.npy")
    vgg_fcn = fcn8_vgg.FCN8VGG(vgg16_npy_path=vgg16_npy_path)

    vgg_fcn.wd = hypes['wd']

    vgg_fcn.build(images, train=train, num_classes=2, random_init_fc8=True)

    logits = {}

    logits['images'] = images

    if hypes['arch']['fcn_in'] == 'pool5':
        logits['fcn_in'] = vgg_fcn.pool5
    elif hypes['arch']['fcn_in'] == 'fc7':
        logits['fcn_in'] = vgg_fcn.fc7
    else:
        raise NotImplementedError

    logits['feed2'] = vgg_fcn.pool4
    logits['feed4'] = vgg_fcn.pool3

    logits['fcn_logits'] = vgg_fcn.upscore32

    logits['deep_feat'] = vgg_fcn.pool5
    logits['early_feat'] = vgg_fcn.conv4_3

    return logits
f612becb6a6fe3bfa808ccee2ef0462b600fcf23
12,894
def truncate(sequence):
    """Truncate the sequence to the first whitespace-separated token of its string form."""
    string = str(sequence)
    return string.split()[0]
2e8eeffb08d6d3d5d6ad5e6a83e596ec61a2eea2
12,895
def unbind(port: int) -> dict:
    """Request browser port unbinding.

    Parameters
    ----------
    port: int
            Port number to unbind.
    """
    return {"method": "Tethering.unbind", "params": {"port": port}}
c980eaa28e29dd44139035f0c8882d2960322328
12,896
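A quick look at the Chrome DevTools Protocol message the helper above builds:

print(unbind(8080))
# {'method': 'Tethering.unbind', 'params': {'port': 8080}}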
import os


def get_files(dir="."):
    """
    Gets all the files recursively from a given base directory.

    Args:
        dir (str): The base directory path.

    Returns:
        list: A list that contains all files.
    """
    folder_queue = [dir]
    files = set()

    while(folder_queue):
        next_folder = folder_queue.pop(0)
        with os.scandir(next_folder) as it:
            for entry in it:
                if entry.is_file():
                    files.add(entry)
                else:
                    folder_queue.append(entry.path)

    files = list(files)
    return files
4c29efa262c2b1be04952beb9acb6a2d8b622a3a
12,897
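A short usage sketch; note the function above returns os.DirEntry objects, so take their `.path` attribute for the actual file paths:

entries = get_files(".")
for entry in entries[:5]:
    print(entry.path)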
def xy_to_ellipse(x, Vx, y, Vy):
    """
    Takes the Cartesian variables.
    This function returns the particle's position relative to an ellipse and parameters of the ellipse.
    Returns a,e,theta,theta_E
    """
    # radius using x and y
    r = np.sqrt(x ** 2 + y ** 2)

    # speed of the particle
    V = np.sqrt(Vx ** 2 + Vy ** 2)

    # angular momentum per mass
    h = x * Vy - y * Vx

    # energy per mass
    u = (V ** 2) / 2. - 4. * (np.pi ** 2) / r

    # semi-major axis
    a = -2. * ((np.pi) ** 2) / u

    # eccentricity of the elliptical orbit, added absolute value
    e = np.sqrt(np.abs(1 - ((h / (2. * np.pi)) ** 2) / a))

    # theta
    theta = np.arctan2(y, x)

    # theta_E, compute e*cos(theta - thetaE) first
    buff = a * (1. - e ** 2) / r - 1.

    # divide buff/e and output 0 if it is a circular orbit
    buff_cos = np.divide(buff, e, out=np.zeros_like(buff), where=(e > np.power(10., -5.)))

    # to make sure that arccos takes values less than 1 and greater than -1
    buff_cos[buff_cos < -1.] = -1.
    buff_cos[buff_cos > 1.] = 1.

    delta = np.arccos(buff_cos)

    # change the sign if the radial velocity is negative
    delta *= np.power(-1., (x * Vx + y * Vy) < 0.)

    thetaE = theta - delta

    # set thetaE to 0 if it is a circular orbit
    thetaE *= (e > np.power(10., -5.))

    # fix to add 2pi or subtract 2pi if thetaE isn't between -pi and pi
    thetaE -= (thetaE > np.pi) * 2 * np.pi
    thetaE += (thetaE < -np.pi) * 2 * np.pi

    return a, e, theta, thetaE
2606a81899431349adc419b04d87063f2e75936a
12,898
from typing import List
from typing import Dict
from typing import OrderedDict


def leak_dictionary_by_ignore_sha(
    policy_breaks: List[PolicyBreak],
) -> Dict[str, List[PolicyBreak]]:
    """
    leak_dictionary_by_ignore_sha sorts matches and incidents by
    first appearance in file.

    Incidents are sorted by first appearance in the file; file-wide matches
    have no index, so they are given -1 and bumped to the top.

    :return: Dictionary keyed by ignore SHA, mapping each SHA to the list of
        policy breaks that share it.
    """
    policy_breaks.sort(
        key=lambda x: min(  # type: ignore
            (match.index_start if match.index_start else -1 for match in x.matches)
        )
    )
    sha_dict: Dict[str, List[PolicyBreak]] = OrderedDict()
    for policy_break in policy_breaks:
        policy_break.matches.sort(key=lambda x: x.index_start if x.index_start else -1)
        ignore_sha = get_ignore_sha(policy_break)
        sha_dict.setdefault(ignore_sha, []).append(policy_break)

    return sha_dict
d94bc10b8f2d94eee639bd94e75ad5835d9b6f1a
12,899