text: string (lengths 78 to 104k characters)
score: float64 (values 0 to 0.18)
def add_from_file(self, filename, handler_decorator=None):
    """
    Wrapper around add() that reads the handlers from the
    file with the given name. The file is a Python script containing
    a list named 'commands' of tuples that map command names to
    handlers.

    :type  filename: str
    :param filename: The name of the file containing the tuples.
    :type  handler_decorator: function
    :param handler_decorator: A function that is used to decorate
        each of the handlers in the file.
    """
    args = {}
    execfile(filename, args)
    commands = args.get('commands')
    if commands is None:
        raise Exception(filename + ' has no variable named "commands"')
    elif not hasattr(commands, '__iter__'):
        raise Exception(filename + ': "commands" is not iterable')
    for key, handler in commands:
        if handler_decorator:
            handler = handler_decorator(handler)
        self.add(key, handler)
0.001925
def get_login_credentials(args):
    """
    Gets the login credentials from the user, if not specified while
    invoking the script.

    @param args: arguments provided to the script.
    """
    if not args.username:
        args.username = raw_input("Enter Username: ")
    if not args.password:
        args.password = getpass.getpass("Enter Password: ")
0.011594
def load_bookmarks_without_file(filename):
    """Load all bookmarks but those from a specific file."""
    bookmarks = _load_all_bookmarks()
    return {k: v for k, v in bookmarks.items() if v[0] != filename}
0.004785
def handle(client_message, handle_event_imap_invalidation=None, handle_event_imap_batch_invalidation=None, to_object=None): """ Event handler """ message_type = client_message.get_message_type() if message_type == EVENT_IMAPINVALIDATION and handle_event_imap_invalidation is not None: key = None if not client_message.read_bool(): key = client_message.read_data() handle_event_imap_invalidation(key=key) if message_type == EVENT_IMAPBATCHINVALIDATION and handle_event_imap_batch_invalidation is not None: keys_size = client_message.read_int() keys = [] for _ in range(0, keys_size): keys_item = client_message.read_data() keys.append(keys_item) handle_event_imap_batch_invalidation(keys=keys)
0.005
def add_flag_values(self, entry, flag):
    ''' Adds flag value to applicable compounds '''
    if flag in self.flags:
        self.flags[flag].append(entry)
0.011834
def tuple_args(fn):
    """
    args parsing utility: allows a function to be called as
    fun(p1, p2, ...pn, **kwargs) or fun([p1, p2, ..], **kwargs).

    ex) sample::

        @tuple_args
        def f(args, **kwargs):
            for d in args:
                print d

        f(1,2,3) => f([1,2,3])

    :param function fn:
    :return:
    """
    @wraps(fn)
    def wrapped(*args, **kwargs):
        if len(args) == 1:
            if isinstance(args[0], tuple):
                return fn(args[0], **kwargs)
            elif isinstance(args[0], list):
                return fn(tuple(args[0]), **kwargs)
        return fn(args, **kwargs)
    return wrapped
0.001605
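A small illustration of the decorator above (editor-added sketch; `f` is a hypothetical function, and `tuple_args` together with `functools.wraps` are assumed to be in scope as in the snippet):

@tuple_args
def f(args, **kwargs):
    return list(args)

f(1, 2, 3)    # -> [1, 2, 3]  (several positional args arrive as one tuple)
f([1, 2, 3])  # -> [1, 2, 3]  (a single list is converted to a tuple first)
f((1, 2, 3))  # -> [1, 2, 3]  (a single tuple is passed through unchanged)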
def Barr_1981(Re, eD): r'''Calculates Darcy friction factor using the method in Barr (1981) [2]_ as shown in [1]_. .. math:: \frac{1}{\sqrt{f_d}} = -2\log\left\{\frac{\epsilon}{3.7D} + \frac{4.518\log(\frac{Re}{7})}{Re\left[1+\frac{Re^{0.52}}{29} \left(\frac{\epsilon}{D}\right)^{0.7}\right]}\right\} Parameters ---------- Re : float Reynolds number, [-] eD : float Relative roughness, [-] Returns ------- fd : float Darcy friction factor [-] Notes ----- No range of validity specified for this equation. Examples -------- >>> Barr_1981(1E5, 1E-4) 0.01849836032779929 References ---------- .. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and Computational Efficiency for Turbulent Flow in Pipes." Flow, Turbulence and Combustion 90, no. 1 (January 1, 2013): 1-27. doi:10.1007/s10494-012-9419-7 .. [2] Barr, Dih, and Colebrook White."Technical Note. Solutions Of The Colebrook-White Function For Resistance To Uniform Turbulent Flow." ICE Proceedings 71, no. 2 (January 6, 1981): 529-35. doi:10.1680/iicep.1981.1895. ''' fd = (-2*log10(eD/3.7 + 4.518*log10(Re/7.)/(Re*(1+Re**0.52/29*eD**0.7))))**-2 return fd
0.001516
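For reference, the Barr (1981) correlation quoted in the docstring above can be checked with a self-contained sketch (editor-added; only the standard-library math module is assumed, and the name barr_fd is hypothetical, not the library's API):

from math import log10

def barr_fd(Re, eD):
    # Direct transcription of the expression given in the docstring above.
    return (-2*log10(eD/3.7 + 4.518*log10(Re/7.)/(Re*(1 + Re**0.52/29*eD**0.7))))**-2

print(barr_fd(1E5, 1E-4))  # ~0.018498, matching the docstring example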
def count_markers_samples(prefix, file_type): """Counts the number of markers and samples in plink file. :param prefix: the prefix of the files. :param file_type: the file type. :type prefix: str :type file_type: str :returns: the number of markers and samples (in a tuple). """ # The files that will need counting sample_file = None marker_file = None if file_type == "bfile": # Binary files (.bed, .bim and .fam) sample_file = prefix + ".fam" marker_file = prefix + ".bim" elif file_type == "file": # Pedfile (.ped and .map) sample_file = prefix + ".ped" marker_file = prefix + ".map" elif file_type == "tfile": # Transposed pedfile (.tped and .tfam) sample_file = prefix + ".tfam" marker_file = prefix + ".tped" # Counting (this may take some time) nb_samples = 0 with open(sample_file, "r") as f: for line in f: nb_samples += 1 nb_markers = 0 with open(marker_file, "r") as f: for line in f: nb_markers += 1 return nb_markers, nb_samples
0.000878
def _fake_modifyinstance(self, namespace, **params): """ Implements a server responder for :meth:`~pywbem.WBEMConnection.CreateInstance` Modify a CIM instance in the local repository. Raises: CIMError: CIM_ERR_ALREADY_EXISTS, CIM_ERR_INVALID_CLASS """ if self._repo_lite: raise CIMError( CIM_ERR_NOT_SUPPORTED, "ModifyInstance not supported when repo_lite set.") # Validate namespace instance_repo = self._get_instance_repo(namespace) modified_instance = deepcopy(params['ModifiedInstance']) property_list = params['PropertyList'] # Return if empty property list if property_list is not None and not property_list: return if modified_instance is not None and not modified_instance: return if not isinstance(modified_instance, CIMInstance): raise CIMError( CIM_ERR_INVALID_PARAMETER, _format("The ModifiedInstance parameter is not a valid " "CIMInstance. Rcvd type={0}", type(modified_instance))) # Classnames in instance and path must match if modified_instance.classname != modified_instance.path.classname: raise CIMError( CIM_ERR_INVALID_PARAMETER, _format("ModifyInstance classname in path and instance do " "not match. classname={0!A}, path.classname={1!A}", modified_instance.classname, modified_instance.path.classname)) # Get class including properties from superclasses from repo try: target_class = self.GetClass(modified_instance.classname, namespace=namespace, LocalOnly=False, IncludeQualifiers=True, IncludeClassOrigin=True) except CIMError as ce: if ce.status_code == CIM_ERR_NOT_FOUND: raise CIMError( CIM_ERR_INVALID_CLASS, _format("Cannot modify instance because its creation " "class {0!A} does not exist in namespace {1!A}.", modified_instance.classname, namespace)) raise # get key properties and all class props cl_props = [p.name for p in six.itervalues(target_class.properties)] key_props = [p.name for p in six.itervalues(target_class.properties) if 'key' in p.qualifiers] # Get original instance in repo. Does not copy the orig instance. mod_inst_path = modified_instance.path.copy() if modified_instance.path.namespace is None: mod_inst_path.namespace = namespace orig_instance_tup = self._find_instance(mod_inst_path, instance_repo) if orig_instance_tup[0] is None: raise CIMError( CIM_ERR_NOT_FOUND, _format("Original Instance {0!A} not found in namespace {1!A}", modified_instance.path, namespace)) original_instance = orig_instance_tup[1] # Remove duplicate properties from property_list if property_list: if len(property_list) != len(set(property_list)): property_list = list(set(property_list)) # Test that all properties in modified instance and property list # are in the class if property_list: for p in property_list: if p not in cl_props: raise CIMError( CIM_ERR_INVALID_PARAMETER, _format("Property {0!A} in PropertyList not in class " "{1!A}", p, modified_instance.classname)) for p in modified_instance: if p not in cl_props: raise CIMError( CIM_ERR_INVALID_PARAMETER, _format("Property {0!A} in ModifiedInstance not in class " "{1!A}", p, modified_instance.classname)) # Set the class value for properties in the property list but not # in the modified_instance. This sets just the value component. 
mod_inst_props = set(modified_instance.keys()) new_props = mod_inst_props.difference(set(cl_props)) if new_props: for new_prop in new_props: modified_instance[new_prop] = \ target_class.properties[new_prop].value # Remove all properties that do not change value between original # instance and modified instance for p in list(modified_instance): if original_instance[p] == modified_instance[p]: del modified_instance[p] # Confirm no key properties in remaining modified instance for p in key_props: if p in modified_instance: raise CIMError( CIM_ERR_INVALID_PARAMETER, _format("ModifyInstance cannot modify key property {0!A}", p)) # Remove any properties from modified instance not in the property_list if property_list: for p in list(modified_instance): if p not in property_list: del modified_instance[p] # Exception if property in instance but not class or types do not # match for pname in modified_instance: if pname not in target_class.properties: raise CIMError( CIM_ERR_INVALID_PARAMETER, _format("Property {0!A} specified in ModifiedInstance is " "not exposed by class {1!A} in namespace {2!A}", pname, target_class.classname, namespace)) cprop = target_class.properties[pname] iprop = modified_instance.properties[pname] if iprop.is_array != cprop.is_array \ or iprop.type != cprop.type \ or iprop.array_size != cprop.array_size: raise CIMError( CIM_ERR_INVALID_PARAMETER, _format("Instance and class property name={0!A} type " "or other attributes do not match: " "instance={1!A}, class={2!A}", pname, iprop, cprop)) # Modify the value of properties in the repo with those from # modified instance index = orig_instance_tup[0] instance_repo[index].update(modified_instance.properties) return
0.000294
def get_name_by_preorder(self, preorder_hash):
    """
    Given a name preorder hash, get the associated name record.
    (It may be expired or revoked)
    """
    cur = self.db.cursor()
    return namedb_get_name_by_preorder_hash(cur, preorder_hash)
0.021429
def putstats(pfile, handle, statdicts): """ puts stats from pickles into a dictionary """ ## load in stats with open(pfile, 'r') as infile: filestats, samplestats = pickle.load(infile) ## get dicts from statdicts tuple perfile, fsamplehits, fbarhits, fmisses, fdbars = statdicts ## pull new stats #handle = os.path.splitext(os.path.basename(handle))[0] perfile[handle] += filestats ## update sample stats samplehits, barhits, misses, dbars = samplestats fsamplehits.update(samplehits) fbarhits.update(barhits) fmisses.update(misses) fdbars.update(dbars) ## repack the tuple and return statdicts = perfile, fsamplehits, fbarhits, fmisses, fdbars return statdicts
0.009434
def build_schema(self, fields): """ Build the schema from fields. :param fields: A list of fields in the index :returns: list of dictionaries Each dictionary has the keys field_name: The name of the field index type: what type of value it is 'multi_valued': if it allows more than one value 'column': a number identifying it 'type': the type of the field 'multi_valued': 'false', 'column': 0} """ content_field_name = '' schema_fields = [ {'field_name': ID, 'type': 'text', 'multi_valued': 'false', 'column': 0}, {'field_name': DJANGO_ID, 'type': 'integer', 'multi_valued': 'false', 'column': 1}, {'field_name': DJANGO_CT, 'type': 'text', 'multi_valued': 'false', 'column': 2}, ] self._columns[ID] = 0 self._columns[DJANGO_ID] = 1 self._columns[DJANGO_CT] = 2 column = len(schema_fields) for field_name, field_class in sorted(list(fields.items()), key=lambda n: n[0]): if field_class.document is True: content_field_name = field_class.index_fieldname if field_class.indexed is True: field_data = { 'field_name': field_class.index_fieldname, 'type': 'text', 'multi_valued': 'false', 'column': column, } if field_class.field_type == 'date': field_data['type'] = 'date' elif field_class.field_type == 'datetime': field_data['type'] = 'datetime' elif field_class.field_type == 'integer': field_data['type'] = 'integer' elif field_class.field_type == 'float': field_data['type'] = 'float' elif field_class.field_type == 'boolean': field_data['type'] = 'boolean' elif field_class.field_type == 'ngram': field_data['type'] = 'ngram' elif field_class.field_type == 'edge_ngram': field_data['type'] = 'edge_ngram' if field_class.is_multivalued: field_data['multi_valued'] = 'true' schema_fields.append(field_data) self._columns[field_data['field_name']] = column column += 1 return content_field_name, schema_fields
0.001141
def convert_dense(builder, layer, input_names, output_names, keras_layer):
    """Convert a dense layer from keras to coreml.

    Parameters
    ----------
    keras_layer: layer
        A keras layer object.
    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    # Get input and output names
    input_name, output_name = (input_names[0], output_names[0])

    has_bias = keras_layer.bias

    # Get the weights from keras
    W = keras_layer.get_weights()[0].T
    Wb = keras_layer.get_weights()[1].T if has_bias else None

    builder.add_inner_product(name=layer,
                              W=W,
                              b=Wb,
                              input_channels=keras_layer.input_dim,
                              output_channels=keras_layer.output_dim,
                              has_bias=has_bias,
                              input_name=input_name,
                              output_name=output_name)
0.030303
def layer_permutation(self, layer_partition, layout, qubit_subset): """Find a swap circuit that implements a permutation for this layer. The goal is to swap qubits such that qubits in the same two-qubit gates are adjacent. Based on Sergey Bravyi's algorithm. The layer_partition is a list of (qu)bit lists and each qubit is a tuple (qreg, index). The layout is a dict mapping qubits in the circuit to qubits in the coupling graph and represents the current positions of the data. The qubit_subset is the subset of qubits in the coupling graph that we have chosen to map into. The coupling is a CouplingGraph. TRIALS is the number of attempts the randomized algorithm makes. Returns: success_flag, best_circ, best_d, best_layout, trivial_flag If success_flag is True, then best_circ contains a DAGCircuit with the swap circuit, best_d contains the depth of the swap circuit, and best_layout contains the new positions of the data qubits after the swap circuit has been applied. The trivial_flag is set if the layer has no multi-qubit gates. """ if self.seed is None: self.seed = np.random.randint(0, np.iinfo(np.int32).max) rng = np.random.RandomState(self.seed) rev_layout = {b: a for a, b in layout.items()} gates = [] for layer in layer_partition: if len(layer) > 2: raise TranspilerError("Layer contains >2 qubit gates") elif len(layer) == 2: gates.append(tuple(layer)) # Can we already apply the gates? dist = sum([self.coupling_map.distance(layout[g[0]][1], layout[g[1]][1]) for g in gates]) if dist == len(gates): circ = DAGCircuit() circ.add_qreg(QuantumRegister(self.coupling_map.size(), "q")) return True, circ, 0, layout, bool(gates) # Begin loop over trials of randomized algorithm n = self.coupling_map.size() best_d = sys.maxsize # initialize best depth best_circ = None # initialize best swap circuit best_layout = None # initialize best final layout QR = QuantumRegister(self.coupling_map.size(), "q") for _ in range(self.trials): trial_layout = layout.copy() rev_trial_layout = rev_layout.copy() # SWAP circuit constructed this trial trial_circ = DAGCircuit() trial_circ.add_qreg(QR) # Compute Sergey's randomized distance xi = {} for i in self.coupling_map.physical_qubits: xi[(QR, i)] = {} for i in self.coupling_map.physical_qubits: i = (QR, i) for j in self.coupling_map.physical_qubits: j = (QR, j) scale = 1 + rng.normal(0, 1 / n) xi[i][j] = scale * self.coupling_map.distance(i[1], j[1]) ** 2 xi[j][i] = xi[i][j] # Loop over depths d up to a max depth of 2n+1 d = 1 # Circuit for this swap slice circ = DAGCircuit() circ.add_qreg(QR) # Identity wire-map for composing the circuits identity_wire_map = {(QR, j): (QR, j) for j in range(n)} while d < 2 * n + 1: # Set of available qubits qubit_set = set(qubit_subset) # While there are still qubits available while qubit_set: # Compute the objective function min_cost = sum([xi[trial_layout[g[0]]][trial_layout[g[1]]] for g in gates]) # Try to decrease objective function progress_made = False # Loop over edges of coupling graph for e in self.coupling_map.get_edges(): e = [(QR, edge) for edge in e] # Are the qubits available? 
if e[0] in qubit_set and e[1] in qubit_set: # Try this edge to reduce the cost new_layout = trial_layout.copy() new_layout[rev_trial_layout[e[0]]] = e[1] new_layout[rev_trial_layout[e[1]]] = e[0] rev_new_layout = rev_trial_layout.copy() rev_new_layout[e[0]] = rev_trial_layout[e[1]] rev_new_layout[e[1]] = rev_trial_layout[e[0]] # Compute the objective function new_cost = sum([xi[new_layout[g[0]]][new_layout[g[1]]] for g in gates]) # Record progress if we succceed if new_cost < min_cost: progress_made = True min_cost = new_cost opt_layout = new_layout rev_opt_layout = rev_new_layout opt_edge = e # Were there any good choices? if progress_made: qubit_set.remove(opt_edge[0]) qubit_set.remove(opt_edge[1]) trial_layout = opt_layout rev_trial_layout = rev_opt_layout circ.apply_operation_back( SwapGate(), [(opt_edge[0][0], opt_edge[0][1]), (opt_edge[1][0], opt_edge[1][1])], []) else: break # We have either run out of qubits or failed to improve # Compute the coupling graph distance_qubits dist = sum([self.coupling_map.distance(trial_layout[g[0]][1], trial_layout[g[1]][1]) for g in gates]) # If all gates can be applied now, we are finished # Otherwise we need to consider a deeper swap circuit if dist == len(gates): trial_circ.compose_back(circ, identity_wire_map) break # Increment the depth d += 1 # Either we have succeeded at some depth d < dmax or failed dist = sum([self.coupling_map.distance(trial_layout[g[0]][1], trial_layout[g[1]][1]) for g in gates]) if dist == len(gates): if d < best_d: best_circ = trial_circ best_layout = trial_layout best_d = min(best_d, d) if best_circ is None: return False, None, None, None, False return True, best_circ, best_d, best_layout, False
0.001139
def _EccZmaxRperiRap(self,*args,**kwargs): """ NAME: EccZmaxRperiRap (_EccZmaxRperiRap) PURPOSE: evaluate the eccentricity, maximum height above the plane, peri- and apocenter in the Staeckel approximation INPUT: Either: a) R,vR,vT,z,vz[,phi]: 1) floats: phase-space value for single object (phi is optional) (each can be a Quantity) 2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity) b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument OUTPUT: (e,zmax,rperi,rap) HISTORY: 2017-12-15 - Written - Bovy (UofT) """ if len(args) == 5: #R,vR.vT, z, vz R,vR,vT, z, vz= args elif len(args) == 6: #R,vR.vT, z, vz, phi R,vR,vT, z, vz, phi= args else: self._parse_eval_args(*args) R= self._eval_R vR= self._eval_vR vT= self._eval_vT z= self._eval_z vz= self._eval_vz Lz= R*vT Phi= _evaluatePotentials(self._pot,R,z) E= Phi+vR**2./2.+vT**2./2.+vz**2./2. thisERL= -numpy.exp(self._ERLInterp(Lz))+self._ERLmax thisERa= -numpy.exp(self._ERaInterp(Lz))+self._ERamax if isinstance(R,numpy.ndarray): indx= ((E-thisERa)/(thisERL-thisERa) > 1.)\ *(((E-thisERa)/(thisERL-thisERa)-1.) < 10.**-2.) E[indx]= thisERL[indx] indx= ((E-thisERa)/(thisERL-thisERa) < 0.)\ *((E-thisERa)/(thisERL-thisERa) > -10.**-2.) E[indx]= thisERa[indx] indx= (Lz < self._Lzmin) indx+= (Lz > self._Lzmax) indx+= ((E-thisERa)/(thisERL-thisERa) > 1.) indx+= ((E-thisERa)/(thisERL-thisERa) < 0.) indxc= True^indx ecc= numpy.empty(R.shape) zmax= numpy.empty(R.shape) rperi= numpy.empty(R.shape) rap= numpy.empty(R.shape) if numpy.sum(indxc) > 0: u0= numpy.exp(self._logu0Interp.ev(Lz[indxc], (_Efunc(E[indxc],thisERL[indxc])-_Efunc(thisERa[indxc],thisERL[indxc]))/(_Efunc(thisERL[indxc],thisERL[indxc])-_Efunc(thisERa[indxc],thisERL[indxc])))) sinh2u0= numpy.sinh(u0)**2. thisEr= self.Er(R[indxc],z[indxc],vR[indxc],vz[indxc], E[indxc],Lz[indxc],sinh2u0,u0) thisEz= self.Ez(R[indxc],z[indxc],vR[indxc],vz[indxc], E[indxc],Lz[indxc],sinh2u0,u0) thisv2= self.vatu0(E[indxc],Lz[indxc],u0,self._delta*numpy.sinh(u0),retv2=True) cos2psi= 2.*thisEr/thisv2/(1.+sinh2u0) #latter is cosh2u0 cos2psi[(cos2psi > 1.)*(cos2psi < 1.+10.**-5.)]= 1. indxCos2psi= (cos2psi > 1.) indxCos2psi+= (cos2psi < 0.) indxc[indxc]= True^indxCos2psi#Handle these two cases as off-grid indx= True^indxc psi= numpy.arccos(numpy.sqrt(cos2psi[True^indxCos2psi])) coords= numpy.empty((3,numpy.sum(indxc))) coords[0,:]= (Lz[indxc]-self._Lzmin)/(self._Lzmax-self._Lzmin)*(self._nLz-1.) y= (_Efunc(E[indxc],thisERL[indxc])-_Efunc(thisERa[indxc],thisERL[indxc]))/(_Efunc(thisERL[indxc],thisERL[indxc])-_Efunc(thisERa[indxc],thisERL[indxc])) coords[1,:]= y*(self._nE-1.) coords[2,:]= psi/numpy.pi*2.*(self._npsi-1.) ecc[indxc]= (numpy.exp(ndimage.interpolation.map_coordinates(self._eccFiltered, coords, order=3, prefilter=False))-10.**-10.) rperi[indxc]= (numpy.exp(ndimage.interpolation.map_coordinates(self._rperiFiltered, coords, order=3, prefilter=False))-10.**-10.)*(numpy.exp(self._rperiLzInterp(Lz[indxc]))-10.**-5.) # We do rap below with zmax #Switch to Ez-calculated psi sin2psi= 2.*thisEz[True^indxCos2psi]/thisv2[True^indxCos2psi]/(1.+sinh2u0[True^indxCos2psi]) #latter is cosh2u0 sin2psi[(sin2psi > 1.)*(sin2psi < 1.+10.**-5.)]= 1. indxSin2psi= (sin2psi > 1.) indxSin2psi+= (sin2psi < 0.) 
indxc[indxc]= True^indxSin2psi#Handle these two cases as off-grid indx= True^indxc psiz= numpy.arcsin(numpy.sqrt(sin2psi[True^indxSin2psi])) newcoords= numpy.empty((3,numpy.sum(indxc))) newcoords[0:2,:]= coords[0:2,True^indxSin2psi] newcoords[2,:]= psiz/numpy.pi*2.*(self._npsi-1.) zmax[indxc]= (numpy.exp(ndimage.interpolation.map_coordinates(self._zmaxFiltered, newcoords, order=3, prefilter=False))-10.**-10.)*(numpy.exp(self._zmaxLzInterp(Lz[indxc]))-10.**-5.) rap[indxc]= (numpy.exp(ndimage.interpolation.map_coordinates(self._rapFiltered, newcoords, order=3, prefilter=False))-10.**-10.)*(numpy.exp(self._rapLzInterp(Lz[indxc]))-10.**-5.) if numpy.sum(indx) > 0: eccindiv, zmaxindiv, rperiindiv, rapindiv=\ self._aA.EccZmaxRperiRap(R[indx], vR[indx], vT[indx], z[indx], vz[indx], **kwargs) ecc[indx]= eccindiv zmax[indx]= zmaxindiv rperi[indx]= rperiindiv rap[indx]= rapindiv else: ecc,zmax,rperi,rap= self.EccZmaxRperiRap(numpy.array([R]), numpy.array([vR]), numpy.array([vT]), numpy.array([z]), numpy.array([vz]), **kwargs) return (ecc[0],zmax[0],rperi[0],rap[0]) ecc[ecc < 0.]= 0. zmax[zmax < 0.]= 0. rperi[rperi < 0.]= 0. rap[rap < 0.]= 0. return (ecc,zmax,rperi,rap)
0.025217
def access_token(self): """ Retrieve and cache an access token to authenticate API calls. :return: An access token string. """ if self._cached_access_token is not None: return self._cached_access_token resp = self._request(endpoint='access_token', data={'grant_type': 'client_credentials', 'scope': 'basic user'}, auth=(self.api_username, self.api_key)) self._cached_access_token = resp['access_token'] return self._cached_access_token
0.005556
def load_coef(filename):
    """Loads a file that was saved with save_coef."""
    with open(filename) as f:
        lines = f.readlines()

    lst = lines[0].split(',')
    nmax = int(lst[0])
    mmax = int(lst[1])

    L = (nmax + 1) + mmax * (2 * nmax - mmax + 1)
    vec = np.zeros(L, dtype=np.complex128)

    lines.pop(0)
    for n, line in enumerate(lines):
        lst = line.split(',')
        re = float(lst[0])
        im = float(lst[1])
        vec[n] = re + 1j * im

    return sp.ScalarCoefs(vec, nmax, mmax)
0.008636
def get_item_size(self, content):
    """
    Get the max size (width and height) for the elements of a list of
    strings as a QLabel.
    """
    strings = []
    if content:
        for rich_text in content:
            label = QLabel(rich_text)
            label.setTextFormat(Qt.PlainText)
            strings.append(label.text())
        fm = label.fontMetrics()
        return (max([fm.width(s) * 1.3 for s in strings]), fm.height())
0.004049
def _datastore_api(self):
    """Getter for a wrapped API object."""
    if self._datastore_api_internal is None:
        if self._use_grpc:
            self._datastore_api_internal = make_datastore_api(self)
        else:
            self._datastore_api_internal = HTTPDatastoreAPI(self)
    return self._datastore_api_internal
0.005618
def _compute_dynamic_properties(self, builder): """Update from the DatasetBuilder.""" # Fill other things by going over the dataset. splits = self.splits for split_info in utils.tqdm( splits.values(), desc="Computing statistics...", unit=" split"): try: split_name = split_info.name # Fill DatasetFeatureStatistics. dataset_feature_statistics, schema = get_dataset_feature_statistics( builder, split_name) # Add the statistics to this split. split_info.statistics.CopyFrom(dataset_feature_statistics) # Set the schema at the top-level since this is independent of the # split. self.as_proto.schema.CopyFrom(schema) except tf.errors.InvalidArgumentError: # This means there is no such split, even though it was specified in the # info, the least we can do is to log this. logging.error(("%s's info() property specifies split %s, but it " "doesn't seem to have been generated. Please ensure " "that the data was downloaded for this split and re-run " "download_and_prepare."), self.name, split_name) raise # Set splits to trigger proto update in setter self._set_splits(splits)
0.004615
def obfn_dfd(self): r"""Compute data fidelity term :math:`(1/2) \| D X B - S \|_2^2`. """ DXBf = sl.dot(self.B, sl.inner(self.Df, self.obfn_fvarf(), axis=self.cri.axisM), axis=self.cri.axisC) Ef = DXBf - self.Sf return sl.rfl2norm2(Ef, self.S.shape, axis=self.cri.axisN) / 2.0
0.007916
def predict_y(self, xq, sigma=None, k=None, **kwargs):
    """Provide a prediction of xq in the output space.

    @param xq: an array of float of length dim_x
    """
    sigma = sigma or self.sigma
    k = k or self.k
    dists, index = self.dataset.nn_x(xq, k=k)
    w = self._weights(dists, sigma*sigma)
    return np.sum([wi*self.dataset.get_y(idx) for wi, idx in zip(w, index)], axis=0)
0.016355
def set_identifiers(self, data): """ Sets the identifier(s) within the instance data. The identifier name(s) is/are determined from the ``ResourceDetails`` instance hanging off the class itself. :param data: The value(s) to be set. :param data: dict """ for id_info in self._details.identifiers: var_name = id_info['var_name'] self._data[var_name] = data.get(var_name) # FIXME: This needs to likely kick off invalidating/rebuilding # relations. # For now, just remove them all. This is potentially inefficient # but is nicely lazy if we don't need them & prevents stale data # for the moment. self._relations = {}
0.002564
def _to_bel_lines_footer(graph) -> Iterable[str]: """Iterate the lines of a BEL graph's corresponding BEL script's footer. :param pybel.BELGraph graph: A BEL graph """ unqualified_edges_to_serialize = [ (u, v, d) for u, v, d in graph.edges(data=True) if d[RELATION] in UNQUALIFIED_EDGES and EVIDENCE not in d ] isolated_nodes_to_serialize = [ node for node in graph if not graph.pred[node] and not graph.succ[node] ] if unqualified_edges_to_serialize or isolated_nodes_to_serialize: yield '###############################################\n' yield 'SET Citation = {"PubMed","Added by PyBEL","29048466"}' yield 'SET SupportingText = "{}"'.format(PYBEL_AUTOEVIDENCE) for u, v, data in unqualified_edges_to_serialize: yield '{} {} {}'.format(u.as_bel(), data[RELATION], v.as_bel()) for node in isolated_nodes_to_serialize: yield node.as_bel() yield 'UNSET SupportingText' yield 'UNSET Citation'
0.000947
def origin_west_asia(origin):
    """\
    Returns if the origin is located in Western Asia.

    Holds true for the following countries:
    * Armenia
    * Azerbaijan
    * Bahrain
    * Cyprus
    * Georgia
    * Iraq
    * Israel
    * Jordan
    * Kuwait
    * Lebanon
    * Oman
    * Qatar
    * Saudi Arabia
    * Syria
    * Turkey
    * United Arab Emirates
    * Yemen

    `origin`
        The origin to check.
    """
    return origin_armenia(origin) or origin_azerbaijan(origin) \
        or origin_bahrain(origin) or origin_cyprus(origin) \
        or origin_georgia(origin) or origin_iraq(origin) \
        or origin_israel(origin) or origin_jordan(origin) \
        or origin_kuwait(origin) or origin_lebanon(origin) \
        or origin_oman(origin) or origin_qatar(origin) \
        or origin_saudi_arabia(origin) or origin_syria(origin) \
        or origin_turkey(origin) or origin_united_arab_emirates(origin) \
        or origin_yemen(origin)
0.008318
def setAnimation(self,obj,animation,transition=None,force=False): """ Sets the animation to be used by the object. See :py:meth:`Actor.setAnimation()` for more information. """ self.ensureModelData(obj) data = obj._modeldata # Validity check if animation not in self.modeldata["animations"]: raise ValueError("There is no animation of name '%s' for model '%s'"%(animation,self.modelname)) if data.get("_anidata",{}).get("anitype",None)==animation and not force: return # animation is already running # Cache the obj to improve readability anim = self.modeldata["animations"][animation] # Set to default if not set if transition is None: transition = anim.default_jt # Notify the animation to allow it to initialize itself anim.startAnimation(data,transition) # initialize animation data if "_anidata" not in data: data["_anidata"]={} adata = data["_anidata"] adata["anitype"]=animation if "_schedfunc" in adata: # unschedule the old animation, if any # prevents clashing and crashes pyglet.clock.unschedule(adata["_schedfunc"]) # Schedule the animation function def schedfunc(*args): # This function is defined locally to create a closure # The closure stores the local variables, e.g. anim and data even after the parent function has finished # Note that this may also prevent the garbage collection of any objects defined in the parent scope anim.tickEntity(data) # register the function to pyglet pyglet.clock.schedule_interval(schedfunc,1./(anim.kps if anim.atype=="keyframes" else 60)) # save it for later for de-initialization adata["_schedfunc"] = schedfunc
0.016048
def resolve_dependency_graph(self, target):
    """ resolves the build order for interdependent build targets

    Assumes no cyclic dependencies
    """
    targets = self.deep_dependendants(target)
    # print "deep dependants:", targets
    return sorted(targets,
                  cmp=lambda a, b: 1 if b in self.deep_dependendants(a)
                  else -1 if a in self.deep_dependendants(b)
                  else 0)
0.004008
def open(self, file_path): """ Open a SQLite database file. :param str file_path: SQLite database file path to open. """ from simplesqlite import SimpleSQLite if self.is_opened(): if self.stream.database_path == abspath(file_path): self._logger.logger.debug( "database already opened: {}".format(self.stream.database_path) ) return self.close() self._stream = SimpleSQLite(file_path, "w")
0.005545
def verify(self): '''Verifies the message data based on rules and restrictions defined in the Postmark API docs. There can be no more than 20 recipients in total. NOTE: This does not check that your attachments total less than 10MB, you must do that yourself. ''' if self.to is None: raise MessageError('"to" is required') if self.html is None and self.text is None: err = 'At least one of "html" or "text" must be provided' raise MessageError(err) self._verify_headers() self._verify_attachments() if (MAX_RECIPIENTS_PER_MESSAGE and len(self.recipients) > MAX_RECIPIENTS_PER_MESSAGE): err = 'No more than {0} recipients accepted.' raise MessageError(err.format(MAX_RECIPIENTS_PER_MESSAGE))
0.002361
def print_error(input, err, scanner): """This is a really dumb long function to print error messages nicely.""" p = err.pos # Figure out the line number line = input[:p].count('\n') print err.msg + " on line " + repr(line + 1) + ":" # Now try printing part of the line text = input[max(p - 80, 0): p + 80] p = p - max(p - 80, 0) # Strip to the left i = text[:p].rfind('\n') j = text[:p].rfind('\r') if i < 0 or (0 <= j < i): i = j if 0 <= i < p: p = p - i - 1 text = text[i + 1:] # Strip to the right i = text.find('\n', p) j = text.find('\r', p) if i < 0 or (0 <= j < i): i = j if i >= 0: text = text[:i] # Now shorten the text while len(text) > 70 and p > 60: # Cut off 10 chars text = "..." + text[10:] p = p - 7 # Now print the string, along with an indicator print '> ', text print '> ', ' ' * p + '^' print 'List of nearby tokens:', scanner
0.001965
def get_deep_focus(self, startfrom=None):
    """return the bottom most focussed widget of the widget tree"""
    if not startfrom:
        startfrom = self.current_buffer
    if 'get_focus' in dir(startfrom):
        focus = startfrom.get_focus()
        if isinstance(focus, tuple):
            focus = focus[0]
        if isinstance(focus, urwid.Widget):
            return self.get_deep_focus(startfrom=focus)
    return startfrom
0.004219
def disable_dataset(self, dataset=None, **kwargs): """ Disable a 'dataset'. Datasets that are enabled will be computed during :meth:`run_compute` and included in the cost function during :meth:`run_fitting`. If compute is not provided, the dataset will be disabled across all compute options. :parameter str dataset: name of the dataset :parameter **kwargs: any other tags to do the filter (except dataset or context) :return: :class:`phoebe.parameters.parameters.ParameterSet` of the disabled dataset """ kwargs['context'] = 'compute' kwargs['dataset'] = dataset kwargs['qualifier'] = 'enabled' self.set_value_all(value=False, **kwargs) self._add_history(redo_func='disable_dataset', redo_kwargs={'dataset': dataset}, undo_func='enable_dataset', undo_kwargs={'dataset': dataset}) return self.get_dataset(dataset=dataset)
0.001894
def emulate(self, instruction): """ Emulate a single instruction. """ # The emulation might restart if Unicorn needs to bring in a memory map # or bring a value from Manticore state. while True: self.reset() # Establish Manticore state, potentially from past emulation # attempts for base in self._should_be_mapped: size, perms = self._should_be_mapped[base] self._emu.mem_map(base, size, perms) for address, values in self._should_be_written.items(): for offset, byte in enumerate(values, start=address): if issymbolic(byte): from ..native.cpu.abstractcpu import ConcretizeMemory raise ConcretizeMemory(self._cpu.memory, offset, 8, "Concretizing for emulation") self._emu.mem_write(address, b''.join(values)) # Try emulation self._should_try_again = False self._step(instruction) if not self._should_try_again: break
0.001701
def _force_disconnect_action(self, action): """Forcibly disconnect a device. Args: action (ConnectionAction): the action object describing what we are forcibly disconnecting """ conn_key = action.data['id'] if self._get_connection_state(conn_key) == self.Disconnected: return data = self._get_connection(conn_key) # If there are any operations in progress, cancel them cleanly if data['state'] == self.Connecting: callback = data['action'].data['callback'] callback(data['connection_id'], self.id, False, 'Unexpected disconnection') elif data['state'] == self.Disconnecting: callback = data['action'].data['callback'] callback(data['connection_id'], self.id, True, None) elif data['state'] == self.InProgress: callback = data['action'].data['callback'] if data['microstate'] == 'rpc': callback(False, 'Unexpected disconnection', 0xFF, None) elif data['microstate'] == 'open_interface': callback(False, 'Unexpected disconnection') elif data['microstate'] == 'close_interface': callback(False, 'Unexpected disconnection') connection_id = data['connection_id'] internal_id = data['internal_id'] del self._connections[connection_id] del self._int_connections[internal_id]
0.002044
def list_all_payments(cls, **kwargs): """List Payments Return a list of Payments This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_payments(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Payment] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_payments_with_http_info(**kwargs) else: (data) = cls._list_all_payments_with_http_info(**kwargs) return data
0.002342
def write_summary(all_procs, summary_file): """ Write a summary of all run processes to summary_file in tab-delimited format. """ if not summary_file: return with summary_file: writer = csv.writer(summary_file, delimiter='\t', lineterminator='\n') writer.writerow(('directory', 'command', 'start_time', 'end_time', 'run_time', 'exit_status', 'result')) rows = ((p.working_dir, ' '.join(p.command), p.start_time, p.end_time, p.running_time, p.return_code, p.status) for p in all_procs) writer.writerows(rows)
0.003333
def export(self, **kwargs): """ Generate audio file from composition. :param str. filename: Output filename (no extension) :param str. filetype: Output file type (only .wav supported for now) :param integer samplerate: Sample rate of output audio :param integer channels: Channels in output audio, if different than originally specified :param bool. separate_tracks: Also generate audio file for each track in composition :param int min_length: Minimum length of output array (in frames). Will zero pad extra length. :param bool. adjust_dynamics: Automatically adjust dynamics (will document later) """ # get optional args filename = kwargs.pop('filename', 'out') filetype = kwargs.pop('filetype', 'wav') adjust_dynamics = kwargs.pop('adjust_dynamics', False) samplerate = kwargs.pop('samplerate', None) channels = kwargs.pop('channels', self.channels) separate_tracks = kwargs.pop('separate_tracks', False) min_length = kwargs.pop('min_length', None) if samplerate is None: samplerate = np.min([track.samplerate for track in self.tracks]) encoding = 'pcm16' to_mp3 = False if filetype == 'ogg': encoding = 'vorbis' elif filetype == 'mp3': filetype = 'wav' to_mp3 = True if separate_tracks: # build the separate parts of the composition if desired for track in self.tracks: out = self.build(track_list=[track], adjust_dynamics=adjust_dynamics, min_length=min_length, channels=channels) out_file = Sndfile("%s-%s.%s" % (filename, track.name, filetype), 'w', Format(filetype, encoding=encoding), channels, samplerate) out_file.write_frames(out) out_file.close() # always build the complete composition out = self.build(adjust_dynamics=adjust_dynamics, min_length=min_length, channels=channels) out_filename = "%s.%s" % (filename, filetype) out_file = Sndfile(out_filename, 'w', Format(filetype, encoding=encoding), channels, samplerate) out_file.write_frames(out) out_file.close() if LIBXMP and filetype == "wav": xmp = libxmp.XMPMeta() ns = libxmp.consts.XMP_NS_DM p = xmp.get_prefix_for_namespace(ns) xpath = p + 'Tracks' xmp.append_array_item(ns, xpath, None, array_options={"prop_value_is_array": True}, prop_value_is_struct=True) xpath += '[1]/' + p xmp.set_property(ns, xpath + "trackName", "CuePoint Markers") xmp.set_property(ns, xpath + "trackType", "Cue") xmp.set_property(ns, xpath + "frameRate", "f%d" % samplerate) for i, lab in enumerate(self.labels): xmp.append_array_item(ns, xpath + "markers", None, array_options={"prop_value_is_array": True}, prop_value_is_struct=True) xmp.set_property(ns, xpath + "markers[%d]/%sname" % (i + 1, p), lab.name) xmp.set_property(ns, xpath + "markers[%d]/%sstartTime" % (i + 1, p), str(lab.sample(samplerate))) xmpfile = libxmp.XMPFiles(file_path=out_filename, open_forupdate=True) if xmpfile.can_put_xmp(xmp): xmpfile.put_xmp(xmp) xmpfile.close_file() if to_mp3: wav_to_mp3(out_filename, delete_wav=True) return out
0.004005
def verify(self, data):
    r"""Does the given `data` hash to the digest in this `Multihash`?

    >>> import hashlib
    >>> data = b'foo'
    >>> hash = hashlib.sha1(data)
    >>> mh = Multihash.from_hash(hash)
    >>> mh.verify(data)
    True
    >>> mh.verify(b'foobar')
    False

    Application-specific hash functions are also supported (see `FuncReg`).
    """
    digest = _do_digest(data, self.func)
    return digest[:len(self.digest)] == self.digest
0.003831
def run(self, *args):
    """Remove unique identities or identities from the registry.

    By default, it removes the unique identity identified by
    <identifier>. To remove an identity, set <identity> parameter.
    """
    params = self.parser.parse_args(args)

    identifier = params.identifier
    identity = params.identity

    code = self.remove(identifier, identity)

    return code
0.00464
def integrate_converge(self, crit=1e-4, verbose=True): """Integrates the model until model states are converging. :param crit: exit criteria for difference of iterated solutions [default: 0.0001] :type crit: float :param bool verbose: information whether total elapsed time should be printed [default: True] :Example: :: >>> import climlab >>> model = climlab.EBM() >>> model.global_mean_temperature() Field(11.997968598413685) >>> model.integrate_converge() Total elapsed time is 10.0 years. >>> model.global_mean_temperature() Field(14.288155406577301) """ # implemented by m-kreuzer for varname, value in self.state.items(): value_old = copy.deepcopy(value) self.integrate_years(1,verbose=False) while np.max(np.abs(value_old-value)) > crit : value_old = copy.deepcopy(value) self.integrate_years(1,verbose=False) if verbose == True: print("Total elapsed time is %s years." % str(self.time['days_elapsed']/const.days_per_year))
0.004484
def removeByIndex(self, index):
    """removes a user from the invitation list by position"""
    if 0 <= index < len(self._invites):
        del self._invites[index]
0.019417
def validate_header(fields, # type: Sequence[FieldSpec] column_names # type: Sequence[str] ): # type: (...) -> None """ Validate the `column_names` according to the specification in `fields`. :param fields: The `FieldSpec` objects forming the specification. :param column_names: A sequence of column identifier. :raises FormatError: When the number of columns or the column identifiers don't match the specification. """ if len(fields) != len(column_names): msg = 'Header has {} columns when {} are expected.'.format( len(column_names), len(fields)) raise FormatError(msg) for f, column in zip(fields, column_names): if f.identifier != column: msg = "Column has identifier '{}' when '{}' is expected.".format( column, f.identifier) raise FormatError(msg)
0.001053
def CIC(M, K): """ A functional form implementation of a cascade of integrator comb (CIC) filters. Parameters ---------- M : Effective number of taps per section (typically the decimation factor). K : The number of CIC sections cascaded (larger K gives the filter a wider image rejection bandwidth). Returns ------- b : FIR filter coefficients for a simple direct form implementation using the filter() function. Notes ----- Commonly used in multirate signal processing digital down-converters and digital up-converters. A true CIC filter requires no multiplies, only add and subtract operations. The functional form created here is a simple FIR requiring real coefficient multiplies via filter(). Mark Wickert July 2013 """ if K == 1: b = np.ones(M) else: h = np.ones(M) b = h for i in range(1, K): b = signal.convolve(b, h) # cascade by convolving impulse responses # Make filter have unity gain at DC return b / np.sum(b)
0.006635
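A brief usage sketch for the CIC coefficient generator above (editor-added; assumes numpy and scipy.signal are available and that the CIC function from the previous row is in scope):

import numpy as np
from scipy import signal

b = CIC(M=8, K=3)                # unity-DC-gain FIR approximation of a 3-section CIC
x = np.random.randn(1000)        # wideband test signal
y = signal.lfilter(b, [1.0], x)  # filter with the CIC coefficients
y_dec = y[::8]                   # then decimate by the factor M = 8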
def power_configuration(name, policy=None, delayType=None, delayValue=None): ''' Ensures that the power configuration is configured on the system. This is only available on some C-Series servers. .. versionadded:: 2019.2.0 name: The name of the module function to execute. policy(str): The action to be taken when chassis power is restored after an unexpected power loss. This can be one of the following: reset: The server is allowed to boot up normally when power is restored. The server can restart immediately or, optionally, after a fixed or random delay. stay-off: The server remains off until it is manually restarted. last-state: The server restarts and the system attempts to restore any processes that were running before power was lost. delayType(str): If the selected policy is reset, the restart can be delayed with this option. This can be one of the following: fixed: The server restarts after a fixed delay. random: The server restarts after a random delay. delayValue(int): If a fixed delay is selected, once chassis power is restored and the Cisco IMC has finished rebooting, the system waits for the specified number of seconds before restarting the server. Enter an integer between 0 and 240. SLS Example: .. code-block:: yaml reset_power: cimc.power_configuration: - policy: reset - delayType: fixed - delayValue: 0 power_off: cimc.power_configuration: - policy: stay-off ''' ret = _default_ret(name) power_conf = __salt__['cimc.get_power_configuration']() req_change = False try: power_dict = power_conf['outConfigs']['biosVfResumeOnACPowerLoss'][0] if policy and power_dict['vpResumeOnACPowerLoss'] != policy: req_change = True elif policy == "reset": if power_dict['delayType'] != delayType: req_change = True elif power_dict['delayType'] == "fixed": if str(power_dict['delay']) != str(delayValue): req_change = True else: ret['result'] = False ret['comment'] = "The power policy must be specified." return ret if req_change: update = __salt__['cimc.set_power_configuration'](policy, delayType, delayValue) if update['outConfig']['biosVfResumeOnACPowerLoss'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting power configuration." return ret ret['changes']['before'] = power_conf ret['changes']['after'] = __salt__['cimc.get_power_configuration']() ret['comment'] = "Power settings modified." else: ret['comment'] = "Power settings already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting power settings." log.error(err) return ret ret['result'] = True return ret
0.001209
def version_object_and_next(string, retries=0): # type: (str, int) -> VersionThing """ Try three parsing strategies, favoring semver, then pep440, then whatev. :param string: :return: """ if retries > 2: raise JiggleVersionException( "Can't parse, ran out of retries: " + unicode(string) ) if string == "" or string is None: raise JiggleVersionException("No version string, can only use default logic.") if string[0] == "v": string = string[1:] try: version = semantic_version.Version(string) next_version = version.next_patch() _ = semantic_version.Version(unicode(string)) return version, next_version, "semantic_version" except: logger.debug("Not sem_ver:" + unicode(string)) try: version = parver.Version.parse(string) next_version = version.bump_dev() _ = parver.Version.parse(unicode(next_version)) return version, next_version, "pep440 (parver)" except: try: logger.debug("Not par_ver:" + unicode(string)) # Version.supported_version_schemes = [Pep440VersionScheme, Simple4VersionScheme] version = versio_version.Version(string, scheme=Simple4VersionScheme) version.bump() return ( versio_version.Version(string, scheme=Simple4VersionScheme), version, "simple-4 part (versio)", ) except: # let above libs try first before we do primitive clean up work retries += 1 if "a" in string: return version_object_and_next(string.replace("a", ".a"), retries) elif "b" in string: return version_object_and_next(string.replace("b", ".b"), retries) elif len(string.split(".")) == 1: # convert 2 part to 3 part. return version_object_and_next(string + ".0.0", retries) elif len(string.split(".")) == 2: # convert 2 part to 3 part, e.g. 1.1 -> 1.1.0 return version_object_and_next(string + ".0", retries) elif string.isnumeric() and "." not in string: # e.g. "21" -> "21.0.0" return version_object_and_next(string + ".0.0", retries) else: logger.debug("Not versio:" + unicode(string)) # versio only does pep440 by default # print(versio.version.Version.supported_version_schemes) raise
0.004049
def _set_linecard(self, v, load=False): """ Setter method for linecard, mapped from YANG variable /global_lc_holder/linecard (container) If this variable is read-only (config: false) in the source YANG file, then _set_linecard is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_linecard() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=linecard.linecard, is_container='container', presence=False, yang_name="linecard", rest_name="linecard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Config linecard for the specified slot', u'display-when': u'((/local-node/swbd-number = "1000") or (/local-node/swbd-number = "1001") or (/local-node/swbd-number = "1002") or (/local-node/swbd-number = "2000") or (/local-node/swbd-number = "2001"))', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-linecard-management', defining_module='brocade-linecard-management', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """linecard must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=linecard.linecard, is_container='container', presence=False, yang_name="linecard", rest_name="linecard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Config linecard for the specified slot', u'display-when': u'((/local-node/swbd-number = "1000") or (/local-node/swbd-number = "1001") or (/local-node/swbd-number = "1002") or (/local-node/swbd-number = "2000") or (/local-node/swbd-number = "2001"))', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-linecard-management', defining_module='brocade-linecard-management', yang_type='container', is_config=True)""", }) self.__linecard = t if hasattr(self, '_set'): self._set()
0.00466
def set_boolean(self, option, value):
    """Set a boolean option.

    Args:
        option (str): name of option.
        value (bool): value of the option.

    Raises:
        TypeError: Value must be a boolean.
    """
    if not isinstance(value, bool):
        raise TypeError("%s must be a boolean" % option)

    self.options[option] = str(value).lower()
0.004728
def output_file_name(self):
    """Name of the file where plugin's output should be written to."""
    safe_path = re.sub(r":|/", "_", self.source_urn.Path().lstrip("/"))
    return "results_%s%s" % (safe_path, self.output_file_extension)
0.004184
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    if n:
        for i in xrange(0, len(l), n):
            yield l[i:i + n]
0.013793
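Two quick examples of the generator above (editor-added; note the snippet is Python 2, as the xrange call implies):

list(chunks([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4], [5]]
list(chunks([1, 2, 3], 0))        # -> [] (n == 0 yields nothing)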
def fitNull(self, init_method='emp_cov'):
    """ fit null model """
    self.null = self.mtSet1.fitNull(cache=False, factr=self.factr,
                                    init_method=init_method)
    self.null['NLL'] = self.null['NLL0']
    self.mtSet2.null = copy.copy(self.null)
    return self.null
0.01049
def get_probability_grammar(self):
    """
    A method that returns probability grammar
    """
    # Creating valid word expression for probability, it is of the format
    # var1 | var2 , var3 or var1 var2 var3 or simply var
    word_expr = Word(alphanums + '-' + '_') + Suppress(Optional("|")) + Suppress(Optional(","))
    word_expr2 = Word(initChars=printables, excludeChars=[',', ')', ' ', '(']) + Suppress(Optional(","))
    # creating an expression for valid numbers, of the format
    # 1.00 or 1 or 1.00. 0.00 or 9.8e-5 etc
    num_expr = Word(nums + '-' + '+' + 'e' + 'E' + '.') + Suppress(Optional(","))
    probability_expr = Suppress('probability') + Suppress('(') + OneOrMore(word_expr) + Suppress(')')
    optional_expr = Suppress('(') + OneOrMore(word_expr2) + Suppress(')')
    probab_attributes = optional_expr | Suppress('table')
    cpd_expr = probab_attributes + OneOrMore(num_expr)
    return probability_expr, cpd_expr
0.005976
def planner(self, *, resource=''):
    """ Get an instance to read information from Microsoft planner """
    if not isinstance(self.protocol, MSGraphProtocol):
        # TODO: Custom protocol accessing OneDrive/Sharepoint Api fails here
        raise RuntimeError(
            'planner api only works on Microsoft Graph API')
    return Planner(parent=self, main_resource=resource)
0.007353
def modify_db_instance(DBInstanceIdentifier=None, AllocatedStorage=None, DBInstanceClass=None, DBSubnetGroupName=None, DBSecurityGroups=None, VpcSecurityGroupIds=None, ApplyImmediately=None, MasterUserPassword=None, DBParameterGroupName=None, BackupRetentionPeriod=None, PreferredBackupWindow=None, PreferredMaintenanceWindow=None, MultiAZ=None, EngineVersion=None, AllowMajorVersionUpgrade=None, AutoMinorVersionUpgrade=None, LicenseModel=None, Iops=None, OptionGroupName=None, NewDBInstanceIdentifier=None, StorageType=None, TdeCredentialArn=None, TdeCredentialPassword=None, CACertificateIdentifier=None, Domain=None, CopyTagsToSnapshot=None, MonitoringInterval=None, DBPortNumber=None, PubliclyAccessible=None, MonitoringRoleArn=None, DomainIAMRoleName=None, PromotionTier=None, EnableIAMDatabaseAuthentication=None): """ Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. See also: AWS API Documentation Examples This example immediately changes the specified settings for the specified DB instance. Expected Output: :example: response = client.modify_db_instance( DBInstanceIdentifier='string', AllocatedStorage=123, DBInstanceClass='string', DBSubnetGroupName='string', DBSecurityGroups=[ 'string', ], VpcSecurityGroupIds=[ 'string', ], ApplyImmediately=True|False, MasterUserPassword='string', DBParameterGroupName='string', BackupRetentionPeriod=123, PreferredBackupWindow='string', PreferredMaintenanceWindow='string', MultiAZ=True|False, EngineVersion='string', AllowMajorVersionUpgrade=True|False, AutoMinorVersionUpgrade=True|False, LicenseModel='string', Iops=123, OptionGroupName='string', NewDBInstanceIdentifier='string', StorageType='string', TdeCredentialArn='string', TdeCredentialPassword='string', CACertificateIdentifier='string', Domain='string', CopyTagsToSnapshot=True|False, MonitoringInterval=123, DBPortNumber=123, PubliclyAccessible=True|False, MonitoringRoleArn='string', DomainIAMRoleName='string', PromotionTier=123, EnableIAMDatabaseAuthentication=True|False ) :type DBInstanceIdentifier: string :param DBInstanceIdentifier: [REQUIRED] The DB instance identifier. This value is stored as a lowercase string. Constraints: Must be the identifier for an existing DB instance Must contain from 1 to 63 alphanumeric characters or hyphens First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens :type AllocatedStorage: integer :param AllocatedStorage: The new storage capacity of the RDS instance. Changing this setting does not result in an outage and the change is applied during the next maintenance window unless ApplyImmediately is set to true for this request. MySQL Default: Uses existing setting Valid Values: 5-6144 Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. Type: Integer MariaDB Default: Uses existing setting Valid Values: 5-6144 Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. Type: Integer PostgreSQL Default: Uses existing setting Valid Values: 5-6144 Constraints: Value supplied must be at least 10% greater than the current value. 
Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. Type: Integer Oracle Default: Uses existing setting Valid Values: 10-6144 Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. SQL Server Cannot be modified. If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance. :type DBInstanceClass: string :param DBInstanceClass: The new compute and memory capacity of the DB instance. To determine the instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions action. Note that not all instance classes are available in all regions for all DB engines. Passing a value for this setting causes an outage during the change and is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request. Default: Uses existing setting Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large :type DBSubnetGroupName: string :param DBSubnetGroupName: The new DB subnet group for the DB instance. You can use this parameter to move your DB instance to a different VPC. If your DB instance is not in a VPC, you can also use this parameter to move your DB instance into a VPC. For more information, see Updating the VPC for a DB Instance . Changing the subnet group causes an outage during the change. The change is applied during the next maintenance window, unless you specify true for the ApplyImmediately parameter. Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Example: mySubnetGroup :type DBSecurityGroups: list :param DBSecurityGroups: A list of DB security groups to authorize on this DB instance. Changing this setting does not result in an outage and the change is asynchronously applied as soon as possible. Constraints: Must be 1 to 255 alphanumeric characters First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens (string) -- :type VpcSecurityGroupIds: list :param VpcSecurityGroupIds: A list of EC2 VPC security groups to authorize on this DB instance. 
This change is asynchronously applied as soon as possible. Constraints: Must be 1 to 255 alphanumeric characters First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens (string) -- :type ApplyImmediately: boolean :param ApplyImmediately: Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance. If this parameter is set to false , changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and will be applied on the next call to RebootDBInstance , or the next failure reboot. Review the table of parameters in Modifying a DB Instance and Using the Apply Immediately Parameter to see the impact that setting ApplyImmediately to true or false has for each modified parameter and to determine when the changes will be applied. Default: false :type MasterUserPassword: string :param MasterUserPassword: The new password for the DB instance master user. Can be any printable ASCII character except '/', ''', or '@'. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response. Default: Uses existing setting Constraints: Must be 8 to 41 alphanumeric characters (MySQL, MariaDB, and Amazon Aurora), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric characters (SQL Server). Note Amazon RDS API actions never return the password, so this action provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked. :type DBParameterGroupName: string :param DBParameterGroupName: The name of the DB parameter group to apply to the DB instance. Changing this setting does not result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. The db instance will NOT be rebooted automatically and the parameter changes will NOT be applied during the next maintenance window. Default: Uses existing setting Constraints: The DB parameter group must be in the same DB parameter group family as this DB instance. :type BackupRetentionPeriod: integer :param BackupRetentionPeriod: The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups. Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible. 
Default: Uses existing setting Constraints: Must be a value from 0 to 35 Can be specified for a MySQL Read Replica only if the source is running MySQL 5.6 Can be specified for a PostgreSQL Read Replica only if the source is running PostgreSQL 9.3.5 Cannot be set to 0 if the DB instance is a source to Read Replicas :type PreferredBackupWindow: string :param PreferredBackupWindow: The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod parameter. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Constraints: Must be in the format hh24:mi-hh24:mi Times should be in Universal Time Coordinated (UTC) Must not conflict with the preferred maintenance window Must be at least 30 minutes :type PreferredMaintenanceWindow: string :param PreferredMaintenanceWindow: The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied. Default: Uses existing setting Format: ddd:hh24:mi-ddd:hh24:mi Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes :type MultiAZ: boolean :param MultiAZ: Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. Constraints: Cannot be specified if the DB instance is a Read Replica. :type EngineVersion: string :param EngineVersion: The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. For major version upgrades, if a non-default DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family. For a list of valid engine versions, see CreateDBInstance . :type AllowMajorVersionUpgrade: boolean :param AllowMajorVersionUpgrade: Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version. :type AutoMinorVersionUpgrade: boolean :param AutoMinorVersionUpgrade: Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window. Changing this parameter does not result in an outage except in the following case and the change is asynchronously applied as soon as possible. 
An outage will result if this parameter is set to true during the maintenance window, and a newer minor version is available, and RDS has enabled auto patching for that engine version. :type LicenseModel: string :param LicenseModel: The license model for the DB instance. Valid values: license-included | bring-your-own-license | general-public-license :type Iops: integer :param Iops: The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this setting does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. Default: Uses existing setting Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect. SQL Server Setting the IOPS value for the SQL Server database engine is not supported. Type: Integer If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance. :type OptionGroupName: string :param OptionGroupName: Indicates that the DB instance should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted. Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance :type NewDBInstanceIdentifier: string :param NewDBInstanceIdentifier: The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot will occur immediately if you set Apply Immediately to true, or will occur during the next maintenance window if Apply Immediately to false. This value is stored as a lowercase string. 
Constraints: Must contain from 1 to 63 alphanumeric characters or hyphens First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens :type StorageType: string :param StorageType: Specifies the storage type to be associated with the DB instance. Valid values: standard | gp2 | io1 If you specify io1 , you must also include a value for the Iops parameter. Default: io1 if the Iops parameter is specified; otherwise standard :type TdeCredentialArn: string :param TdeCredentialArn: The ARN from the Key Store with which to associate the instance for TDE encryption. :type TdeCredentialPassword: string :param TdeCredentialPassword: The password for the given ARN from the Key Store in order to access the device. :type CACertificateIdentifier: string :param CACertificateIdentifier: Indicates the certificate that needs to be associated with the instance. :type Domain: string :param Domain: The Active Directory Domain to move the instance to. Specify none to remove the instance from its current domain. The domain must be created prior to this operation. Currently only a Microsoft SQL Server instance can be created in a Active Directory Domain. :type CopyTagsToSnapshot: boolean :param CopyTagsToSnapshot: True to copy all tags from the DB instance to snapshots of the DB instance; otherwise false. The default is false. :type MonitoringInterval: integer :param MonitoringInterval: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0. Valid Values: 0, 1, 5, 10, 15, 30, 60 :type DBPortNumber: integer :param DBPortNumber: The port number on which the database accepts connections. The value of the DBPortNumber parameter must not match any of the port values specified for options in the option group for the DB instance. Your database will restart when you change the DBPortNumber value regardless of the value of the ApplyImmediately parameter. MySQL Default: 3306 Valid Values: 1150-65535 MariaDB Default: 3306 Valid Values: 1150-65535 PostgreSQL Default: 5432 Valid Values: 1150-65535 Type: Integer Oracle Default: 1521 Valid Values: 1150-65535 SQL Server Default: 1433 Valid Values: 1150-65535 except for 1434 , 3389 , 47001 , 49152 , and 49152 through 49156 . Amazon Aurora Default: 3306 Valid Values: 1150-65535 :type PubliclyAccessible: boolean :param PubliclyAccessible: Boolean value that indicates if the DB instance has a publicly resolvable DNS name. Set to True to make the DB instance Internet-facing with a publicly resolvable DNS name, which resolves to a public IP address. Set to False to make the DB instance internal with a DNS name that resolves to a private IP address. PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be true in order for it to be publicly accessible. Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter. Default: false :type MonitoringRoleArn: string :param MonitoringRoleArn: The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess . For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring . 
If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value. :type DomainIAMRoleName: string :param DomainIAMRoleName: The name of the IAM role to use when making API calls to the Directory Service. :type PromotionTier: integer :param PromotionTier: A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster . Default: 1 Valid Values: 0 - 15 :type EnableIAMDatabaseAuthentication: boolean :param EnableIAMDatabaseAuthentication: True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts; otherwise false. You can enable IAM database authentication for the following database engines For MySQL 5.6, minor version 5.6.34 or higher For MySQL 5.7, minor version 5.7.16 or higher Default: false :rtype: dict :return: { 'DBInstance': { 'DBInstanceIdentifier': 'string', 'DBInstanceClass': 'string', 'Engine': 'string', 'DBInstanceStatus': 'string', 'MasterUsername': 'string', 'DBName': 'string', 'Endpoint': { 'Address': 'string', 'Port': 123, 'HostedZoneId': 'string' }, 'AllocatedStorage': 123, 'InstanceCreateTime': datetime(2015, 1, 1), 'PreferredBackupWindow': 'string', 'BackupRetentionPeriod': 123, 'DBSecurityGroups': [ { 'DBSecurityGroupName': 'string', 'Status': 'string' }, ], 'VpcSecurityGroups': [ { 'VpcSecurityGroupId': 'string', 'Status': 'string' }, ], 'DBParameterGroups': [ { 'DBParameterGroupName': 'string', 'ParameterApplyStatus': 'string' }, ], 'AvailabilityZone': 'string', 'DBSubnetGroup': { 'DBSubnetGroupName': 'string', 'DBSubnetGroupDescription': 'string', 'VpcId': 'string', 'SubnetGroupStatus': 'string', 'Subnets': [ { 'SubnetIdentifier': 'string', 'SubnetAvailabilityZone': { 'Name': 'string' }, 'SubnetStatus': 'string' }, ], 'DBSubnetGroupArn': 'string' }, 'PreferredMaintenanceWindow': 'string', 'PendingModifiedValues': { 'DBInstanceClass': 'string', 'AllocatedStorage': 123, 'MasterUserPassword': 'string', 'Port': 123, 'BackupRetentionPeriod': 123, 'MultiAZ': True|False, 'EngineVersion': 'string', 'LicenseModel': 'string', 'Iops': 123, 'DBInstanceIdentifier': 'string', 'StorageType': 'string', 'CACertificateIdentifier': 'string', 'DBSubnetGroupName': 'string' }, 'LatestRestorableTime': datetime(2015, 1, 1), 'MultiAZ': True|False, 'EngineVersion': 'string', 'AutoMinorVersionUpgrade': True|False, 'ReadReplicaSourceDBInstanceIdentifier': 'string', 'ReadReplicaDBInstanceIdentifiers': [ 'string', ], 'ReadReplicaDBClusterIdentifiers': [ 'string', ], 'LicenseModel': 'string', 'Iops': 123, 'OptionGroupMemberships': [ { 'OptionGroupName': 'string', 'Status': 'string' }, ], 'CharacterSetName': 'string', 'SecondaryAvailabilityZone': 'string', 'PubliclyAccessible': True|False, 'StatusInfos': [ { 'StatusType': 'string', 'Normal': True|False, 'Status': 'string', 'Message': 'string' }, ], 'StorageType': 'string', 'TdeCredentialArn': 'string', 'DbInstancePort': 123, 'DBClusterIdentifier': 'string', 'StorageEncrypted': True|False, 'KmsKeyId': 'string', 'DbiResourceId': 'string', 'CACertificateIdentifier': 'string', 'DomainMemberships': [ { 'Domain': 'string', 'Status': 'string', 'FQDN': 'string', 'IAMRoleName': 'string' }, ], 'CopyTagsToSnapshot': True|False, 'MonitoringInterval': 123, 'EnhancedMonitoringResourceArn': 'string', 'MonitoringRoleArn': 'string', 'PromotionTier': 123, 'DBInstanceArn': 'string', 'Timezone': 'string', 'IAMDatabaseAuthenticationEnabled': True|False 
} } :returns: CreateDBInstance DeleteDBInstance ModifyDBInstance """ pass
0.003689
def timelines(fig, y, xstart, xstop, color='b'):
    """Plot timelines at y from xstart to xstop with given color."""
    fig.hlines(y, xstart, xstop, color, lw=4)
    fig.vlines(xstart, y+0.03, y-0.03, color, lw=2)
    fig.vlines(xstop, y+0.03, y-0.03, color, lw=2)
0.003759
def decode_values(fct):
    '''
    Decode base64 encoded responses from Consul storage
    '''
    def inner(*args, **kwargs):
        ''' decorator '''
        data = fct(*args, **kwargs)
        if 'error' not in data:
            for result in data:
                result['Value'] = base64.b64decode(result['Value'])
        return data
    return inner
0.002857
def _write_bond_information(xml_file, structure, ref_distance, ref_energy):
    """Write the bonds in the system.

    Parameters
    ----------
    xml_file : file object
        The file object of the hoomdxml file being written
    structure : parmed.Structure
        Parmed structure object
    ref_distance : float, default=1.0
        Reference distance for conversion to reduced units
    ref_energy : float, default=1.0
        Reference energy for conversion to reduced units
    """
    unique_bond_types = set()
    xml_file.write('<bond>\n')
    for bond in structure.bonds:
        t1, t2 = bond.atom1.type, bond.atom2.type
        if t1 == '' or t2 == '':
            t1, t2 = bond.atom1.name, bond.atom2.name
        t1, t2 = sorted([t1, t2])
        try:
            bond_type = ('-'.join((t1, t2)), bond.type.k, bond.type.req)
        except AttributeError:  # no forcefield applied, bond.type is None
            bond_type = ('-'.join((t1, t2)), 0.0, 0.0)
        unique_bond_types.add(bond_type)
        xml_file.write('{} {} {}\n'.format(
            bond_type[0], bond.atom1.idx, bond.atom2.idx))
    xml_file.write('</bond>\n')
    xml_file.write('<bond_coeffs>\n')
    xml_file.write('<!-- type k r_eq -->\n')
    for bond_type, k, req in unique_bond_types:
        xml_file.write('{} {} {}\n'.format(bond_type,
                                           k * 2.0 / ref_energy * ref_distance**2.0,
                                           req/ref_distance))
    xml_file.write('</bond_coeffs>\n')
0.00138
def clear(self, domain=None, path=None, name=None):
    """Clear some cookies.

    Invoking this method without arguments will clear all cookies.  If
    given a single argument, only cookies belonging to that domain will be
    removed.  If given two arguments, cookies belonging to the specified
    path within that domain are removed.  If given three arguments, then
    the cookie with the specified name, path and domain is removed.

    Raises KeyError if no matching cookie exists.
    """
    if name is not None:
        if (domain is None) or (path is None):
            raise ValueError(
                "domain and path must be given to remove a cookie by name")
        del self._cookies[domain][path][name]
    elif path is not None:
        if domain is None:
            raise ValueError(
                "domain must be given to remove cookies by path")
        del self._cookies[domain][path]
    elif domain is not None:
        del self._cookies[domain]
    else:
        self._cookies = {}
0.001817
def write(self):
    """
    Writes the ``.sln`` file to disk.
    """
    filters = {
        'MSGUID': lambda x: ('{%s}' % x).upper(),
        'relslnfile': lambda x: os.path.relpath(x, os.path.dirname(self.FileName))
    }
    context = {
        'sln': self
    }
    return self.render(self.__jinja_template__, self.FileName, context, filters)
0.010204
def delete_ptr_records(self, device, ip_address=None):
    """
    Deletes the PTR records for the specified device. If 'ip_address' is
    supplied, only the PTR records with that IP address will be deleted.
    """
    device_type = self._resolve_device_type(device)
    href, svc_name = self._get_ptr_details(device, device_type)
    uri = "/rdns/%s?href=%s" % (svc_name, href)
    if ip_address:
        uri = "%s&ip=%s" % (uri, ip_address)
    resp, resp_body = self._async_call(uri, method="DELETE",
                                       has_response=False,
                                       error_class=exc.PTRRecordDeletionFailed)
    return resp_body.get("status") == "COMPLETED"
0.00578
def _convert_ftp_time_to_iso(ftp_time):
    """
    Convert datetime in the format 20160705042714 to a datetime object

    :return: datetime object
    """
    date_time = datetime(
        int(ftp_time[:4]), int(ftp_time[4:6]), int(ftp_time[6:8]),
        int(ftp_time[8:10]), int(ftp_time[10:12]), int(ftp_time[12:14]))
    return date_time
0.005333
def load(fnames, tag=None, sat_id=None, obs_long=0., obs_lat=0., obs_alt=0., TLE1=None, TLE2=None): """ Returns data and metadata in the format required by pysat. Finds position of satellite in both ECI and ECEF co-ordinates. Routine is directly called by pysat and not the user. Parameters ---------- fnames : list-like collection File name that contains date in its name. tag : string Identifies a particular subset of satellite data sat_id : string Satellite ID obs_long: float Longitude of the observer on the Earth's surface obs_lat: float Latitude of the observer on the Earth's surface obs_alt: float Altitude of the observer on the Earth's surface TLE1 : string First string for Two Line Element. Must be in TLE format TLE2 : string Second string for Two Line Element. Must be in TLE format Example ------- inst = pysat.Instrument('pysat', 'sgp4', TLE1='1 25544U 98067A 18135.61844383 .00002728 00000-0 48567-4 0 9998', TLE2='2 25544 51.6402 181.0633 0004018 88.8954 22.2246 15.54059185113452') inst.load(2018, 1) """ import sgp4 # wgs72 is the most commonly used gravity model in satellite tracking community from sgp4.earth_gravity import wgs72 from sgp4.io import twoline2rv import ephem import pysatMagVect # TLEs (Two Line Elements for ISS) # format of TLEs is fixed and available from wikipedia... # lines encode list of orbital elements of an Earth-orbiting object # for a given point in time line1 = ('1 25544U 98067A 18135.61844383 .00002728 00000-0 48567-4 0 9998') line2 = ('2 25544 51.6402 181.0633 0004018 88.8954 22.2246 15.54059185113452') # use ISS defaults if not provided by user if TLE1 is not None: line1 = TLE1 if TLE2 is not None: line2 = TLE2 # create satellite from TLEs and assuming a gravity model # according to module webpage, wgs72 is common satellite = twoline2rv(line1, line2, wgs72) # grab date from filename parts = os.path.split(fnames[0])[-1].split('-') yr = int(parts[0]) month = int(parts[1]) day = int(parts[2][0:2]) date = pysat.datetime(yr, month, day) # create timing at 1 Hz (for 1 day) times = pds.date_range(start=date, end=date+pds.DateOffset(seconds=86399), freq='1S') # reduce requirements if on testing server # TODO Remove this when testing resources are higher on_travis = os.environ.get('ONTRAVIS') == 'True' if on_travis: times = times[0:100] # create list to hold satellite position, velocity position = [] velocity = [] for time in times: # orbit propagator - computes x,y,z position and velocity pos, vel = satellite.propagate(time.year, time.month, time.day, time.hour, time.minute, time.second) # print (pos) position.extend(pos) velocity.extend(vel) # put data into DataFrame data = pysat.DataFrame({'position_eci_x': position[::3], 'position_eci_y': position[1::3], 'position_eci_z': position[2::3], 'velocity_eci_x': velocity[::3], 'velocity_eci_y': velocity[1::3], 'velocity_eci_z': velocity[2::3]}, index=times) data.index.name = 'Epoch' # add position and velocity in ECEF # add call for GEI/ECEF translation here # instead, since available, I'll use an orbit predictor from another # package that outputs in ECEF # it also supports ground station calculations # the observer's (ground station) position on the Earth surface site = ephem.Observer() site.lon = str(obs_long) site.lat = str(obs_lat) site.elevation = obs_alt # The first parameter in readtle() is the satellite name sat = ephem.readtle('pysat' , line1, line2) output_params = [] for time in times: lp = {} site.date = time sat.compute(site) # parameters relative to the ground station lp['obs_sat_az_angle'] = 
ephem.degrees(sat.az) lp['obs_sat_el_angle'] = ephem.degrees(sat.alt) # total distance away lp['obs_sat_slant_range'] = sat.range # satellite location # sub latitude point lp['glat'] = np.degrees(sat.sublat) # sublongitude point lp['glong'] = np.degrees(sat.sublong) # elevation of sat in m, stored as km lp['alt'] = sat.elevation/1000. # get ECEF position of satellite lp['x'], lp['y'], lp['z'] = pysatMagVect.geodetic_to_ecef(lp['glat'], lp['glong'], lp['alt']) output_params.append(lp) output = pds.DataFrame(output_params, index=times) # modify input object to include calculated parameters data[['glong', 'glat', 'alt']] = output[['glong', 'glat', 'alt']] data[['position_ecef_x', 'position_ecef_y', 'position_ecef_z']] = output[['x', 'y', 'z']] data['obs_sat_az_angle'] = output['obs_sat_az_angle'] data['obs_sat_el_angle'] = output['obs_sat_el_angle'] data['obs_sat_slant_range'] = output['obs_sat_slant_range'] return data, meta.copy()
0.011161
def update(self):
    """Monolithic update method.

    This method calls the following methods with the dynamic loss scaling.

    1. solver.zerograd
    2. feed data
    3. loss.forward
    4. loss.backward
    5. comm.all_reduce (if it is specified)
    6. solver.update
    """
    # Initialize gradients.
    self.solver.zero_grad()

    # Forward and backward
    for _ in range(self.accum_grad):
        # feed data
        self.data_feeder()
        # forward
        self.loss.forward(clear_no_need_grad=self.clear_buffer)
        # backward with scale
        self.loss.backward(self.scale, clear_buffer=self.clear_buffer)

    # AllReduce
    if self.comm and len(self.grads) != 0:
        self.comm.all_reduce(self.grads, division=False, inplace=False)

    # Check Inf/NaN in grads
    if self.solver.check_inf_or_nan_grad():
        self.scale /= self.scaling_factor
        self._counter = 0
        # Recursively call update function until no inf nor nan.
        self._recursive_count += 1
        if self._recursive_count > self._max_recursive_count:
            self._recursive_count = 0
            return  # skip
        return self.update()
    self._recursive_count = 0

    # Rescale grads
    self.solver.scale_grad(1. / self.scale)

    # Do some gradient clipping, etc.
    if self.weight_decay is not None:
        self.solver.weight_decay(self.weight_decay)

    # Update
    self.solver.update()
    if self._counter > self.N:
        self.scale *= self.scaling_factor
        self._counter = 0
    self._counter += 1
0.001157
def lp_tri(f, fb):
    """
    Triangle spectral shape function used by :func:`lp_samp`.

    Parameters
    ----------
    f : ndarray containing frequency samples
    fb : the bandwidth as a float constant

    Returns
    -------
    x : ndarray of spectrum samples for a single triangle shape

    Notes
    -----
    This is a support function for the lowpass spectrum plotting function
    :func:`lp_samp`.

    Examples
    --------
    >>> x = lp_tri(f, fb)
    """
    x = np.zeros(len(f))
    for k in range(len(f)):
        if abs(f[k]) <= fb:
            x[k] = 1 - abs(f[k])/float(fb)
    return x
0.003252
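A minimal usage sketch for lp_tri, assuming NumPy is available as np and the function above is in scope:

    import numpy as np

    f = np.linspace(-1.0, 1.0, 5)   # frequency samples: [-1, -0.5, 0, 0.5, 1]
    fb = 1.0                        # triangle bandwidth
    x = lp_tri(f, fb)               # -> array([0. , 0.5, 1. , 0.5, 0. ])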
def ensure_node(self, tokens: ParseResults) -> BaseEntity:
    """Turn parsed tokens into canonical node name and makes sure its in the graph."""
    if MODIFIER in tokens:
        return self.ensure_node(tokens[TARGET])

    node = parse_result_to_dsl(tokens)
    self.graph.add_node_from_data(node)
    return node
0.008824
def print_details(self):
    """Print torrent details"""
    print("Title:", self.title)
    print("Category:", self.category)
    print("Page: ", self.page)
    print("Size: ", self.size)
    print("Files: ", self.files)
    print("Age: ", self.age)
    print("Seeds:", self.seeders)
    print("Leechers: ", self.leechers)
    print("Magnet: ", self.magnet)
    print("Download: ", self.download)
    print("Verified:", self.isVerified)
0.033898
def connect(self, server):
    "Connects to a server and return a connection id."
    if 'connections' not in session:
        session['connections'] = {}
        session.save()
    conns = session['connections']
    id = str(len(conns))
    conn = Connection(server)
    conns[id] = conn
    yield request.environ['cogen.core'].events.AddCoro(conn.pull)
    yield id
0.004773
def cli():
    """
    Usage: sugartex [OPTIONS] [TO]

    Reads from stdin and writes to stdout. Can have single argument/option only.
    When no args or the arg is not from options then run Pandoc SugarTeX filter
    that iterates over math blocks.

    Options:
      --kiwi    Same as above but with kiwi flavor,
      --help    Show this message and exit.
    """
    if len(sys.argv) > 1:
        if sys.argv[1] == '--kiwi':
            kiwi_hack()
        elif sys.argv[1].lower() == '--help':
            print(str(cli.__doc__).replace('\n    ', '\n'))
            return None
    main()
0.005
def comment_thread(cls, backend, *args, **kwargs):
    """Create a comment thread for the desired backend.

    :arg backend:   String name of backend (e.g., 'file', 'github',
                    'redis', etc.).

    :arg *args, **kwargs:   Arguments to be passed to constructor for
                            that backend.

    :returns:   A CommentThread sub-class for the given backend.

    PURPOSE:    Some simple syntactic sugar for creating the desired backend.
    """
    ct_cls = cls._known_backends.get(backend)
    if not ct_cls:
        return None
    return ct_cls(*args, **kwargs)
0.002509
def identification_field_factory(label, error_required):
    """
    A simple identification field factory which enables you to set the label.

    :param label:
        String containing the label for this field.

    :param error_required:
        String containing the error message if the field is left empty.
    """
    return forms.CharField(label=_(u"%(label)s") % {'label': label},
                           widget=forms.TextInput(attrs=attrs_dict),
                           max_length=75,
                           error_messages={'required': _(u"%(error)s") % {'error': error_required}})
0.004637
def getBinary(self):
    """Returns the binary message (so far) with typetags."""
    address = OSCArgument(self.address)[1]
    typetags = OSCArgument(self.typetags)[1]
    return address + typetags + self.message
0.012987
def apmag_at_absmag(H, d, phi=1):
    """
    Calculate the apparent magnitude of a TNO given its absolute magnitude H,
    for a given distance.

    :param H: TNO absolute magnitude (unitless)
    :param d: barycentric distance (AU)
    :param phi: phase angle (0-1, always v close to 1 for TNOs)
    :return: apparent magnitude of TNO
    """
    d_observer = 1.  # 1 AU
    # approximate object's distance d_heliocentric and d_geocentric as the same, d, because TNO
    m_r = H + 2.5 * math.log10((d ** 4) / (phi * d_observer ** 4))
    print("m_r = {:2.2f} for a H = {} TNO at {} AU at opposition.".format(
        m_r, H, d))
    return m_r
0.004673
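A worked check of the formula above, assuming apmag_at_absmag is in scope; with phi = 1 and d_observer = 1 AU the expression reduces to m_r = H + 10*log10(d):

    import math

    H, d = 7.0, 40.0                        # hypothetical TNO at 40 AU
    m_r = H + 2.5 * math.log10(d ** 4)      # same as H + 10*log10(d)
    # m_r is about 23.02, matching apmag_at_absmag(7.0, 40.0)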
def stratified_split(self, test_frac=0.2, seed=-1):
    """
    Construct a column that can be used to perform a random stratified split.

    :param float test_frac: The fraction of rows that will belong to the "test".
    :param int seed: The seed for the random number generator.

    :returns: an H2OFrame having single categorical column with two levels:
        ``"train"`` and ``"test"``.

    :examples:
        >>> stratsplit = df["y"].stratified_split(test_frac=0.3, seed=12349453)
        >>> train = df[stratsplit=="train"]
        >>> test = df[stratsplit=="test"]
        >>>
        >>> # check that the distributions among the initial frame, and the
        >>> # train/test frames match
        >>> df["y"].table()["Count"] / df["y"].table()["Count"].sum()
        >>> train["y"].table()["Count"] / train["y"].table()["Count"].sum()
        >>> test["y"].table()["Count"] / test["y"].table()["Count"].sum()
    """
    return H2OFrame._expr(expr=ExprNode('h2o.random_stratified_split', self, test_frac, seed))
0.006554
def logout(self):
    """Log out of the account."""
    self._master_token = None
    self._auth_token = None
    self._email = None
    self._android_id = None
0.011111
def set_bulk_size(size):
    """Set size limit on bulk execution.

    Bulk execution bundles many operators to run together.
    This can improve performance when running a lot of small
    operators sequentially.

    Parameters
    ----------
    size : int
        Maximum number of operators that can be bundled in a bulk.

    Returns
    -------
    int
        Previous bulk size.
    """
    prev = ctypes.c_int()
    check_call(_LIB.MXEngineSetBulkSize(
        ctypes.c_int(size), ctypes.byref(prev)))
    return prev.value
0.001873
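A minimal usage sketch for set_bulk_size, assuming it is exposed by an installed MXNet build (for example via mxnet.engine); the return value lets the caller restore the previous limit:

    prev = set_bulk_size(16)   # bundle up to 16 small operators per bulk
    # ... run a workload dominated by many small ops ...
    set_bulk_size(prev)        # restore the previous bulk size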
def supports_color():
    """
    Returns True if the running system's terminal supports color,
    and False otherwise.
    """
    unsupported_platform = (sys.platform in ('win32', 'Pocket PC'))
    # isatty is not always implemented, #6223.
    is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
    if unsupported_platform or not is_a_tty:
        return False
    return True
0.002538
def recreate_relationships(self, class_, attribute_name, key):
    ''' Recreates one-to-many relationship '''
    iterable = self.record_keeper.foreign_to_many_foreign_map[key]
    for foreign_page_id, foreign_page_id_list in iteritems(iterable):
        # Assumption: local page has been indexed and exists
        # TODO: handle case where it doesn't exist
        local_page_id = self.record_keeper.get_local_page(foreign_page_id)
        local_page = Page.objects.get(id=local_page_id).specific

        for _foreign_page_id in foreign_page_id_list:
            try:
                local_version_page_id = (self.record_keeper
                                         .get_local_page(_foreign_page_id))
                foreign_page = Page.objects.get(
                    id=local_version_page_id).specific
                realtionship_object = class_(page=local_page)
                setattr(realtionship_object, attribute_name, foreign_page)
                realtionship_object.save()
            except Exception as e:
                context = {
                    "exception": e,
                    "function_schema": ("recreate_relationships"
                                        "(class, attribute_name, key)"),
                    "attribute_name": str(attribute_name),
                    "key": str(key),
                    "class": str(class_),
                    "foreign_page_id": str(foreign_page_id),
                }
                self.log(ERROR, "recreating relationships", context=context)
0.001194
def find_lib_path(): """Find MXNet dynamic library files. Returns ------- lib_path : list(string) List of all found path to the libraries. """ lib_from_env = os.environ.get('MXNET_LIBRARY_PATH') if lib_from_env: if os.path.isfile(lib_from_env): if not os.path.isabs(lib_from_env): logging.warning("MXNET_LIBRARY_PATH should be an absolute path, instead of: %s", lib_from_env) else: if os.name == 'nt': os.environ['PATH'] = os.environ['PATH'] + ';' + os.path.dirname(lib_from_env) return [lib_from_env] else: logging.warning("MXNET_LIBRARY_PATH '%s' doesn't exist", lib_from_env) curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) api_path = os.path.join(curr_path, '../../lib/') cmake_build_path = os.path.join(curr_path, '../../build/') dll_path = [curr_path, api_path, cmake_build_path] if os.name == 'nt': dll_path.append(os.path.join(curr_path, '../../build')) vs_configuration = 'Release' if platform.architecture()[0] == '64bit': dll_path.append(os.path.join(curr_path, '../../build', vs_configuration)) dll_path.append(os.path.join(curr_path, '../../windows/x64', vs_configuration)) else: dll_path.append(os.path.join(curr_path, '../../build', vs_configuration)) dll_path.append(os.path.join(curr_path, '../../windows', vs_configuration)) elif os.name == "posix" and os.environ.get('LD_LIBRARY_PATH', None): dll_path[0:0] = [p.strip() for p in os.environ['LD_LIBRARY_PATH'].split(":")] if os.name == 'nt': os.environ['PATH'] = os.path.dirname(__file__) + ';' + os.environ['PATH'] dll_path = [os.path.join(p, 'libmxnet.dll') for p in dll_path] elif platform.system() == 'Darwin': dll_path = [os.path.join(p, 'libmxnet.dylib') for p in dll_path] + \ [os.path.join(p, 'libmxnet.so') for p in dll_path] else: dll_path.append('../../../') dll_path = [os.path.join(p, 'libmxnet.so') for p in dll_path] lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)] if len(lib_path) == 0: raise RuntimeError('Cannot find the MXNet library.\n' + 'List of candidates:\n' + str('\n'.join(dll_path))) if os.name == 'nt': os.environ['PATH'] = os.environ['PATH'] + ';' + os.path.dirname(lib_path[0]) return lib_path
0.004267
def execute_no_results(self, sock_info, generator): """Execute all operations, returning no results (w=0). """ if self.uses_collation: raise ConfigurationError( 'Collation is unsupported for unacknowledged writes.') if self.uses_array_filters: raise ConfigurationError( 'arrayFilters is unsupported for unacknowledged writes.') # Cannot have both unacknowledged writes and bypass document validation. if self.bypass_doc_val and sock_info.max_wire_version >= 4: raise OperationFailure("Cannot set bypass_document_validation with" " unacknowledged write concern") # OP_MSG if sock_info.max_wire_version > 5: if self.ordered: return self.execute_command_no_results(sock_info, generator) return self.execute_op_msg_no_results(sock_info, generator) coll = self.collection # If ordered is True we have to send GLE or use write # commands so we can abort on the first error. write_concern = WriteConcern(w=int(self.ordered)) op_id = _randint() next_run = next(generator) while next_run: # An ordered bulk write needs to send acknowledged writes to short # circuit the next run. However, the final message on the final # run can be unacknowledged. run = next_run next_run = next(generator, None) needs_ack = self.ordered and next_run is not None try: if run.op_type == _INSERT: self.execute_insert_no_results( sock_info, run, op_id, needs_ack) elif run.op_type == _UPDATE: for operation in run.ops: doc = operation['u'] check_keys = True if doc and next(iter(doc)).startswith('$'): check_keys = False coll._update( sock_info, operation['q'], doc, operation['upsert'], check_keys, operation['multi'], write_concern=write_concern, op_id=op_id, ordered=self.ordered, bypass_doc_val=self.bypass_doc_val) else: for operation in run.ops: coll._delete(sock_info, operation['q'], not operation['limit'], write_concern, op_id, self.ordered) except OperationFailure: if self.ordered: break
0.000995
def variant_to_list(obj):
    """
    Return a list containing the descriptors in the given object.

    The ``obj`` can be a list or a set of descriptor strings, or a Unicode string.

    If ``obj`` is a Unicode string, it will be split using spaces as delimiters.

    :param variant obj: the object to be parsed
    :rtype: list
    :raise TypeError: if the ``obj`` has a type not listed above
    """
    if isinstance(obj, list):
        return obj
    elif is_unicode_string(obj):
        return [s for s in obj.split() if len(s) > 0]
    elif isinstance(obj, set) or isinstance(obj, frozenset):
        return list(obj)
    raise TypeError("The given value must be a list or a set of descriptor strings, or a Unicode string.")
0.00813
def _handle_zeros_in_scale(scale, copy=True):
    """
    Makes sure that whenever scale is zero, we handle it correctly.

    This happens in most scalers when we have constant features.
    """
    # if we are fitting on 1D arrays, scale might be a scalar
    if numpy.isscalar(scale):
        if scale == .0:
            scale = 1.
        return scale
    elif isinstance(scale, numpy.ndarray):
        if copy:
            # New array to avoid side-effects
            scale = scale.copy()
        scale[scale == 0.0] = 1.0
        return scale
0.001821
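A minimal usage sketch for _handle_zeros_in_scale, assuming NumPy is imported as numpy as in the snippet above:

    import numpy

    _handle_zeros_in_scale(0.0)                           # -> 1.0 (scalar case)
    _handle_zeros_in_scale(numpy.array([2.0, 0.0, 5.0]))  # -> array([2., 1., 5.])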
def clips_value(self, dvalue):
    """Convert a Python type into CLIPS."""
    try:
        return VALUES[type(dvalue)](self._env, dvalue)
    except KeyError:
        if isinstance(dvalue, (list, tuple)):
            return self.list_to_multifield(dvalue)
        if isinstance(dvalue, (clips.facts.Fact)):
            return dvalue._fact
        if isinstance(dvalue, (clips.classes.Instance)):
            return dvalue._ist
        return ffi.NULL
0.004065
def __cost(self, params, phase, X): """Computes activation, cost function, and derivative.""" params = self.__roll(params) a = np.concatenate((np.ones((X.shape[0], 1)), X), axis=1) # This is a1 calculated_a = [a] # a1 is at index 0, a_n is at index n-1 calculated_z = [0] # There is no z1, z_n is at index n-1 for i, theta in enumerate(params): # calculated_a now contains a1, a2, a3 if there was only one hidden layer (two theta matrices) z = calculated_a[-1] * theta.transpose() # z_n = a_n-1 * Theta_n-1' calculated_z.append(z) # Save the new z_n a = self.sigmoid(z) # a_n = sigmoid(z_n) if i != len(params) - 1: # Don't append extra ones for the output layer a = np.concatenate((np.ones((a.shape[0], 1)), a), axis=1) # Append the extra column of ones for all other layers calculated_a.append(a) # Save the new a if phase == 0: if self.__num_labels > 1: return np.argmax(calculated_a[-1], axis=1) return np.round(calculated_a[-1]) J = np.sum(-np.multiply(self.__y, np.log(calculated_a[-1]))-np.multiply(1-self.__y, np.log(1-calculated_a[-1])))/self.__m; # Calculate cost if self.__lambda != 0: # If we're using regularization... J += np.sum([np.sum(np.power(theta[:,1:], 2)) for theta in params])*self.__lambda/(2.0*self.__m) # ...add it from all theta matrices if phase == 1: return J reversed_d = [] reversed_theta_grad = [] for i in range(len(params)): # For once per theta matrix... if i == 0: # ...if it's the first one... d = calculated_a[-1] - self.__y # ...initialize the error... else: # ...otherwise d_n-1 = d_n * Theta_n-1[missing ones] .* sigmoid(z_n-1) d = np.multiply(reversed_d[-1]*params[-i][:,1:], self.sigmoid_grad(calculated_z[-1-i])) # With i=1/1 hidden layer we're getting Theta2 at index -1, and z2 at index -2 reversed_d.append(d) theta_grad = reversed_d[-1].transpose() * calculated_a[-i-2] / self.__m if self.__lambda != 0: theta_grad += np.concatenate((np.zeros((params[-1-i].shape[0], 1)), params[-1-i][:,1:]), axis=1) * self.__lambda / self.__m# regularization reversed_theta_grad.append(theta_grad) theta_grad = self.__unroll(reversed(reversed_theta_grad)) return theta_grad
0.013655
def _field_controller_generator(self):
    """
    Generates the methods called by the injected controller
    """
    # Local variable, to avoid messing with "self"
    stored_instance = self._ipopo_instance

    def get_value(self, name):  # pylint: disable=W0613
        """
        Retrieves the controller value, from the iPOPO dictionaries

        :param name: The property name
        :return: The property value
        """
        return stored_instance.get_controller_state(name)

    def set_value(self, name, new_value):  # pylint: disable=W0613
        """
        Sets the property value and trigger an update event

        :param name: The property name
        :param new_value: The new property value
        """
        # Get the previous value
        old_value = stored_instance.get_controller_state(name)
        if new_value != old_value:
            # Update the controller state
            stored_instance.set_controller_state(name, new_value)

        return new_value

    return get_value, set_value
0.001736
def load(self, data):
    """
    Load a single row of data and convert it into entities and relations.
    """
    objs = {}
    for mapper in self.entities:
        objs[mapper.name] = mapper.load(self.loader, data)

    for mapper in self.relations:
        objs[mapper.name] = mapper.load(self.loader, data, objs)
0.005865
def _parse_new_contract_args(*args, **kwargs):
    """Parse argument for new_contract() function."""
    # No arguments
    if (not args) and (not kwargs):
        return [
            {
                "name": "argument_invalid",
                "msg": "Argument `*[argument_name]*` is not valid",
                "type": RuntimeError,
            }
        ]
    # Process args
    if (len(args) > 1) or ((len(args) == 1) and kwargs):
        raise TypeError("Illegal custom contract exception definition")
    if len(args) == 1:
        return [dict([("name", "default")] + list(_format_arg(args[0]).items()))]
    # Process kwargs
    return [
        dict([("name", name)] + list(_format_arg(kwargs[name]).items()))
        for name in sorted(list(kwargs.keys()))
    ]
0.002584
def make_sa():
    """
    Factory to create a SQLAlchemy queue store, pulling config values from
    the CoilMQ configuration.
    """
    configuration = dict(config.items('coilmq'))
    engine = engine_from_config(configuration, 'qstore.sqlalchemy.')
    init_model(engine)
    store = SAQueue()
    return store
0.00641
def function_info(self, functionKey):
    """Returns processed information about the function's name and file."""
    node_type = 'function'
    filename, line_number, function_name = functionKey
    if function_name == '<module>':
        modulePath, moduleName = osp.split(filename)
        node_type = 'module'
        if moduleName == '__init__.py':
            modulePath, moduleName = osp.split(modulePath)
        function_name = '<' + moduleName + '>'
    if not filename or filename == '~':
        file_and_line = '(built-in)'
        node_type = 'builtin'
    else:
        if function_name == '__init__':
            node_type = 'constructor'
        file_and_line = '%s : %d' % (filename, line_number)
    return filename, line_number, function_name, file_and_line, node_type
0.003375
def receive_ack_requesting(self, pkt):
    """Receive ACK in REQUESTING state."""
    logger.debug("C3. Received ACK?, in REQUESTING state.")
    if self.process_received_ack(pkt):
        logger.debug("C3: T. Received ACK, in REQUESTING state, "
                     "raise BOUND.")
        raise self.BOUND()
0.005988
def _construct_location_stack_entry(location, num_traverses):
    """Return a LocationStackEntry namedtuple with the specified parameters."""
    if not isinstance(num_traverses, int) or num_traverses < 0:
        raise AssertionError(u'Attempted to create a LocationStackEntry namedtuple with an invalid '
                             u'value for "num_traverses" {}. This is not allowed.'
                             .format(num_traverses))
    if not isinstance(location, Location):
        raise AssertionError(u'Attempted to create a LocationStackEntry namedtuple with an invalid '
                             u'value for "location" {}. This is not allowed.'
                             .format(location))

    return LocationStackEntry(location=location, num_traverses=num_traverses)
0.005063
def lrange(self, key, start, stop):
    """Emulate lrange."""
    redis_list = self._get_list(key, 'LRANGE')
    start, stop = self._translate_range(len(redis_list), start, stop)
    return redis_list[start:stop + 1]
0.008621
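A minimal usage sketch of the lrange emulation, assuming a mock client m exposing the method above, with key 'q' holding ['a', 'b', 'c', 'd'] and _translate_range implementing Redis-style index handling:

    m.lrange('q', 0, 2)   # -> ['a', 'b', 'c']   (stop index is inclusive, as in Redis)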
def supports_py3(project_name):
    """Check with PyPI if a project supports Python 3."""
    log = logging.getLogger("ciu")
    log.info("Checking {} ...".format(project_name))
    request = requests.get("https://pypi.org/pypi/{}/json".format(project_name))
    if request.status_code >= 400:
        log = logging.getLogger("ciu")
        log.warning("problem fetching {}, assuming ported ({})".format(
            project_name, request.status_code))
        return True
    response = request.json()
    return any(c.startswith("Programming Language :: Python :: 3")
               for c in response["info"]["classifiers"])
0.003135
def isosurface_from_data(data, isolevel, origin, spacing):
    """Small wrapper to get directly vertices and faces to feed into programs
    """
    if isolevel >= 0:
        triangles = marching_cubes(data, isolevel)
    else:
        # Wrong triangle unwinding order -- god only knows why
        triangles = marching_cubes(-data, -isolevel)

    faces = []
    verts = []
    for i, t in enumerate(triangles):
        faces.append([i * 3, i * 3 + 1, i * 3 + 2])
        verts.extend(t)

    faces = np.array(faces)
    verts = origin + spacing/2 + np.array(verts)*spacing
    return verts, faces
0.009449
def aggregate(self): """ Aggregate all reports of the same type into a master report """ for report in self.reportset: printtime('Processing {}'.format(report.split('.')[0]), self.start) # Initialise the header for each report - MLST is different, as the header is different for each # MLST scheme. This provides a generic header instead header = '' if report != 'mlst.csv' else 'Strain,Genus,SequenceType,Matches,1,2,3,4,5,6,7\n' # Initialise a string to hold the data for each report data = '' # Open the aggregated report with open(os.path.join(self.reportpath, report), 'w') as aggregate: for sample in self.runmetadata.samples: # Try to open the report for this run try: # with open(os.path.join(sample.general.reportpath, report), 'r') as runreport: # Only get the header from the first file if not header: header = runreport.readline() else: for row in runreport: # The final entry in a report does not have a newline character. Add \n as required if not row.endswith('\n'): row += '\n' # For certain reports, the header row is printed above each strain - ignore multiple # instances of the header if row.split(',')[0] != header.split(',')[0]: # Add the row to the string of data data += row except IOError: pass # Write the strings to the aggregate report file aggregate.write(header) aggregate.write(data)
0.003835
def parse(self, fo): """ Convert MEME output to motifs Parameters ---------- fo : file-like File object containing MEME output. Returns ------- motifs : list List of Motif instances. """ motifs = [] nucs = {"A":0,"C":1,"G":2,"T":3} p = re.compile('MOTIF.+MEME-(\d+)\s*width\s*=\s*(\d+)\s+sites\s*=\s*(\d+)') pa = re.compile('\)\s+([A-Z]+)') line = fo.readline() while line: m = p.search(line) align = [] pfm = None if m: #print(m.group(0)) id = "%s_%s_w%s" % (self.name, m.group(1), m.group(2)) while not line.startswith("//"): ma = pa.search(line) if ma: #print(ma.group(0)) l = ma.group(1) align.append(l) if not pfm: pfm = [[0 for x in range(4)] for x in range(len(l))] for pos in range(len(l)): if l[pos] in nucs: pfm[pos][nucs[l[pos]]] += 1 else: for i in range(4): pfm[pos][i] += 0.25 line = fo.readline() motifs.append(Motif(pfm[:])) motifs[-1].id = id motifs[-1].align = align[:] line = fo.readline() return motifs
0.017618
def snapshot(self) -> Tuple[Hash32, UUID]:
    """
    Perform a full snapshot of the current state.

    Snapshots are a combination of the :attr:`~state_root` at the time of the
    snapshot and the id of the changeset from the journaled DB.
    """
    return self.state_root, self._account_db.record()
0.009119
def _add_admin(self, app, **kwargs):
    """Add a Flask Admin interface to an application.

    :param flask.Flask app: A Flask application
    :param kwargs: Keyword arguments are passed through to :class:`flask_admin.Admin`
    :rtype: flask_admin.Admin
    """
    from flask_admin import Admin
    from flask_admin.contrib.sqla import ModelView

    admin = Admin(app, **kwargs)

    for flask_admin_model in self.flask_admin_models:
        if isinstance(flask_admin_model, tuple):  # assume its a 2 tuple
            if len(flask_admin_model) != 2:
                raise TypeError
            model, view = flask_admin_model
            admin.add_view(view(model, self.session))
        else:
            admin.add_view(ModelView(flask_admin_model, self.session))

    return admin
0.003501
def seek(self, offset, whence=os.SEEK_SET):
    """Seek to position in stream, see file.seek"""
    pos = None
    if whence == os.SEEK_SET:
        pos = self.offset + offset
    elif whence == os.SEEK_CUR:
        pos = self.tell() + offset
    elif whence == os.SEEK_END:
        pos = self.offset + self.len + offset
    else:
        raise ValueError("invalid whence {}".format(whence))
    if pos > self.offset + self.len or pos < self.offset:
        raise ValueError("seek position beyond chunk area")
    self.parent_fd.seek(pos, os.SEEK_SET)
0.0033
def poly(self, return_coeffs=False):
    """returns the quadratic as a Polynomial object."""
    p = self.bpoints()
    coeffs = (p[0] - 2*p[1] + p[2], 2*(p[1] - p[0]), p[0])
    if return_coeffs:
        return coeffs
    else:
        return np.poly1d(coeffs)
0.00692
def _element_charfix(self, element, charcount):
    """Updates the start and end attributes by charcount for the element."""
    element.start += charcount
    element.docstart += charcount
    element.end += charcount
    element.docend += charcount
0.011111
def channeldir_node_to_row(self, path_tuple):
    """
    Return a dict with keys corresponding to Content.csv columns.
    """
    row = dict()
    for key in CONTENT_INFO_HEADER:
        row[key] = None
    row[CONTENT_PATH_KEY] = "/".join(path_tuple)  # use / in .csv on Windows and UNIX
    title = path_tuple[-1].replace('_', ' ')
    for ext in content_kinds.MAPPING.keys():
        if title.endswith(ext):
            title = title.replace('.'+ext, '')
    row[CONTENT_TITLE_KEY] = title
    row[CONTENT_SOURCEID_KEY] = path_tuple[-1]
    return row
0.004902