Dataset schema: text (string, lengths 78 to 104k) · score (float64, range 0 to 0.18). Entries below alternate between a code snippet (text) and its score.
def new_module(self):
    '''Make a fresh module to run in.

    Returns:
        Module
    '''
    self.reset_run_errors()
    if self._code is None:
        return None
    module_name = 'bk_script_' + make_id().replace('-', '')
    module = ModuleType(str(module_name))  # str needed for py2.7
    module.__dict__['__file__'] = os.path.abspath(self._path)
    return module
0.007059
def cf_dictionary_to_dict(dictionary):
    """
    Converts a CFDictionary object into a python dictionary

    :param dictionary:
        The CFDictionary to convert

    :return:
        A python dict
    """
    dict_length = CoreFoundation.CFDictionaryGetCount(dictionary)
    keys = (CFTypeRef * dict_length)()
    values = (CFTypeRef * dict_length)()
    CoreFoundation.CFDictionaryGetKeysAndValues(
        dictionary,
        _cast_pointer_p(keys),
        _cast_pointer_p(values)
    )
    output = {}
    for index in range(0, dict_length):
        output[CFHelpers.native(keys[index])] = CFHelpers.native(values[index])
    return output
0.004121
def get_tokens(max_value):
    """Defines tokens.

    Args:
        max_value: the maximum numeric range for the token.

    Returns:
        list of string tokens in vocabulary.
    """
    vocab = [str(i) for i in range(max_value)]
    vocab = set(vocab)
    vocab.update(CodeOp.LITERALS)
    vocab.update(CodeOp.KEYWORDS)
    vocab |= set("".join(vocab))
    return sorted(vocab)
0.022535
def get_valid_kwargs(func, potential_kwargs):
    """Return valid kwargs to function func"""
    kwargs = {}
    for name in get_kwarg_names(func):
        with suppress(KeyError):
            kwargs[name] = potential_kwargs[name]
    return kwargs
0.003876
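A minimal usage sketch for get_valid_kwargs above; the callee f is hypothetical, and get_kwarg_names is assumed to return the names of func's keyword arguments:

def f(x, timeout=5, retries=3):
    return (x, timeout, retries)

# 'timeout' is a keyword argument of f, 'color' is not, so only 'timeout' survives.
assert get_valid_kwargs(f, {'timeout': 10, 'color': 'red'}) == {'timeout': 10}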
def Reload(self):
    """Call `Reload` on every `EventAccumulator`."""
    logger.info('Beginning EventMultiplexer.Reload()')
    self._reload_called = True
    # Build a list so we're safe even if the list of accumulators
    # is modified while we're reloading.
    with self._accumulators_mutex:
        items = list(self._accumulators.items())
    names_to_delete = set()
    for name, accumulator in items:
        try:
            accumulator.Reload()
        except (OSError, IOError) as e:
            logger.error("Unable to reload accumulator '%s': %s", name, e)
        except directory_watcher.DirectoryDeletedError:
            names_to_delete.add(name)
    with self._accumulators_mutex:
        for name in names_to_delete:
            logger.warn("Deleting accumulator '%s'", name)
            del self._accumulators[name]
    logger.info('Finished with EventMultiplexer.Reload()')
    return self
0.006742
def extractFieldsFromResult(data):
    '''
    Method that parses Infobel textual information to return a series of attributes.

    :return: a list of i3visio-like objects.
    '''
    entities = []
    # Defining the objects to extract
    fieldsRegExp = {}
    fieldsRegExp["i3visio.fullname"] = "<span class=\"fn\">([^<]*)</span>"
    fieldsRegExp["i3visio.name"] = " por <strong>[^ ]* ([^<]*)</strong>"
    fieldsRegExp["i3visio.surname"] = " por <strong>([^ ]*) "
    fieldsRegExp["i3visio.location.address"] = "itemprop=\"streetAddress\">([^<]*)</span>"
    fieldsRegExp["i3visio.location.city"] = "addressLocality\">([^<]*)</span>"
    fieldsRegExp["i3visio.location.postalcode"] = "postalCode\">([^<]*)</span>"
    fieldsRegExp["i3visio.phone"] = "document.write\('([0-9]+)'"
    for field in fieldsRegExp.keys():
        listRecovered = re.findall(fieldsRegExp[field], data)
        if len(listRecovered) > 0:
            aux = {}
            aux["type"] = field
            aux["value"] = listRecovered[0].replace('\xa0', ' ')
            aux["attributes"] = []
            entities.append(aux)
    return entities
0.012521
def getColorHSV(name):
    """Retrieve the hue, saturation, value triple of a color name.

    Returns:
        a triple (degree, percent, percent). If not found (-1, -1, -1) is returned.
    """
    try:
        x = getColorInfoList()[getColorList().index(name.upper())]
    except:
        return (-1, -1, -1)
    r = x[1] / 255.
    g = x[2] / 255.
    b = x[3] / 255.
    cmax = max(r, g, b)
    V = round(cmax * 100, 1)
    cmin = min(r, g, b)
    delta = cmax - cmin
    if delta == 0:
        hue = 0
    elif cmax == r:
        hue = 60. * (((g - b) / delta) % 6)
    elif cmax == g:
        hue = 60. * (((b - r) / delta) + 2)
    else:
        hue = 60. * (((r - g) / delta) + 4)
    H = int(round(hue))
    if cmax == 0:
        sat = 0
    else:
        sat = delta / cmax
    S = int(round(sat * 100))
    return (H, S, V)
0.008235
def longitude(self, longitude):
    """Setter for longitude."""
    if not (-180 <= longitude <= 180):
        raise ValueError('longitude was {}, but has to be in [-180, 180]'
                         .format(longitude))
    self._longitude = longitude
0.007326
def get_relationship_form_for_update(self, relationship_id=None):
    """Gets the relationship form for updating an existing relationship.

    A new relationship form should be requested for each update
    transaction.

    arg:    relationship_id (osid.id.Id): the ``Id`` of the ``Relationship``
    return: (osid.relationship.RelationshipForm) - the relationship form
    raise:  NotFound - ``relationship_id`` is not found
    raise:  NullArgument - ``relationship_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    if relationship_id is None:
        raise NullArgument()
    try:
        url_path = ('/handcar/services/relationship/families/' +
                    self._catalog_idstr + '/relationships/' +
                    str(relationship_id))
        relationship = objects.Relationship(self._get_request(url_path))
    except Exception:
        raise
    relationship_form = objects.RelationshipForm(relationship._my_map)
    self._forms[relationship_form.get_id().get_identifier()] = not UPDATED
    return relationship_form
0.002322
def check(codeString, filename, reporter=None):
    """
    Check the Python source given by C{codeString} for flakes.

    @param codeString: The Python source to check.
    @type codeString: C{str}

    @param filename: The name of the file the source came from, used to report
        errors.
    @type filename: C{str}

    @param reporter: A L{Reporter} instance, where errors and warnings will be
        reported.

    @return: The number of warnings emitted.
    @rtype: C{int}
    """
    if reporter is None:
        reporter = modReporter._makeDefaultReporter()
    # First, compile into an AST and handle syntax errors.
    try:
        tree = ast.parse(codeString, filename=filename)
    except SyntaxError:
        value = sys.exc_info()[1]
        msg = value.args[0]
        (lineno, offset, text) = value.lineno, value.offset, value.text
        if checker.PYPY:
            if text is None:
                lines = codeString.splitlines()
                if len(lines) >= lineno:
                    text = lines[lineno - 1]
                    if sys.version_info >= (3, ) and isinstance(text, bytes):
                        try:
                            text = text.decode('ascii')
                        except UnicodeDecodeError:
                            text = None
            offset -= 1
        # If there's an encoding problem with the file, the text is None.
        if text is None:
            # Avoid using msg, since for the only known case, it contains a
            # bogus message that claims the encoding the file declared was
            # unknown.
            reporter.unexpectedError(filename, 'problem decoding source')
        else:
            reporter.syntaxError(filename, msg, lineno, offset, text)
        return 1
    except Exception:
        reporter.unexpectedError(filename, 'problem decoding source')
        return 1
    # Okay, it's syntactically valid.  Now check it.
    file_tokens = checker.make_tokens(codeString)
    w = checker.Checker(tree, file_tokens=file_tokens, filename=filename)
    w.messages.sort(key=lambda m: m.lineno)
    for warning in w.messages:
        reporter.flake(warning)
    return len(w.messages)
0.000456
def search_terms(q):
    '''Takes a search string and parses it into a list of keywords and phrases.'''
    tokens = parse_search_terms(q)
    # iterate through all the tokens and make a list of token values
    # (which are the actual words and phrases)
    values = []
    for t in tokens:
        # word/phrase
        if t[0] is None:
            values.append(t[1])
        # incomplete field
        elif t[1] is None:
            values.append('%s:' % t[0])
        # anything else must be a field, value pair
        # - if value includes whitespace, wrap in quotes
        elif re.search(r'\s', t[1]):
            values.append('%s:"%s"' % t)
        # otherwise, leave unquoted
        else:
            values.append('%s:%s' % t)
    return values
0.002628
def get_condition(self, service_id, version_number, name):
    """Gets a specified condition."""
    content = self._fetch("/service/%s/version/%d/condition/%s" %
                          (service_id, version_number, name))
    return FastlyCondition(self, content)
0.025641
def t_escaped_LINE_FEED_CHAR(self, t):
    r'\x6E'  # 'n'
    t.lexer.pop_state()
    t.value = unichr(0x000a)
    return t
0.014388
def board_fen(self, *, promoted: Optional[bool] = False) -> str:
    """
    Gets the board FEN.
    """
    builder = []
    empty = 0

    for square in SQUARES_180:
        piece = self.piece_at(square)

        if not piece:
            empty += 1
        else:
            if empty:
                builder.append(str(empty))
                empty = 0
            builder.append(piece.symbol())
            if promoted and BB_SQUARES[square] & self.promoted:
                builder.append("~")

        if BB_SQUARES[square] & BB_FILE_H:
            if empty:
                builder.append(str(empty))
                empty = 0
            if square != H1:
                builder.append("/")

    return "".join(builder)
0.002436
def unparse_qs(qs, sort=False, reverse=False):
    """Reverse conversion for parse_qs"""
    result = []
    items = qs.items()
    if sort:
        items = sorted(items, key=lambda x: x[0], reverse=reverse)
    for keys, values in items:
        query_name = quote(keys)
        for value in values:
            result.append(query_name + "=" + quote(value))
    return "&".join(result)
0.002584
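A round-trip sketch for unparse_qs above, assuming quote and parse_qs are the urllib.parse functions the snippet pairs with:

from urllib.parse import parse_qs

qs = parse_qs('a=1&a=2&b=x%20y')          # {'a': ['1', '2'], 'b': ['x y']}
assert unparse_qs(qs, sort=True) == 'a=1&a=2&b=x%20y'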
def refresh_plugin(self):
    """Refresh tabwidget."""
    nb = None
    if self.tabwidget.count():
        client = self.tabwidget.currentWidget()
        nb = client.notebookwidget
        nb.setFocus()
    else:
        nb = None
    self.update_notebook_actions()
0.00641
def get_aggregate_check(self, check, age=None):
    """Returns the list of aggregates for a given check"""
    data = {}
    if age:
        data['max_age'] = age
    result = self._request('GET', '/aggregates/{}'.format(check),
                           data=json.dumps(data))
    return result.json()
0.005714
def setNreps(self, nreps):
    """Sets the number of reps before the raster plot resets"""
    for plot in self.responsePlots.values():
        plot.setNreps(nreps)
0.011364
def mean_squared_logarithmic_error(pred:Tensor, targ:Tensor)->Rank0Tensor:
    "Mean squared logarithmic error between `pred` and `targ`."
    pred,targ = flatten_check(pred,targ)
    return F.mse_loss(torch.log(1 + pred), torch.log(1 + targ))
0.024691
def plot_diode_fold(dio_cross, bothfeeds=True, feedtype='l', min_samp=-500,
                    max_samp=7000, legend=True, **kwargs):
    '''
    Plots the calculated average power and time sampling of ON (red) and
    OFF (blue) for a noise diode measurement over the observation time series
    '''
    # Get full stokes data of ND measurement
    obs = Waterfall(dio_cross, max_load=150)
    tsamp = obs.header['tsamp']
    data = obs.data
    obs = None
    I, Q, U, V = get_stokes(data, feedtype)

    # Calculate time series, OFF and ON averages, and time samples for each
    tseriesI = np.squeeze(np.mean(I, axis=2))
    I_OFF, I_ON, OFFints, ONints = foldcal(I, tsamp, inds=True, **kwargs)

    if bothfeeds == True:
        if feedtype == 'l':
            tseriesQ = np.squeeze(np.mean(Q, axis=2))
            tseriesX = (tseriesI + tseriesQ) / 2
            tseriesY = (tseriesI - tseriesQ) / 2
        if feedtype == 'c':
            tseriesV = np.squeeze(np.mean(V, axis=2))
            tseriesR = (tseriesI + tseriesV) / 2
            tseriesL = (tseriesI - tseriesV) / 2

    stop = ONints[-1, 1]

    # Plot time series and calculated average for ON and OFF
    if bothfeeds == False:
        plt.plot(tseriesI[0:stop], 'k-', label='Total Power')
        for i in ONints:
            plt.plot(np.arange(i[0], i[1]), np.full((i[1] - i[0]), np.mean(I_ON)), 'r-')
        for i in OFFints:
            plt.plot(np.arange(i[0], i[1]), np.full((i[1] - i[0]), np.mean(I_OFF)), 'b-')
    else:
        if feedtype == 'l':
            diff = np.mean(tseriesX) - np.mean(tseriesY)
            plt.plot(tseriesX[0:stop], 'b-', label='XX')
            plt.plot(tseriesY[0:stop] + diff, 'r-', label='YY (shifted)')
        if feedtype == 'c':
            diff = np.mean(tseriesL) - np.mean(tseriesR)
            plt.plot(tseriesL[0:stop], 'b-', label='LL')
            plt.plot(tseriesR[0:stop] + diff, 'r-', label='RR (shifted)')

    # Calculate plotting limits
    if bothfeeds == False:
        lowlim = np.mean(I_OFF) - (np.mean(I_ON) - np.mean(I_OFF)) / 2
        hilim = np.mean(I_ON) + (np.mean(I_ON) - np.mean(I_OFF)) / 2
        plt.ylim((lowlim, hilim))

    plt.xlim((min_samp, max_samp))
    plt.xlabel('Time Sample Number')
    plt.ylabel('Power (Counts)')
    plt.title('Noise Diode Fold')
    if legend == True:
        plt.legend()
0.027342
def resolve(self, key):
    """ Resolves the requested key to an object instance, raising a
    KeyError if the key is missing """
    registration = self._registrations.get(key)
    if registration is None:
        raise KeyError("Unknown key: '{0}'".format(key))
    return registration.resolve(self, key)
0.008798
def get_args():
    """
    Get and parse arguments.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description="Swift Navigation SBP Example.")
    parser.add_argument(
        "-s",
        "--serial-port",
        default=[DEFAULT_SERIAL_PORT],
        nargs=1,
        help="specify the serial port to use.")
    parser.add_argument(
        "-b",
        "--baud",
        default=[DEFAULT_SERIAL_BAUD],
        nargs=1,
        help="specify the baud rate to use.")
    parser.add_argument(
        "-a",
        "--address",
        default=[DEFAULT_UDP_ADDRESS],
        nargs=1,
        help="specify the UDP address to use.")
    parser.add_argument(
        "-p",
        "--udp-port",
        default=[DEFAULT_UDP_PORT],
        nargs=1,
        help="specify the UDP port to use.")
    return parser.parse_args()
0.001172
def ignored_corner(
        intersection, tangent_s, tangent_t, edge_nodes1, edge_nodes2):
    """Check if an intersection is an "ignored" corner.

    .. note::

       This is a helper used only by :func:`classify_intersection`.

    An "ignored" corner is one where the surfaces just "kiss" at the point
    of intersection but their interiors do not meet.

    We can determine this by comparing the tangent lines from the point of
    intersection.

    .. note::

       This assumes the ``intersection`` has been shifted to the beginning
       of a curve so only checks if ``s == 0.0`` or ``t == 0.0`` (rather
       than also checking for ``1.0``).

    Args:
        intersection (.Intersection): An intersection to "diagnose".
        tangent_s (numpy.ndarray): The tangent vector (``2 x 1`` array) to
            the first curve at the intersection.
        tangent_t (numpy.ndarray): The tangent vector (``2 x 1`` array) to
            the second curve at the intersection.
        edge_nodes1 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The
            nodes of the three edges of the first surface being intersected.
        edge_nodes2 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The
            nodes of the three edges of the second surface being intersected.

    Returns:
        bool: Indicates if the corner is to be ignored.
    """
    if intersection.s == 0.0:
        if intersection.t == 0.0:
            # Double corner.
            return ignored_double_corner(
                intersection, tangent_s, tangent_t, edge_nodes1, edge_nodes2
            )
        else:
            # s-only corner.
            prev_index = (intersection.index_first - 1) % 3
            prev_edge = edge_nodes1[prev_index]
            return ignored_edge_corner(tangent_t, tangent_s, prev_edge)
    elif intersection.t == 0.0:
        # t-only corner.
        prev_index = (intersection.index_second - 1) % 3
        prev_edge = edge_nodes2[prev_index]
        return ignored_edge_corner(tangent_s, tangent_t, prev_edge)
    else:
        # Not a corner.
        return False
0.000475
def kill(self, signal=None):
    """
    Kill or send a signal to the container.

    Args:
        signal (str or int): The signal to send. Defaults to ``SIGKILL``

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    return self.client.api.kill(self.id, signal=signal)
0.005479
def admin_create_user(self, username, temporary_password='',
                      attr_map=None, **kwargs):
    """
    Create a user using admin super privileges.

    :param username: User Pool username
    :param temporary_password: The temporary password to give the user.
        Leave blank to make Cognito generate a temporary password for the user.
    :param attr_map: Attribute map to Cognito's attributes
    :param kwargs: Additional User Pool attributes
    :return response: Response from Cognito
    """
    response = self.client.admin_create_user(
        UserPoolId=self.user_pool_id,
        Username=username,
        UserAttributes=dict_to_cognito(kwargs, attr_map),
        TemporaryPassword=temporary_password,
    )
    kwargs.update(username=username)
    self._set_attributes(response, kwargs)
    response.pop('ResponseMetadata')
    return response
0.003236
def plot_irm(fignum, B, M, title):
    """
    function to plot IRM backfield curves

    Parameters
    _________
    fignum : matplotlib figure number
    B : list or array of field values
    M : list or array of magnetizations
    title : string title for plot
    """
    rpars = {}
    Mnorm = []
    backfield = 0
    X, Y = [], []
    for k in range(len(B)):
        if M[k] < 0:
            break
    if k <= 5:
        kmin = 0
    else:
        kmin = k - 5
    for k in range(kmin, k + 1):
        X.append(B[k])
        if B[k] < 0:
            backfield = 1
        Y.append(M[k])
    if backfield == 1:
        poly = np.polyfit(X, Y, 1)
        if poly[0] != 0:
            bcr = (old_div(-poly[1], poly[0]))
        else:
            bcr = 0
        rpars['remanence_mr_moment'] = '%8.3e' % (M[0])
        rpars['remanence_bcr'] = '%8.3e' % (-bcr)
        rpars['magic_method_codes'] = 'LP-BCR-BF'
    if M[0] != 0:
        for m in M:
            Mnorm.append(old_div(m, M[0]))  # normalize to unity Msat
        title = title + ':' + '%8.3e' % (M[0])
    else:
        if M[-1] != 0:
            for m in M:
                Mnorm.append(old_div(m, M[-1]))  # normalize to unity Msat
            title = title + ':' + '%8.3e' % (M[-1])
    # do plots if desired
    if fignum != 0 and M[0] != 0:  # skip plot for fignum = 0
        plt.figure(num=fignum)
        plt.clf()
        if not isServer:
            plt.figtext(.02, .01, version_num)
        plt.plot(B, Mnorm)
        plt.axhline(0, color='k')
        plt.axvline(0, color='k')
        plt.xlabel('B (T)')
        plt.ylabel('M/Mr')
        plt.title(title)
        if backfield == 1:
            plt.scatter([bcr], [0], marker='s', c='b')
            bounds = plt.axis()
            n1 = 'Bcr: ' + '%8.2e' % (-bcr) + ' T'
            plt.figtext(.2, .5, n1)
            n2 = 'Mr: ' + '%8.2e' % (M[0]) + ' Am^2'
            plt.figtext(.2, .45, n2)
    elif fignum != 0:
        plt.figure(num=fignum)
        # plt.clf()
        if not isServer:
            plt.figtext(.02, .01, version_num)
        print('M[0]=0, skipping specimen')
    return rpars
0.000466
def compose(self, *args):
    """
    Returns a function that is the composition of a list of functions, each
    consuming the return value of the function that follows.
    """
    args = list(args)

    def composed(*ar, **kwargs):
        lastRet = self.obj(*ar, **kwargs)
        for i in args:
            lastRet = i(lastRet)
        return lastRet

    return self._wrap(composed)
0.004619
def classify_harmonic(self, partial_labels, use_CMN=True):
    '''Harmonic function method for semi-supervised classification,
    also known as the Gaussian Mean Fields algorithm.

    partial_labels: (n,) array of integer labels, -1 for unlabeled.
    use_CMN : when True, apply Class Mass Normalization

    From "Semi-Supervised Learning Using Gaussian Fields and Harmonic
    Functions" by Zhu, Ghahramani, and Lafferty in 2003.
    Based on the matlab code at:
    http://pages.cs.wisc.edu/~jerryzhu/pub/harmonic_function.m
    '''
    # prepare labels
    labels = np.array(partial_labels, copy=True)
    unlabeled = labels == -1
    # convert known labels to one-hot encoding
    fl, classes = _onehot(labels[~unlabeled])
    L = self.laplacian(normed=False)
    if ss.issparse(L):
        L = L.tocsr()[unlabeled].toarray()
    else:
        L = L[unlabeled]
    Lul = L[:, ~unlabeled]
    Luu = L[:, unlabeled]
    fu = -np.linalg.solve(Luu, Lul.dot(fl))
    if use_CMN:
        scale = (1 + fl.sum(axis=0)) / fu.sum(axis=0)
        fu *= scale
    # assign new labels
    labels[unlabeled] = classes[fu.argmax(axis=1)]
    return labels
0.006975
def check_child_friendly(self, name):
    """
    Check if a module is a container and so can have children
    """
    name = name.split()[0]
    if name in self.container_modules:
        return
    root = os.path.dirname(os.path.realpath(__file__))
    module_path = os.path.join(root, "modules")
    try:
        info = imp.find_module(name, [module_path])
    except ImportError:
        return
    if not info:
        return
    (file, pathname, description) = info
    try:
        py_mod = imp.load_module(name, file, pathname, description)
    except Exception:
        # We cannot load the module!  We could error out here but then the
        # user gets informed that the problem is with their config.  This
        # is not correct.  Better to say that all is well and then the
        # config can get parsed and py3status loads.  The error about the
        # failing module load is better handled at that point, and will be.
        return
    try:
        container = py_mod.Py3status.Meta.container
    except AttributeError:
        container = False
    # delete the module
    del py_mod
    if container:
        self.container_modules.append(name)
    else:
        self.error("Module `{}` cannot contain others".format(name))
0.001443
def unassign_authorization_from_vault(self, authorization_id, vault_id):
    """Removes an ``Authorization`` from a ``Vault``.

    arg:    authorization_id (osid.id.Id): the ``Id`` of the
            ``Authorization``
    arg:    vault_id (osid.id.Id): the ``Id`` of the ``Vault``
    raise:  NotFound - ``authorization_id`` or ``vault_id`` not found
            or ``authorization_id`` not assigned to ``vault_id``
    raise:  NullArgument - ``authorization_id`` or ``vault_id`` is
            ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin
    mgr = self._get_provider_manager('AUTHORIZATION', local=True)
    lookup_session = mgr.get_vault_lookup_session(proxy=self._proxy)
    lookup_session.get_vault(vault_id)  # to raise NotFound
    self._unassign_object_from_catalog(authorization_id, vault_id)
0.001741
def setup(self, phase=None, quantity='', diffusive_conductance='',
          hydraulic_conductance='', pressure='', s_scheme='', **kwargs):
    r"""
    """
    if phase:
        self.settings['phase'] = phase.name
    if quantity:
        self.settings['quantity'] = quantity
    if diffusive_conductance:
        self.settings['diffusive_conductance'] = diffusive_conductance
    if hydraulic_conductance:
        self.settings['hydraulic_conductance'] = hydraulic_conductance
    if pressure:
        self.settings['pressure'] = pressure
    if s_scheme:
        self.settings['s_scheme'] = s_scheme
    super().setup(**kwargs)
0.004317
def get_schema(frame, name, keys=None, con=None, dtype=None):
    """
    Get the SQL db table schema for the given frame.

    Parameters
    ----------
    frame : DataFrame
    name : string
        name of SQL table
    keys : string or sequence, default: None
        columns to use a primary key
    con: an open SQL database connection object or a SQLAlchemy connectable
        Using SQLAlchemy makes it possible to use any DB supported by that
        library, default: None
        If a DBAPI2 object, only sqlite3 is supported.
    dtype : dict of column name to SQL type, default None
        Optional specifying the datatype for columns. The SQL type should
        be a SQLAlchemy type, or a string for sqlite3 fallback connection.
    """
    pandas_sql = pandasSQL_builder(con=con)
    return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
0.00114
def getOption(self, name):
    """
    Get the current value of the specified option. If the option does not
    exist, returns None.

    Args:
        name: Option name.

    Returns:
        Value of the option.

    Raises:
        InvalidArgument: if the option name is not valid.
    """
    try:
        value = lock_and_call(
            lambda: self._impl.getOption(name).value(),
            self._lock
        )
    except RuntimeError:
        return None
    else:
        try:
            return int(value)
        except ValueError:
            try:
                return float(value)
            except ValueError:
                return value
0.002625
def loss(self, xs, ys):
    """Computes the loss of the network."""
    return float(
        self.sess.run(
            self.cross_entropy, feed_dict={
                self.x: xs,
                self.y_: ys
            }))
0.007937
def get(obj):
    """
    Determines file format and picks suitable file types, extensions and MIME types

    Takes:
        obj (bytes) -> byte sequence (128 bytes are enough)

    Returns:
        (<class 'fleep.Info'>) -> Class instance
    """
    if not isinstance(obj, bytes):
        raise TypeError("object type must be bytes")

    info = {
        "type": dict(),
        "extension": dict(),
        "mime": dict()
    }

    stream = " ".join(['{:02X}'.format(byte) for byte in obj])
    for element in data:
        for signature in element["signature"]:
            offset = element["offset"] * 2 + element["offset"]
            if signature == stream[offset:len(signature) + offset]:
                for key in ["type", "extension", "mime"]:
                    info[key][element[key]] = len(signature)

    for key in ["type", "extension", "mime"]:
        info[key] = [element for element in
                     sorted(info[key], key=info[key].get, reverse=True)]

    return Info(info["type"], info["extension"], info["mime"])
0.002921
def drawBackground(self, painter, rect):
    """When an area of the window is exposed, we just copy out of the
    server-side, off-screen pixmap to that area.
    """
    if not self.pixmap:
        return
    x1, y1, x2, y2 = rect.getCoords()
    width = x2 - x1 + 1
    height = y2 - y1 + 1
    # redraw the screen from backing pixmap
    rect = QtCore.QRect(x1, y1, width, height)
    painter.drawPixmap(rect, self.pixmap, rect)
0.004193
def _ppf(self, q, dist, cache):
    """Point percentile function."""
    return -evaluation.evaluate_inverse(dist, 1 - q, cache=cache)
0.014286
def HumanReadableStartType(self):
    """Return a human readable string describing the start type value.

    Returns:
        str: human readable description of the start type value.
    """
    if isinstance(self.start_type, py2to3.STRING_TYPES):
        return self.start_type

    return human_readable_service_enums.SERVICE_ENUMS['Start'].get(
        self.start_type, '{0:d}'.format(self.start_type))
0.004988
def subdict_match(data, expr, delimiter=DEFAULT_TARGET_DELIM,
                  regex_match=False, exact_match=False):
    '''
    Check for a match in a dictionary using a delimiter character to denote
    levels of subdicts, and also allowing the delimiter character to be
    matched. Thus, 'foo:bar:baz' will match data['foo'] == 'bar:baz' and
    data['foo']['bar'] == 'baz'. The latter would take priority over the
    former, as more deeply-nested matches are tried first.
    '''
    def _match(target, pattern, regex_match=False, exact_match=False):
        # The reason for using six.text_type first and _then_ using
        # to_unicode as a fallback is because we want to eventually have
        # unicode types for comparison below. If either value is numeric then
        # six.text_type will turn it into a unicode string. However, if the
        # value is a PY2 str type with non-ascii chars, then the result will
        # be a UnicodeDecodeError. In those cases, we simply use to_unicode
        # to decode it to unicode. The reason we can't simply use to_unicode
        # to begin with is that (by design) to_unicode will raise a TypeError
        # if a non-string/bytestring/bytearray value is passed.
        try:
            target = six.text_type(target).lower()
        except UnicodeDecodeError:
            target = salt.utils.stringutils.to_unicode(target).lower()
        try:
            pattern = six.text_type(pattern).lower()
        except UnicodeDecodeError:
            pattern = salt.utils.stringutils.to_unicode(pattern).lower()
        if regex_match:
            try:
                return re.match(pattern, target)
            except Exception:
                log.error('Invalid regex \'%s\' in match', pattern)
                return False
        else:
            return target == pattern if exact_match \
                else fnmatch.fnmatch(target, pattern)

    def _dict_match(target, pattern, regex_match=False, exact_match=False):
        wildcard = pattern.startswith('*:')
        if wildcard:
            pattern = pattern[2:]
        if pattern == '*':
            # We are just checking that the key exists
            return True
        elif pattern in target:
            # We might want to search for a key
            return True
        elif subdict_match(target, pattern, regex_match=regex_match,
                           exact_match=exact_match):
            return True
        if wildcard:
            for key in target:
                if isinstance(target[key], dict):
                    if _dict_match(target[key], pattern,
                                   regex_match=regex_match,
                                   exact_match=exact_match):
                        return True
                elif isinstance(target[key], list):
                    for item in target[key]:
                        if _match(item, pattern, regex_match=regex_match,
                                  exact_match=exact_match):
                            return True
                elif _match(target[key], pattern, regex_match=regex_match,
                            exact_match=exact_match):
                    return True
        return False

    splits = expr.split(delimiter)
    num_splits = len(splits)
    if num_splits == 1:
        # Delimiter not present, this can't possibly be a match
        return False

    # If we have 4 splits, then we have three delimiters. Thus, the indexes
    # we want to use are 3, 2, and 1, in that order.
    for idx in range(num_splits - 1, 0, -1):
        key = delimiter.join(splits[:idx])
        if key == '*':
            # We are matching on everything under the top level, so we need
            # to treat the match as the entire data being passed in
            matchstr = expr
            match = data
        else:
            matchstr = delimiter.join(splits[idx:])
            match = traverse_dict_and_list(data, key, {}, delimiter=delimiter)
        log.debug("Attempting to match '%s' in '%s' using delimiter '%s'",
                  matchstr, key, delimiter)
        if match == {}:
            continue
        if isinstance(match, dict):
            if _dict_match(match, matchstr, regex_match=regex_match,
                           exact_match=exact_match):
                return True
            continue
        if isinstance(match, (list, tuple)):
            # We are matching a single component to a single list member
            for member in match:
                if isinstance(member, dict):
                    if _dict_match(member, matchstr,
                                   regex_match=regex_match,
                                   exact_match=exact_match):
                        return True
                if _match(member, matchstr, regex_match=regex_match,
                          exact_match=exact_match):
                    return True
            continue
        if _match(match, matchstr, regex_match=regex_match,
                  exact_match=exact_match):
            return True
    return False
0.000176
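A worked example of the two match forms named in subdict_match's docstring (illustrative only; the function also needs salt's traverse_dict_and_list, six, fnmatch, re and log in scope):

data = {'foo': {'bar': 'baz'}}
assert subdict_match(data, 'foo:bar:baz')                # data['foo']['bar'] == 'baz'
assert subdict_match({'foo': 'bar:baz'}, 'foo:bar:baz')  # delimiter inside the value
assert not subdict_match(data, 'foo')                    # no delimiter -> never a match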
def pykeyword(operation='list', keywordtotest=None):
    """
    Check if a keyword exists in the Python keyword dictionary.

    :type operation: string
    :param operation: Whether to list or check the keywords.
        Possible options are 'list' and 'in'.

    :type keywordtotest: string
    :param keywordtotest: The keyword to check.

    :return: The list of keywords or if a keyword exists.
    :rtype: list or boolean

    >>> "True" in pykeyword("list")
    True

    >>> pykeyword("in", "True")
    True

    >>> pykeyword("in", "foo")
    False

    >>> pykeyword("foo", "foo")
    Traceback (most recent call last):
        ...
    ValueError: Invalid operation specified.
    """
    # If the operation was 'list'
    if operation == 'list':
        # Return an array of keywords
        return str(keyword.kwlist)
    # If the operation was 'in'
    elif operation == 'in':
        # Return a boolean for if the string was a keyword
        return keyword.iskeyword(str(keywordtotest))
    # Raise a warning
    raise ValueError("Invalid operation specified.")
0.001869
def append(self, png, **options):
    """Append one frame.

    :arg PNG png: Append a :class:`PNG` as a frame.
    :arg dict options: The options for :class:`FrameControl`.
    """
    if not isinstance(png, PNG):
        raise TypeError("Expect an instance of `PNG` but got `{}`".format(png))
    control = FrameControl(**options)
    if control.width is None:
        control.width = png.width
    if control.height is None:
        control.height = png.height
    self.frames.append((png, control))
0.03397
def countByValueAndWindow(self, windowDuration, slideDuration, numPartitions=None):
    """
    Return a new DStream in which each RDD contains the count of distinct
    elements in RDDs in a sliding window over this DStream.

    @param windowDuration: width of the window; must be a multiple of this
                           DStream's batching interval
    @param slideDuration:  sliding interval of the window (i.e., the interval
                           after which the new DStream will generate RDDs);
                           must be a multiple of this DStream's batching
                           interval
    @param numPartitions:  number of partitions of each RDD in the new
                           DStream.
    """
    keyed = self.map(lambda x: (x, 1))
    counted = keyed.reduceByKeyAndWindow(operator.add, operator.sub,
                                         windowDuration, slideDuration,
                                         numPartitions)
    return counted.filter(lambda kv: kv[1] > 0)
0.009221
def convert_list_elements(self):
    """
    A sequence of two or more items, which may or may not be ordered.

    The <list> element has an optional <label> element and optional <title>
    element, followed by one or more <list-item> elements. This element is
    recursive as the <list-item> elements may contain further <list> or
    <def-list> elements. Much of the potential complexity in dealing with
    lists comes from this recursion.
    """
    #I have yet to gather many examples of this element, and may have to
    #write a recursive method for the processing of lists depending on how
    #PLoS produces their XML, for now this method is ignorant of nesting

    #TODO: prefix-words, one possible solution would be to have this method
    #edit the CSS to provide formatting support for arbitrary prefixes...

    #This is a block level element, so elevate it if found in p
    for list_el in self.main.getroot().findall('.//list'):
        if list_el.getparent().tag == 'p':
            elevate_element(list_el)

    #list_el is used instead of list (list is reserved)
    for list_el in self.main.getroot().findall('.//list'):
        if 'list-type' not in list_el.attrib:
            list_el_type = 'order'
        else:
            list_el_type = list_el.attrib['list-type']
        #Unordered lists
        if list_el_type in ['', 'bullet', 'simple']:
            list_el.tag = 'ul'
            #CSS must be used to recognize the class and suppress bullets
            if list_el_type == 'simple':
                list_el.attrib['class'] = 'simple'
        #Ordered lists
        else:
            list_el.tag = 'ol'
            list_el.attrib['class'] = list_el_type
        #Convert the list-item element tags to 'li'
        for list_item in list_el.findall('list-item'):
            list_item.tag = 'li'
        remove_all_attributes(list_el, exclude=['id', 'class'])
0.00636
def _startDPDrag(self):
    """Callback for item menu."""
    dp = self._dp_menu_on
    if dp and dp.archived:
        drag = QDrag(self)
        mimedata = QMimeData()
        mimedata.setUrls([QUrl.fromLocalFile(dp.fullpath)])
        drag.setMimeData(mimedata)
        drag.exec_(Qt.CopyAction | Qt.LinkAction)
0.005797
def cee_map_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    cee_map = ET.SubElement(config, "cee-map",
                            xmlns="urn:brocade.com:mgmt:brocade-cee-map")
    name = ET.SubElement(cee_map, "name")
    name.text = kwargs.pop('name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.007752
def propose_live(self):
    """Return a live point/axes to be used by other sampling methods."""

    # Copy a random live point.
    i = self.rstate.randint(self.nlive)
    u = self.live_u[i, :]

    # Check for ellipsoid overlap.
    ell_idxs = self.mell.within(u)
    nidx = len(ell_idxs)

    # Automatically trigger an update if we're not in any ellipsoid.
    if nidx == 0:
        try:
            # Expected ln(prior volume) at a given iteration.
            expected_vol = math.exp(self.saved_logvol[-1] - self.dlv)
        except:
            # Expected ln(prior volume) at the first iteration.
            expected_vol = math.exp(-self.dlv)
        pointvol = expected_vol / self.nlive  # minimum point volume

        # Update the bounding ellipsoids.
        bound = self.update(pointvol)
        if self.save_bounds:
            self.bound.append(bound)
        self.nbound += 1
        self.since_update = 0

        # Check for ellipsoid overlap (again).
        ell_idxs = self.mell.within(u)
        nidx = len(ell_idxs)

    # Pick a random ellipsoid that encompasses `u`.
    ell_idx = ell_idxs[self.rstate.randint(nidx)]

    # Choose axes.
    if self.sampling in ['rwalk', 'rstagger', 'rslice']:
        ax = self.mell.ells[ell_idx].axes
    elif self.sampling == 'slice':
        ax = self.mell.ells[ell_idx].paxes
    else:
        ax = np.identity(self.npdim)

    return u, ax
0.001944
def _ask_local_config(self):
    """ Ask some parameters about the local configuration """
    options = {"backend": "local", "local-config": {}}

    # Concurrency
    while True:
        concurrency = self._ask_with_default(
            "Maximum concurrency (number of tasks running simultaneously). "
            "Leave it empty to use the number of CPU of your host.", "")
        if concurrency == "":
            break
        try:
            concurrency = int(concurrency)
        except:
            self._display_error("Invalid number")
            continue
        if concurrency <= 0:
            self._display_error("Invalid number")
            continue
        options["local-config"]["concurrency"] = concurrency
        break

    # Debug hostname
    hostname = self._ask_with_default(
        "What is the external hostname/address of your machine? You can "
        "leave this empty and let INGInious autodetect it.", "")
    if hostname != "":
        options["local-config"]["debug_host"] = hostname

    self._display_info(
        "You can now enter the port range for the remote debugging feature "
        "of INGInious. Please verify that these ports are open in your "
        "firewall. You can leave this parameters empty, the default is "
        "64100-64200")

    # Debug port range
    port_range = None
    while True:
        start_port = self._ask_with_default("Beginning of the range", "")
        if start_port != "":
            try:
                start_port = int(start_port)
            except:
                self._display_error("Invalid number")
                continue
            end_port = self._ask_with_default("End of the range",
                                              str(start_port + 100))
            try:
                end_port = int(end_port)
            except:
                self._display_error("Invalid number")
                continue
            if start_port > end_port:
                self._display_error("Invalid range")
                continue
            port_range = str(start_port) + "-" + str(end_port)
        else:
            break
    if port_range is not None:
        options["local-config"]["debug_ports"] = port_range

    return options
0.004632
def ensure():
    """
    Makes sure the current working directory is a Git repository.
    """
    LOGGER.debug('checking repository')
    if not os.path.exists('.git'):
        LOGGER.error('This command is meant to be ran in a Git repository.')
        sys.exit(-1)
    LOGGER.debug('repository OK')
0.008982
def _expected_reads(run_info_file):
    """Parse the number of expected reads from the RunInfo.xml file.
    """
    reads = []
    if os.path.exists(run_info_file):
        tree = ElementTree()
        tree.parse(run_info_file)
        read_elem = tree.find("Run/Reads")
        reads = read_elem.findall("Read")
    return len(reads)
0.002985
def is_alpha_number(number):
    """Checks if the number is a valid vanity (alpha) number such as 800
    MICROSOFT. A valid vanity number will start with at least 3 digits and
    will have three or more alpha characters. This does not do
    region-specific checks - to work out if this number is actually valid
    for a region, it should be parsed and methods such as
    is_possible_number_with_reason() and is_valid_number() should be used.

    Arguments:
    number -- the number that needs to be checked

    Returns True if the number is a valid vanity number
    """
    if not _is_viable_phone_number(number):
        # Number is too short, or doesn't match the basic phone number pattern.
        return False
    extension, stripped_number = _maybe_strip_extension(number)
    return bool(fullmatch(_VALID_ALPHA_PHONE_PATTERN, stripped_number))
0.001166
def haversine(point1, point2, unit='km'):
    """ Calculate the great-circle distance between two points on the Earth
    surface.

    :input: two 2-tuples, containing the latitude and longitude of each point
        in decimal degrees.

    Keyword arguments:
    unit -- a string containing the initials of a unit of measurement
            (i.e. miles = mi) default 'km' (kilometers).

    Example: haversine((45.7597, 4.8422), (48.8567, 2.3508))

    :output: Returns the distance between the two points. The default
        returned unit is kilometers. The default unit can be changed by
        setting the unit parameter to a string containing the initials of
        the desired unit. Other available units are miles (mi), nautical
        miles (nmi), meters (m), feet (ft) and inches (in).
    """
    # mean earth radius - https://en.wikipedia.org/wiki/Earth_radius#Mean_radius
    AVG_EARTH_RADIUS_KM = 6371.0088

    # Units values taken from http://www.unitconversion.org/unit_converter/length.html
    conversions = {'km': 1,
                   'm': 1000,
                   'mi': 0.621371192,
                   'nmi': 0.539956803,
                   'ft': 3280.839895013,
                   'in': 39370.078740158}

    # get earth radius in required units
    avg_earth_radius = AVG_EARTH_RADIUS_KM * conversions[unit]

    # unpack latitude/longitude
    lat1, lng1 = point1
    lat2, lng2 = point2

    # convert all latitudes/longitudes from decimal degrees to radians
    lat1, lng1, lat2, lng2 = map(radians, (lat1, lng1, lat2, lng2))

    # calculate haversine
    lat = lat2 - lat1
    lng = lng2 - lng1
    d = sin(lat * 0.5) ** 2 + cos(lat1) * cos(lat2) * sin(lng * 0.5) ** 2

    return 2 * avg_earth_radius * asin(sqrt(d))
0.003472
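A usage sketch for haversine, reusing the example pair from its docstring; the distances shown are approximate:

lyon, paris = (45.7597, 4.8422), (48.8567, 2.3508)
haversine(lyon, paris)             # ~392.2 (kilometers, the default unit)
haversine(lyon, paris, unit='mi')  # ~243.7 (miles)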
def experiments_predictions_create(self, experiment_id, model_id,
                                   argument_defs, name, arguments=None,
                                   properties=None):
    """Create new model run for given experiment.

    Parameters
    ----------
    experiment_id : string
        Unique experiment identifier
    model_id : string
        Unique identifier of model to run
    name : string
        User-provided name for the model run
    argument_defs : list(attribute.AttributeDefinition)
        Definition of valid arguments for the given model
    arguments : list(dict('name':...,'value:...')), optional
        List of attribute instances
    properties : Dictionary, optional
        Set of model run properties.

    Returns
    -------
    ModelRunHandle
        Handle for created model run or None if experiment is unknown
    """
    # Get experiment to ensure that it exists
    if self.experiments_get(experiment_id) is None:
        return None
    # Return created model run
    return self.predictions.create_object(
        name,
        experiment_id,
        model_id,
        argument_defs,
        arguments=arguments,
        properties=properties
    )
0.002375
def create(cls, session, record, imported=False, auto_reply=False):
    """Create a conversation.

    Please note that conversation cannot be created with more than 100
    threads, if attempted the API will respond with HTTP 412.

    Args:
        session (requests.sessions.Session): Authenticated session.
        record (helpscout.models.Conversation): The conversation to be
            created.
        imported (bool, optional): The ``imported`` request parameter
            enables conversations to be created for historical purposes
            (i.e. if moving from a different platform, you can import your
            history). When ``imported`` is set to ``True``, no outgoing
            emails or notifications will be generated.
        auto_reply (bool): The ``auto_reply`` request parameter enables
            auto replies to be sent when a conversation is created via the
            API. When ``auto_reply`` is set to ``True``, an auto reply will
            be sent as long as there is at least one ``customer`` thread in
            the conversation.

    Returns:
        helpscout.models.Conversation: Newly created conversation.
    """
    return super(Conversations, cls).create(
        session,
        record,
        imported=imported,
        auto_reply=auto_reply,
    )
0.001446
def p_route(self, p):
    """route : ROUTE route_name route_version route_io route_deprecation NL \
INDENT docsection attrssection DEDENT
             | ROUTE route_name route_version route_io route_deprecation NL"""
    p[0] = AstRouteDef(self.path, p.lineno(1), p.lexpos(1), p[2], p[3],
                       p[5], *p[4])
    if len(p) > 7:
        p[0].set_doc(p[8])
        if p[9]:
            keys = set()
            for attr in p[9]:
                if attr.name in keys:
                    msg = "Attribute '%s' defined more than once." % attr.name
                    self.errors.append((msg, attr.lineno, attr.path))
                keys.add(attr.name)
            p[0].set_attrs(p[9])
0.007989
def free(self):
    """Remove the lock on the connection if the connection is not active

    :raises: ConnectionBusyError

    """
    LOGGER.debug('Connection %s freeing', self.id)
    if self.handle.isexecuting():
        raise ConnectionBusyError(self)
    with self._lock:
        self.used_by = None
    LOGGER.debug('Connection %s freed', self.id)
0.005128
def saltpath():
    '''
    Return the path of the salt module
    '''
    # Provides:
    #   saltpath
    salt_path = os.path.abspath(os.path.join(__file__, os.path.pardir))
    return {'saltpath': os.path.dirname(salt_path)}
0.004405
def can_download(self, dist, force=False):
    """
    Can download if location/file does not exist or if force=True

    :param dist:
    :param force:
    :return: True/False
    """
    return not os.path.exists(os.path.join(self.output, dist['basename'])) or force
0.010169
def trimRight(self, amount):
    """
    Trim this fastqSequence in-place by removing <amount> nucleotides from
    the 3' end (right end).

    :param amount: the number of nucleotides to trim from the right-side of
                   this sequence.
    """
    if amount == 0:
        return
    self.sequenceData = self.sequenceData[:-amount]
    self.seq_qual = self.seq_qual[:-amount]
0.005063
def update_line(s, bold=False, underline=False, blinking=False, color=None,
                bgcolor=None):
    """
    Overwrites the output of the current line and prints s on the same line
    without a new line.
    """
    s = get_line(s, bold=bold, underline=underline, blinking=blinking,
                 color=color, bgcolor=bgcolor, update_line=True)
    print(s, end='')
0.005495
def _http_error_handler(http_error):
    ''' Simple error handler for azure.'''
    message = str(http_error)
    if http_error.respbody is not None:
        message += '\n' + http_error.respbody.decode('utf-8-sig')
    raise AzureHttpError(message, http_error.status)
0.003731
def _proxy(self):
    """
    Generate an instance context for the instance, the context is capable of
    performing various actions.  All instance actions are proxied to the
    context

    :returns: ModelBuildContext for this ModelBuildInstance
    :rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildContext
    """
    if self._context is None:
        self._context = ModelBuildContext(
            self._version,
            assistant_sid=self._solution['assistant_sid'],
            sid=self._solution['sid'],
        )
    return self._context
0.008143
def readOne(stream, validate=False, transform=True, ignoreUnreadable=False,
            allowQP=False):
    """
    Return the first component from stream.
    """
    return next(readComponents(stream, validate, transform,
                               ignoreUnreadable, allowQP))
0.003546
def dt2ts(dt, drop_micro=False):
    ''' convert datetime objects to timestamp seconds (float) '''
    is_true(HAS_DATEUTIL, "`pip install python_dateutil` required")
    if is_empty(dt, except_=False):
        ts = None
    elif isinstance(dt, (int, long, float)):  # its a ts already
        ts = float(dt)
    elif isinstance(dt, basestring):  # convert to datetime first
        try:
            parsed_dt = float(dt)
        except (TypeError, ValueError):
            parsed_dt = dt_parse(dt)
        ts = dt2ts(parsed_dt)
    else:
        assert isinstance(dt, (datetime, date))
        # keep micros; see: http://stackoverflow.com/questions/7031031
        ts = ((timegm(dt.timetuple()) * 1000.0) +
              (dt.microsecond / 1000.0)) / 1000.0
    if ts is None:
        pass
    elif drop_micro:
        ts = float(int(ts))
    else:
        ts = float(ts)
    return ts
0.001116
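A quick sketch of the input types dt2ts accepts; note the function is Python 2 flavored (it references long and basestring) and assumes python-dateutil is installed, as its first line requires:

from datetime import datetime

dt2ts(datetime(1970, 1, 2))    # 86400.0 (datetime -> epoch seconds)
dt2ts('1970-01-02T00:00:00')   # 86400.0 (strings are parsed via dateutil)
dt2ts(86400)                   # 86400.0 (numeric timestamps pass through)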
def sort(records: Sequence[Record]) -> List[Record]:
    "Sort records into a canonical order, suitable for comparison."
    return sorted(records, key=_record_key)
0.006098
def get_dev_at_mountpoint(mntpoint):
    """
    Retrieves the device mounted at mntpoint, or raises
    MountError if none.
    """
    results = util.subp(['findmnt', '-o', 'SOURCE', mntpoint])
    if results.return_code != 0:
        raise MountError('No device mounted at %s' % mntpoint)
    stdout = results.stdout.decode(sys.getdefaultencoding())
    return stdout.replace('SOURCE\n', '').strip().split('\n')[-1]
0.004396
def true_num_genes(model, custom_spont_id=None):
    """Return the number of genes in a model ignoring spontaneously labeled
    genes.

    Args:
        model (Model):
        custom_spont_id (str): Optional custom spontaneous ID if it does not
            match the regular expression ``[Ss](_|)0001``

    Returns:
        int: Number of genes excluding spontaneous genes
    """
    true_num = 0
    for gene in model.genes:
        if not is_spontaneous(gene, custom_id=custom_spont_id):
            true_num += 1
    return true_num
0.005725
def get_lat_long(self, callsign, timestamp=timestamp_now):
    """ Returns Latitude and Longitude for a callsign

    Args:
        callsign (str): Amateur Radio callsign
        timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)

    Returns:
        dict: Containing Latitude and Longitude

    Raises:
        KeyError: No data found for callsign

    Example:
        The following code returns Latitude & Longitude for "DH1TW"

        >>> from pyhamtools import LookupLib, Callinfo
        >>> my_lookuplib = LookupLib(lookuptype="countryfile")
        >>> cic = Callinfo(my_lookuplib)
        >>> cic.get_lat_long("DH1TW")
        {
            'latitude': 51.0,
            'longitude': -10.0
        }

    Note:
        Unfortunately, in most cases the returned Latitude and Longitude
        are not very precise. Clublog and Country-files.com use the
        country's capital coordinates in most cases, if no dedicated entry
        in the database exists. Best results will be retrieved with QRZ.com
        Lookup.
    """
    callsign_data = self.get_all(callsign, timestamp=timestamp)
    return {
        const.LATITUDE: callsign_data[const.LATITUDE],
        const.LONGITUDE: callsign_data[const.LONGITUDE]
    }
0.003723
def finalize(self):
    """
    Finalize the visualization to create an "origin grid" feel instead of
    the default matplotlib feel. Set the title, remove spines, and label
    the grid with components. This function also adds a legend from the
    sizes if required.
    """
    # Set the default title if a user hasn't supplied one
    self.set_title("{} Intercluster Distance Map (via {})".format(
        self.estimator.__class__.__name__, self.embedding.upper()
    ))

    # Create the origin grid and minimalist display
    self.ax.set_xticks([0])
    self.ax.set_yticks([0])
    self.ax.set_xticklabels([])
    self.ax.set_yticklabels([])
    self.ax.set_xlabel("PC2")
    self.ax.set_ylabel("PC1")

    # Make the legend by creating an inset axes that shows relative sizing
    # based on the scoring metric supplied by the user.
    if self.legend:
        self._make_size_legend()

    return self.ax
0.001998
def continuous_decode_on_train_data(self):
    """Decode from dataset on new checkpoint."""
    for _ in next_checkpoint(self._hparams.model_dir,
                             self._decode_hparams.decode_timeout_mins):
        self.decode(dataset_split=tf.estimator.ModeKeys.TRAIN)
0.007194
def _build_units(indep_units, dep_units, op):
    """Build unit math operations."""
    if (not dep_units) and (not indep_units):
        return ""
    if dep_units and (not indep_units):
        return dep_units
    if (not dep_units) and indep_units:
        return (
            remove_extra_delims("1{0}({1})".format(op, indep_units))
            if op == "/"
            else remove_extra_delims("({0})".format(indep_units))
        )
    return remove_extra_delims("({0}){1}({2})".format(dep_units, op, indep_units))
0.003831
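A few illustrative evaluations of _build_units, modulo whatever cleanup remove_extra_delims applies:

_build_units('', 'm', '/')   # 'm'       (no independent units)
_build_units('s', '', '/')   # '1/(s)'   (reciprocal of the independent units)
_build_units('s', 'm', '/')  # '(m)/(s)'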
def safe_index(cls, unique_id):
    """ Return a valid elastic index generated from unique_id """
    index = unique_id
    if unique_id:
        index = unique_id.replace("/", "_").lower()
    return index
0.00885
def end_of_history(event):
    """
    Move to the end of the input history, i.e., the line currently being
    entered.
    """
    event.current_buffer.history_forward(count=10 ** 100)
    buff = event.current_buffer
    buff.go_to_history(len(buff._working_lines) - 1)
0.007547
def show(fig=None, path='_map.html', **kwargs):
    """
    Convert a Matplotlib Figure to a Leaflet map. Open in a browser

    Parameters
    ----------
    fig : figure, default gcf()
        Figure used to convert to map
    path : string, default '_map.html'
        Filename where output html will be saved

    See fig_to_html() for description of keyword args.
    """
    import webbrowser
    fullpath = os.path.abspath(path)
    with open(fullpath, 'w') as f:
        save_html(fig, fileobj=f, **kwargs)
    webbrowser.open('file://' + fullpath)
0.001795
def get_next_grade(self):
    """Gets the next ``Grade`` in this list.

    return: (osid.grading.Grade) - the next ``Grade`` in this list.
            The ``has_next()`` method should be used to test that a
            next ``Grade`` is available before calling this method.
    raise:  IllegalState - no more elements available in this list
    raise:  OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*

    """
    try:
        next_object = next(self)
    except StopIteration:
        raise IllegalState('no more elements available in this list')
    except Exception:  # Need to specify exceptions here!
        raise OperationFailed()
    else:
        return next_object
0.002497
def stdio_mgr(in_str=""):
    r"""Substitute temporary text buffers for `stdio` in a managed context.

    Context manager.

    Substitutes empty :cls:`~io.StringIO`\ s for :cls:`sys.stdout` and
    :cls:`sys.stderr`, and a :cls:`TeeStdin` for :cls:`sys.stdin` within
    the managed context. Upon exiting the context, the original stream
    objects are restored within :mod:`sys`, and the temporary streams are
    closed.

    Parameters
    ----------
    in_str
        |str| *(optional)* -- Initialization text for the :cls:`TeeStdin`
        substitution for `stdin`. Default is an empty string.

    Yields
    ------
    in_
        :cls:`TeeStdin` -- Temporary stream for `stdin`.

    out_
        :cls:`~io.StringIO` -- Temporary stream for `stdout`, initially
        empty.

    err_
        :cls:`~io.StringIO` -- Temporary stream for `stderr`, initially
        empty.

    """
    old_stdin = sys.stdin
    old_stdout = sys.stdout
    old_stderr = sys.stderr

    new_stdout = StringIO()
    new_stderr = StringIO()
    new_stdin = TeeStdin(new_stdout, in_str)

    sys.stdin = new_stdin
    sys.stdout = new_stdout
    sys.stderr = new_stderr

    yield new_stdin, new_stdout, new_stderr

    sys.stdin = old_stdin
    sys.stdout = old_stdout
    sys.stderr = old_stderr

    new_stdin.close()
    new_stdout.close()
    new_stderr.close()
0.00073
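A usage sketch for stdio_mgr, assuming the generator is wrapped with contextlib.contextmanager as its docstring implies:

with stdio_mgr('hello\n') as (in_, out_, err_):
    print(input())               # input() reads 'hello' from the substituted stdin
    captured = out_.getvalue()
assert 'hello' in captured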
def set_session_info(self, info):
    '''
    :type info: :class:`~kitty.data.data_manager.SessionInfo`
    :param info: info to set
    '''
    if not self.info:
        self.info = SessionInfo()
        info_d = self.info.as_dict()
        ks = []
        vs = []
        for k, v in info_d.items():
            ks.append(k)
            vs.append(v)
        self.insert(ks, vs)
    changed = self.info.copy(info)
    if changed:
        self.update(self.info.as_dict())
0.003738
def package_installed(self, package, shutit_pexpect_child=None, note=None,
                      loglevel=logging.DEBUG):
    """Returns True if we can be sure the package is installed.

    @param package:              Package as a string, eg 'wget'.
    @param shutit_pexpect_child: See send()
    @param note:                 See send()

    @rtype: boolean
    """
    shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
    shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
    return shutit_pexpect_session(package, note=note, loglevel=loglevel)
0.039474
def executeOnController(self, clusterId, script, lang):
    """
    Parameters:
     - clusterId
     - script
     - lang
    """
    self.send_executeOnController(clusterId, script, lang)
    return self.recv_executeOnController()
0.007576
def mapped_read_count(self, force=False):
    """
    Counts total reads in a BAM file.

    If a file self.fn + '.mmr' exists, then just read the first line of
    that file that doesn't start with a "#".  If such a file doesn't exist,
    then it will be created with the number of reads as the first and only
    line in the file.

    The result is also stored in self._readcount so that the time-consuming
    part only runs once; use force=True to force re-count.

    Parameters
    ----------
    force : bool
        If True, then force a re-count; otherwise use cached data if
        available.
    """
    # Already run?
    if self._readcount and not force:
        return self._readcount

    if os.path.exists(self.fn + '.mmr') and not force:
        for line in open(self.fn + '.mmr'):
            if line.startswith('#'):
                continue
            self._readcount = float(line.strip())
            return self._readcount

    cmds = ['samtools', 'view', '-c', '-F', '0x4', self.fn]
    p = subprocess.Popen(
        cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    if stderr:
        sys.stderr.write('samtools says: %s' % stderr)
        return None
    mapped_reads = int(stdout)

    # write to file so the next time you need the lib size you can access
    # it quickly
    if not os.path.exists(self.fn + '.mmr'):
        fout = open(self.fn + '.mmr', 'w')
        fout.write(str(mapped_reads) + '\n')
        fout.close()
    self._readcount = mapped_reads
    return self._readcount
0.001119
def _ast_to_code(self, node, **kwargs):
    """Convert an abstract syntax tree to python source code."""
    if isinstance(node, OptreeNode):
        return self._ast_optree_node_to_code(node, **kwargs)
    elif isinstance(node, Identifier):
        return self._ast_identifier_to_code(node, **kwargs)
    elif isinstance(node, Terminal):
        return self._ast_terminal_to_code(node, **kwargs)
    elif isinstance(node, OptionGroup):
        return self._ast_option_group_to_code(node, **kwargs)
    elif isinstance(node, RepetitionGroup):
        return self._ast_repetition_group_to_code(node, **kwargs)
    elif isinstance(node, SpecialHandling):
        return self._ast_special_handling_to_code(node, **kwargs)
    elif isinstance(node, Number):
        return self._ast_number_to_code(node, **kwargs)
    else:
        raise Exception("Unhandled ast node: {0}".format(node))
0.010381
def setup():
    """ Returns the attributes of this family if using in a probabilistic model

    Notes
    ----------
    - scale notes whether family has a variance parameter (sigma)
    - shape notes whether family has a tail thickness parameter (nu)
    - skewness notes whether family has a skewness parameter (gamma)
    - mean_transform is a function which transforms the location parameter
    - cythonized notes whether the family has cythonized routines

    Returns
    ----------
    - model name, link function, scale, shape, skewness, mean_transform,
      cythonized
    """
    name = "Cauchy"
    link = np.array
    scale = True
    shape = False
    skewness = False
    mean_transform = np.array
    cythonized = True  # used for GAS models
    return name, link, scale, shape, skewness, mean_transform, cythonized
0.006557
def user_timeline(self, delegate, user=None, params={}, extra_args=None):
    """Get the most recent updates for a user.

    If no user is specified, the statuses for the authenticating user are
    returned.

    See search for example of how results are returned."""
    if user:
        params['id'] = user
    return self.__get('/statuses/user_timeline.xml', delegate, params,
                      txml.Statuses, extra_args=extra_args)
0.004228
def trigger(self, events, *args, **kwargs):
    """
    Fires the given *events* (string or list of strings).  All callbacks
    associated with these *events* will be called and if their respective
    objects have a *times* value set it will be used to determine when to
    remove the associated callback from the event.

    If given, callbacks associated with the given *events* will be called
    with *args* and *kwargs*.
    """
    # Make sure our _on_off_events dict is present (if first invocation)
    if not hasattr(self, '_on_off_events'):
        self._on_off_events = {}
    if not hasattr(self, 'exc_info'):
        self.exc_info = None
    logging.debug("OnOffMixin triggering event(s): %s" % events)
    if isinstance(events, (str, unicode)):
        events = [events]
    for event in events:
        if event in self._on_off_events:
            for callback_obj in self._on_off_events[event]:
                callback_obj['callback'](*args, **kwargs)
                callback_obj['calls'] += 1
                if callback_obj['calls'] == callback_obj['times']:
                    self.off(event, callback_obj['callback'])
0.001619
def _gen_csv_data(self, f, dialect):
    """
    Yields (column data, row number) tuples from the given csv file
    handler, using the given Dialect named tuple instance. Depends on
    self.ipa_col being correctly set.

    Helper for the gen_ipa_data method.
    """
    reader = self._get_csv_reader(f, dialect)
    for line in reader:
        try:
            datum = line[self.ipa_col]
        except IndexError:
            mes = 'Could not find IPA data on line: {}'.format(line)
            raise ValueError(mes)
        yield datum, reader.line_num
0.031683
def infer_x(self, y, sigma=None, k=None, **kwargs):
    """Infer probable x from input y

    @param y  the desired output for inferred x.
    @param k  how many neighbors to consider for the average; this value
              overrides the class provided one on a per method call basis.
    """
    assert len(y) == self.fmodel.dim_y, \
        "Wrong dimension for y. Expected %i, got %i" % (self.fmodel.dim_y, len(y))
    k = k or self.k
    sigma = sigma or self.sigma
    x_guess = self._guess_x(y, k=k)[0]
    dists, index = self.fmodel.dataset.nn_x(x_guess, k=k)
    w = self._weights(index, dists, sigma * sigma, y)
    return [np.sum([wi * self.fmodel.dataset.get_x(idx)
                    for wi, idx in zip(w, index)], axis=0)]
0.011392
def get_service_state(self, service_id: str) -> str:
    """Get the state of the service.

    Only the manager nodes can retrieve service state

    Args:
        service_id (str): Service id

    Returns:
        str, state of the service
    """
    # Get service
    service = self._client.services.get(service_id)

    # Get the state of the service
    for service_task in service.tasks():
        service_state = service_task['DesiredState']

    return service_state
0.003802
def remove_pool_snapshot(service, pool_name, snapshot_name):
    """
    Remove a snapshot from a RADOS pool in ceph.

    :param service: six.string_types. The Ceph user name to run the command under
    :param pool_name: six.string_types
    :param snapshot_name: six.string_types
    :return: None. Can raise CalledProcessError
    """
    cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name,
           snapshot_name]
    # check_call already raises CalledProcessError on a non-zero exit, so
    # no try/except wrapper that merely re-raises is needed.
    check_call(cmd)
0.005964
def _data(self): """A deep copy NDArray of the data array associated with the BaseSparseNDArray. This function blocks. Do not use it in performance critical code. """ self.wait_to_read() hdl = NDArrayHandle() check_call(_LIB.MXNDArrayGetDataNDArray(self.handle, ctypes.byref(hdl))) return NDArray(hdl)
0.011173
def easy_train_and_evaluate(hyper_params, Model=None, create_loss=None, training_data=None, validation_data=None,
                            inline_plotting=False, session_config=None, log_suffix=None, continue_training=False,
                            continue_with_specific_checkpointpath=None):
    """
    Train and evaluate your model without any boilerplate code.

    1) Write your data using the starttf.tfrecords.autorecords.write_data method.
    2) Create your hyper parameter file containing all required fields and then load it using
       starttf.utils.hyper_params.load_params method.
       Minimal Sample Hyperparams File:
       {"train": {
           "learning_rate": {
               "type": "const",
               "start_value": 0.001
           },
           "optimizer": {
               "type": "adam"
           },
           "batch_size": 1024,
           "steps": 10000,
           "summary_steps": 100,
           "save_checkpoint_steps": 1000,
           "keep_checkpoint_max": 5,
           "checkpoint_path": "checkpoints/mnist",
           "tf_records_path": "data/.records/mnist"
           }
       }
    3) Pass everything required to this method and that's it.
    :param hyper_params: The hyper parameters object loaded via starttf.utils.hyper_params.load_params
    :param Model: A keras model.
    :param create_loss: A create_loss function like that in starttf.examples.mnist.loss.
    :param training_data: An optional input function; when omitted it is built from the tf records path in the hyper parameters.
    :param validation_data: An optional input function for evaluation; built from the tf records path when omitted.
    :param inline_plotting: When you are using jupyter notebooks you can tell it to plot the loss directly inside the notebook.
    :param session_config: A configuration for the session.
    :param log_suffix: A suffix for the log folder, so you can remember what was special about the run.
    :param continue_training: Bool, continue last training in the checkpoint path specified in the hyper parameters.
    :param continue_with_specific_checkpointpath: Name of a specific checkpoint folder (relative to the checkpoint path) to continue from.
    :return: The trained tf.estimator.Estimator.
    """
    time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H.%M.%S')
    chkpt_path = hyper_params.train.checkpoint_path + "/" + time_stamp
    if log_suffix is not None:
        chkpt_path = chkpt_path + "_" + log_suffix

    if session_config is None:
        session_config = get_default_config()

    if continue_with_specific_checkpointpath:
        chkpt_path = hyper_params.train.checkpoint_path + "/" + continue_with_specific_checkpointpath
        print("Continue with checkpoint: {}".format(chkpt_path))
    elif continue_training:
        chkpts = sorted([name for name in os.listdir(hyper_params.train.checkpoint_path)])
        chkpt_path = hyper_params.train.checkpoint_path + "/" + chkpts[-1]
        print("Latest found checkpoint: {}".format(chkpt_path))

    if not os.path.exists(chkpt_path):
        os.makedirs(chkpt_path)

    # If a hyperparam config is used, load and back up the code.
    if Model is None:
        model_backup = os.path.join(chkpt_path, "model.py")
        # hyper_params.arch.model holds a dotted module path; turn it into a
        # file path (with .py suffix) so the source file can be backed up.
        copyfile(hyper_params.arch.model.replace(".", os.sep) + ".py", model_backup)
        arch_model = __import__(hyper_params.arch.model, fromlist=["Model"])
        Model = arch_model.Model
    if create_loss is None:
        loss_backup = os.path.join(chkpt_path, "loss.py")
        copyfile(hyper_params.arch.loss.replace(".", os.sep) + ".py", loss_backup)
        arch_loss = __import__(hyper_params.arch.loss, fromlist=["create_loss"])
        create_loss = arch_loss.create_loss

    # Load training data
    print("Load data")
    if training_data is None:
        training_data = create_input_fn(os.path.join(hyper_params.train.tf_records_path, PHASE_TRAIN),
                                        hyper_params.train.batch_size)
    if validation_data is None:
        validation_data = create_input_fn(os.path.join(hyper_params.train.tf_records_path, PHASE_VALIDATION),
                                          hyper_params.train.batch_size)

    # Write hyper parameters to be able to track what config you had.
with open(chkpt_path + "/hyperparameters.json", "w") as json_file: json_file.write(json.dumps(hyper_params.to_dict(), indent=4, sort_keys=True)) estimator_spec = create_tf_estimator_spec(chkpt_path, Model, create_loss, inline_plotting) # Create a run configuration config = None if hyper_params.train.get("distributed", False): distribution = tf.contrib.distribute.MirroredStrategy() config = tf.estimator.RunConfig(model_dir=chkpt_path, save_summary_steps=hyper_params.train.summary_steps, train_distribute=distribution, save_checkpoints_steps=hyper_params.train.save_checkpoint_steps, keep_checkpoint_max=hyper_params.train.keep_checkpoint_max, keep_checkpoint_every_n_hours=1) else: config = tf.estimator.RunConfig(session_config=session_config, model_dir=chkpt_path, save_summary_steps=hyper_params.train.summary_steps, save_checkpoints_steps=hyper_params.train.save_checkpoint_steps, keep_checkpoint_max=hyper_params.train.keep_checkpoint_max, keep_checkpoint_every_n_hours=1) # Create the estimator. estimator = None if hyper_params.train.get("warm_start_checkpoint", None) is not None: warm_start_dir = hyper_params.train.warm_start_checkpoint estimator = tf.estimator.Estimator(estimator_spec, config=config, warm_start_from=warm_start_dir, params=hyper_params) else: estimator = tf.estimator.Estimator(estimator_spec, config=config, params=hyper_params) # Specify training and actually train. throttle_secs = hyper_params.train.get("throttle_secs", 120) train_spec = tf.estimator.TrainSpec(input_fn=training_data, max_steps=hyper_params.train.steps) eval_spec = tf.estimator.EvalSpec(input_fn=validation_data, throttle_secs=throttle_secs) print("Start training") tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) return estimator
0.004344
def gg_ipython(plot, data, width=IPYTHON_IMAGE_SIZE, height=None,
               *args, **kwargs):
    """Render pygg in an IPython notebook

    Allows one to say things like:

    import pygg
    p = pygg.ggplot('diamonds', pygg.aes(x='carat', y='price', color='clarity'))
    p += pygg.geom_point(alpha=0.5, size = 2)
    p += pygg.scale_x_log10(limits=[1, 2])
    pygg.gg_ipython(p, data=None, quiet=True)

    directly in an IPython notebook and see the resulting ggplot2 image
    displayed inline.  This function prints a warning if the IPython library
    cannot be imported.  The ggplot2 image is rendered as a raster image and
    not as a vectorized graphics object right now.

    Note that by default gg_ipython sets the output height and width to
    IPYTHON_IMAGE_SIZE pixels as this is a reasonable default size for a
    browser-based notebook.  Height is by default None, indicating that
    height should be set to the same value as width.  It is possible to
    adjust the aspect ratio of the output by providing non-None values for
    both width and height.
    """
    try:
        import IPython.display
        tmp_image_filename = tempfile.NamedTemporaryFile(suffix='.jpg').name

        # Quiet by default
        kwargs['quiet'] = kwargs.get('quiet', True)

        if width is None:
            raise ValueError("Width cannot be None")
        height = height or width
        w_in, h_in = size_r_img_inches(width, height)
        ggsave(name=tmp_image_filename, plot=plot, data=data, dpi=600,
               width=w_in, height=h_in, units=esc('in'), *args, **kwargs)
        return IPython.display.Image(filename=tmp_image_filename,
                                     width=width,
                                     height=height)
    except ImportError:
        print "Couldn't load IPython library; integration is disabled"
0.001094
def verify_raw_data(raw_data, pubkey_hex, sigb64):
    """
    Verify the signature over a string, given the public key
    and base64-encoded signature.
    Return True on success.
    Return False on error.
    """
    if not isinstance(raw_data, (str, unicode)):
        raise ValueError("data is not a string")

    raw_data = str(raw_data)
    vi = ECVerifier(pubkey_hex, sigb64)
    vi.update(raw_data)
    return vi.verify()
0.00232
def __on_message(self, client, userdata, msg):  # pylint: disable=W0613
    """
    A message has been received from a server

    :param client: Client that received the message
    :param userdata: User data (unused)
    :param msg: An MQTTMessage bean
    """
    # Notify the caller, if any
    if self.on_message is not None:
        try:
            self.on_message(self, msg)
        except Exception as ex:
            _logger.exception("Error notifying MQTT listener: %s", ex)
0.005566
def create(server_context, domain_definition, container_path=None): # type: (ServerContext, dict, str) -> Domain """ Create a domain :param server_context: A LabKey server context. See utils.create_server_context. :param domain_definition: A domain definition. :param container_path: labkey container path if not already set in context :return: Domain """ url = server_context.build_url('property', 'createDomain.api', container_path=container_path) headers = { 'Content-Type': 'application/json' } domain = None raw_domain = server_context.make_request(url, json_dumps(domain_definition), headers=headers) if raw_domain is not None: domain = Domain.from_data(raw_domain) return domain
0.005222
def set_motion_detect(self, enable): """Set motion detection.""" if enable: return api.request_motion_detection_enable(self.sync.blink, self.network_id, self.camera_id) return api.request_motion_detection_disable(self.sync.blink, self.network_id, self.camera_id)
0.003906
def softplus_inverse(x, name=None): """Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)). Mathematically this op is equivalent to: ```none softplus_inverse = log(exp(x) - 1.) ``` Args: x: `Tensor`. Non-negative (not enforced), floating-point. name: A name for the operation (optional). Returns: `Tensor`. Has the same type/shape as input `x`. """ with tf.name_scope(name or "softplus_inverse"): x = tf.convert_to_tensor(value=x, name="x") # We begin by deriving a more numerically stable softplus_inverse: # x = softplus(y) = Log[1 + exp{y}], (which means x > 0). # ==> exp{x} = 1 + exp{y} (1) # ==> y = Log[exp{x} - 1] (2) # = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}] # = Log[(1 - exp{-x}) / 1] + Log[exp{x}] # = Log[1 - exp{-x}] + x (3) # (2) is the "obvious" inverse, but (3) is more stable than (2) for large x. # For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will # be zero. To fix this, we use 1 - exp{-x} approx x for small x > 0. # # In addition to the numerically stable derivation above, we clamp # small/large values to be congruent with the logic in: # tensorflow/core/kernels/softplus_op.h # # Finally, we set the input to one whenever the input is too large or too # small. This ensures that no unchosen codepath is +/- inf. This is # necessary to ensure the gradient doesn't get NaNs. Recall that the # gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false` # thus an `inf` in an unselected path results in `0*inf=nan`. We are careful # to overwrite `x` with ones only when we will never actually use this # value. Note that we use ones and not zeros since `log(expm1(0.)) = -inf`. threshold = np.log(np.finfo(dtype_util.as_numpy_dtype(x.dtype)).eps) + 2. is_too_small = tf.less(x, np.exp(threshold)) is_too_large = tf.greater(x, -threshold) too_small_value = tf.math.log(x) too_large_value = x # This `where` will ultimately be a NOP because we won't select this # codepath whenever we used the surrogate `ones_like`. x = tf.where(tf.logical_or(is_too_small, is_too_large), tf.ones_like(x), x) y = x + tf.math.log(-tf.math.expm1(-x)) # == log(expm1(x)) return tf.where(is_too_small, too_small_value, tf.where(is_too_large, too_large_value, y))
0.00199
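A quick plain-numpy check of the identity the comments above derive, log(expm1(x)) = x + log(1 - exp(-x)); this sketch omits the TF version's clamping of extreme inputs:

import numpy as np

def softplus(x):
    # Numerically stable log(1 + exp(x)).
    return np.maximum(x, 0) + np.log1p(np.exp(-np.abs(x)))

def softplus_inverse(x):
    # log(exp(x) - 1) rewritten as x + log(1 - exp(-x)), stable for large x.
    return x + np.log(-np.expm1(-x))

x = np.array([0.1, 1.0, 10.0, 30.0])
print(softplus_inverse(softplus(x)))  # recovers [0.1, 1.0, 10.0, 30.0]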
def file_matches_regexps(filename, patterns): """Does this filename match any of the regular expressions?""" return any(re.match(pat, filename) for pat in patterns)
0.005814
def email(self, client_id, email, send='link', auth_params=None):
    """Start flow sending an email.

    Given the user email address, it will send an email with:

      - A link (default, send:"link"). You can then authenticate this user
        by opening the link; they will be automatically logged in to the
        application. Optionally, you can append/override parameters to the
        link (like scope, redirect_uri, protocol, response_type, etc.)
        using the auth_params dict.

      - A verification code (send:"code"). You can then authenticate with
        this user using email as username and code as password.

    Args:
        client_id (str): Client Id of the application.

        email (str): Email address.

        send (str, optional): Can be: 'link' or 'code'. Defaults to 'link'.

        auth_params (dict, optional): Parameters to append or override.
    """
    return self.post(
        'https://{}/passwordless/start'.format(self.domain),
        data={
            'client_id': client_id,
            'connection': 'email',
            'email': email,
            'send': send,
            'authParams': auth_params
        },
        headers={'Content-Type': 'application/json'}
    )
0.001504
def encode_utf8(s, f): """UTF-8 encodes string `s` to file-like object `f` according to the MQTT Version 3.1.1 specification in section 1.5.3. The maximum length for the encoded string is 2**16-1 (65535) bytes. An assertion error will result if the encoded string is longer. Parameters ---------- s: str String to be encoded. f: file File-like object. Returns ------- int Number of bytes written to f. """ encode = codecs.getencoder('utf8') encoded_str_bytes, num_encoded_chars = encode(s) num_encoded_str_bytes = len(encoded_str_bytes) assert 0 <= num_encoded_str_bytes <= 2**16-1 num_encoded_bytes = num_encoded_str_bytes + 2 f.write(FIELD_U8.pack((num_encoded_str_bytes & 0xff00) >> 8)) f.write(FIELD_U8.pack(num_encoded_str_bytes & 0x00ff)) f.write(encoded_str_bytes) return num_encoded_bytes
0.001099
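A hypothetical usage sketch for encode_utf8 above, assuming the snippet is in scope together with its module-level FIELD_U8 struct (presumably a one-byte packer defined elsewhere in that file); it writes to an in-memory buffer and checks the two-byte big-endian length prefix required by MQTT 3.1.1:

import io
import struct

buf = io.BytesIO()
n = encode_utf8(u'MQTT', buf)   # encode_utf8 from the snippet above
data = buf.getvalue()
assert n == len(data) == 6      # 2-byte length prefix + 4 payload bytes
(length,) = struct.unpack('>H', data[:2])
assert length == 4              # prefix holds the encoded byte count
print(repr(data))               # b'\x00\x04MQTT'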
def match(self, methods, request_method): """Check for a method match. :param methods: A method or tuple of methods to match against. :param request_method: The method to check for a match. :returns: An empty :class:`dict` in the case of a match, or ``None`` if there is no matching handler for the given method. Example: >>> MethodRouter().match(('GET', 'HEAD'), 'HEAD') {} >>> MethodRouter().match('POST', 'DELETE') """ if isinstance(methods, basestring): return {} if request_method == methods else None return {} if request_method in methods else None
0.002933
def build_lst(a, b):
    """
    function to be folded over a list (with initial value `None`)

    produces one of:
        1. `None`
        2. A single value
        3. A list of all values
    """
    if type(a) is list:
        return a + [b]
    elif a is not None:
        # Explicit None check: a falsy accumulator such as 0 or '' is
        # still a real value and must not be dropped.
        return [a, b]
    else:
        return b
0.003247
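Folding build_lst with functools.reduce and a None seed illustrates the three documented outcomes (build_lst taken from the snippet above):

from functools import reduce

print(reduce(build_lst, [], None))          # None        (case 1)
print(reduce(build_lst, [7], None))         # 7           (case 2)
print(reduce(build_lst, [7, 8, 9], None))   # [7, 8, 9]   (case 3)
print(reduce(build_lst, [0, 1], None))      # [0, 1], kept thanks to the
                                            # explicit `is not None` check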