Columns:
text: string, lengths 78 to 104k
score: float64, range 0 to 0.18
def get_assignable_vault_ids(self, vault_id):
    """Gets a list of vaults including and under the given vault node in
    which any authorization can be assigned.

    arg:    vault_id (osid.id.Id): the ``Id`` of the ``Vault``
    return: (osid.id.IdList) - list of assignable vault ``Ids``
    raise:  NullArgument - ``vault_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids
    # This will likely be overridden by an authorization adapter
    mgr = self._get_provider_manager('AUTHORIZATION', local=True)
    lookup_session = mgr.get_vault_lookup_session(proxy=self._proxy)
    vaults = lookup_session.get_vaults()
    id_list = []
    for vault in vaults:
        id_list.append(vault.get_id())
    return IdList(id_list)
0.003021
def id_pools_vsn_ranges(self):
    """
    Gets the IdPoolsRanges API Client for VSN Ranges.

    Returns:
        IdPoolsRanges:
    """
    if not self.__id_pools_vsn_ranges:
        self.__id_pools_vsn_ranges = IdPoolsRanges('vsn', self.__connection)
    return self.__id_pools_vsn_ranges
0.009288
def compareBulk(self, retina_name, body):
    """Bulk compare

    Args:
        retina_name, str: The retina name (required)
        body, ExpressionOperation: Bulk comparison of elements 2 by 2 (required)

    Returns: Array[Metric]
    """
    resourcePath = '/compare/bulk'
    method = 'POST'
    queryParams = {}
    headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
    postData = None

    queryParams['retina_name'] = retina_name
    postData = body
    response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
    return [metric.Metric(**r) for r in response.json()]
0.007052
def creds(provider):
    '''
    Return the credentials for AWS signing. This could be just the id and key
    specified in the provider configuration, or if the id or key is set to the
    literal string 'use-instance-role-credentials' creds will pull the instance
    role credentials from the meta data, cache them, and provide them instead.
    '''
    # Declare globals
    global __AccessKeyId__, __SecretAccessKey__, __Token__, __Expiration__

    ret_credentials = ()

    # if id or key is 'use-instance-role-credentials', pull them from meta-data
    # if needed
    if provider['id'] == IROLE_CODE or provider['key'] == IROLE_CODE:
        # Check to see if we have cached credentials that are still good
        if __Expiration__ != '':
            timenow = datetime.utcnow()
            timestamp = timenow.strftime('%Y-%m-%dT%H:%M:%SZ')
            if timestamp < __Expiration__:
                # Current timestamp less than expiration of cached credentials
                return __AccessKeyId__, __SecretAccessKey__, __Token__

        # We don't have any cached credentials, or they are expired, get them
        # Connections to instance meta-data must fail fast and never be proxied
        try:
            result = requests.get(
                "http://169.254.169.254/latest/meta-data/iam/security-credentials/",
                proxies={'http': ''},
                timeout=AWS_METADATA_TIMEOUT,
            )
            result.raise_for_status()
            role = result.text
        except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError):
            return provider['id'], provider['key'], ''

        try:
            result = requests.get(
                "http://169.254.169.254/latest/meta-data/iam/security-credentials/{0}".format(role),
                proxies={'http': ''},
                timeout=AWS_METADATA_TIMEOUT,
            )
            result.raise_for_status()
        except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError):
            return provider['id'], provider['key'], ''

        data = result.json()
        __AccessKeyId__ = data['AccessKeyId']
        __SecretAccessKey__ = data['SecretAccessKey']
        __Token__ = data['Token']
        __Expiration__ = data['Expiration']

        ret_credentials = __AccessKeyId__, __SecretAccessKey__, __Token__
    else:
        ret_credentials = provider['id'], provider['key'], ''

    if provider.get('role_arn') is not None:
        provider_shadow = provider.copy()
        provider_shadow.pop("role_arn", None)

        log.info("Assuming the role: %s", provider.get('role_arn'))
        ret_credentials = assumed_creds(provider_shadow,
                                        role_arn=provider.get('role_arn'),
                                        location='us-east-1')

    return ret_credentials
0.002558
def _fieldnames_to_colnames(model_cls, fieldnames):
    """Get the names of columns referenced by the given model fields."""
    get_field = model_cls._meta.get_field
    fields = map(get_field, fieldnames)
    return {f.column for f in fields}
0.007692
def format(self):
    """
    The |ChartFormat| object providing access to the shape formatting
    properties of this data point, such as line and fill.
    """
    dPt = self._ser.get_or_add_dPt_for_point(self._idx)
    return ChartFormat(dPt)
0.007435
def integral(A=None, dF=None, F=None, axis=0, trapez=False, cumulative=False):
    '''
    Turns an array A of length N (the function values in N points)
    and an array dF of length N-1 (the masses of the N-1 intervals)
    into an array of length N (the integral \int A dF at N points, with first entry 0)

    :param A: Integrand (optional, default ones, length N)
    :param dF: Integrator (optional, default ones, length N-1)
    :param F: Alternative to dF (optional, length N)
    :param trapez: Use trapezoidal rule (else left point)
    '''
    ndim = max(v.ndim for v in (A, dF, F) if v is not None)

    def broadcast(x):
        new_shape = [1] * ndim
        new_shape[axis] = -1
        return np.reshape(x, new_shape)

    if F is not None:
        assert dF is None
        if F.ndim < ndim:
            F = broadcast(F)
        N = F.shape[axis]
        dF = F.take(indices=range(1, N), axis=axis) - F.take(indices=range(N - 1), axis=axis)
    elif dF is not None:
        if dF.ndim < ndim:
            dF = broadcast(dF)
        N = dF.shape[axis] + 1
    else:
        if A.ndim < ndim:
            A = broadcast(A)
        N = A.shape[axis]

    if A is not None:
        if trapez:
            midA = (A.take(indices=range(1, N), axis=axis) + A.take(indices=range(N - 1), axis=axis)) / 2
        else:
            midA = A.take(indices=range(N - 1), axis=axis)
        if dF is not None:
            dY = midA * dF
        else:
            dY = midA
    else:
        dY = dF

    pad_shape = list(dY.shape)
    pad_shape[axis] = 1
    pad = np.zeros(pad_shape)
    if cumulative:
        return np.concatenate((pad, np.cumsum(dY, axis=axis)), axis=axis)
    else:
        return np.sum(dY, axis=axis)
0.033314
def sort_values(self, by, ascending=True):
    """Summary

    Returns:
        TYPE: Description
    """
    if len(self.column_types) == 1:
        vec_type = [WeldVec(self.column_types[0])]
    else:
        vec_type = [WeldVec(WeldStruct(self.column_types))]

    if len(self.column_names) > 1:
        key_index = self.column_names.index(by)
    else:
        key_index = None

    return GroupedDataFrameWeld(
        grizzly_impl.groupby_sort(
            self.columns,
            self.column_types,
            self.grouping_columns,
            self.grouping_column_types,
            key_index,
            ascending
        ),
        self.grouping_column_names,
        self.column_names,
        self.grouping_column_types,
        vec_type
    )
0.003468
def parse_args(args):
    """
    Parse command line parameters

    :param args: command line parameters as list of strings
    :return: command line parameters as :obj:`argparse.Namespace`
    """
    parser = argparse.ArgumentParser(
        description="Build html reveal.js slides from markdown in docs/ dir")
    parser.add_argument(
        '-v', '--verbose',
        help='Whether to show progress messages on stdout, including HTML',
        action='store_true')
    parser.add_argument(
        '--version',
        help='print twip package version and exit.',
        action='version',
        version='twip {ver}'.format(ver=__version__))
    parser.add_argument(
        '-b', '--blog_path',
        help='Path to source markdown files. Must contain an `images` subdir',
        default=BLOG_PATH)
    parser.add_argument(
        '-s', '--slide_path',
        help='Path to dir for output slides (HTML). An images subdir will be added. A slides subdir should already exist.',
        default=DOCS_PATH)
    parser.add_argument(
        '-p', '--presentation',
        help='Source markdown base file name (without .md extension). The HTML slides will share the same basename.',
        default='2015-10-27-Hacking-Oregon-Hidden-Political-Connections')
    return parser.parse_args(args)
0.002252
def load(self, steps_dir=None, step_file=None, step_list=None):
    """Load CWL steps into the WorkflowGenerator's steps library.

    Adds steps (command line tools and workflows) to the
    ``WorkflowGenerator``'s steps library. These steps can be used to
    create workflows.

    Args:
        steps_dir (str): path to directory containing CWL files. All CWL in
            the directory are loaded.
        step_file (str): path to a file containing a CWL step that will be
            added to the steps library.
    """
    self._closed()
    self.steps_library.load(steps_dir=steps_dir, step_file=step_file,
                            step_list=step_list)
0.002786
def is_philips(dicom_input):
    """
    Use this function to detect if a dicom series is a philips dataset

    :param dicom_input: directory with dicom files for 1 scan of a dicom_header
    """
    # read dicom header
    header = dicom_input[0]

    if 'Manufacturer' not in header or 'Modality' not in header:
        return False  # we try generic conversion in these cases

    # check if Modality is mr
    if header.Modality.upper() != 'MR':
        return False

    # check if manufacturer is Philips
    if 'PHILIPS' not in header.Manufacturer.upper():
        return False

    return True
0.001661
def intern_unbound(
    ns: sym.Symbol, name: sym.Symbol, dynamic: bool = False, meta=None
) -> "Var":
    """Create a new unbound `Var` instance to the symbol `name` in namespace `ns`."""
    var_ns = Namespace.get_or_create(ns)
    return var_ns.intern(name, Var(var_ns, name, dynamic=dynamic, meta=meta))
0.015291
def reverseCommit(self):
    """
    Remove the inserted character(s).
    """
    tc = self.qteWidget.textCursor()

    # Select the area from before the insertion to after the insertion,
    # and remove it.
    tc.setPosition(self.cursorPos1, QtGui.QTextCursor.MoveAnchor)
    tc.setPosition(self.cursorPos2, QtGui.QTextCursor.KeepAnchor)
    tc.removeSelectedText()

    # Add the previously selected text (if there was any). Note that the
    # text will not be 'selected' (ie. highlighted) this time.
    if len(self.selText) > 0:
        tc.setPosition(self.selStart)
        tc.insertHtml(self.selText)
0.002999
def change_view(self, request, object_id, form_url='', extra_context=None):
    """
    Override change view to add extra context enabling moderate tool.
    """
    context = {
        'has_moderate_tool': True
    }
    if extra_context:
        context.update(extra_context)
    return super(AdminModeratorMixin, self).change_view(
        request=request,
        object_id=object_id,
        form_url=form_url,
        extra_context=context
    )
0.003945
def bulk_get(cls, exports, api=None):
    """
    Retrieve exports in bulk.
    :param exports: Exports to be retrieved.
    :param api: Api instance.
    :return: list of ExportBulkRecord objects.
    """
    api = api or cls._API
    export_ids = [Transform.to_export(export) for export in exports]
    data = {'export_ids': export_ids}
    response = api.post(url=cls._URL['bulk_get'], data=data)
    return ExportBulkRecord.parse_records(response=response, api=api)
0.003891
def chunks(seq, size):
    """ simple two-line alternative to `ubelt.chunks` """
    return (seq[pos:pos + size] for pos in range(0, len(seq), size))
0.006711
def arc_data(self):
    """Return the map from filenames to lists of line number pairs."""
    return dict(
        [(f, sorted(amap.keys())) for f, amap in iitems(self.arcs)]
    )
0.00995
def __decode_data(self):
    """!
    @brief Decodes data from CF-tree features.

    """

    self.__clusters = [ [] for _ in range(self.__number_clusters) ];
    self.__noise = [];

    for index_point in range(0, len(self.__pointer_data)):
        (_, cluster_index) = self.__get_nearest_feature(self.__pointer_data[index_point], self.__features);
        self.__clusters[cluster_index].append(index_point);
0.026477
def add_version_tracking(self, info_id, version, date, command_line=''):
    """
    Add a line with information about which software that was run and when
    to the header.

    Arguments:
        info_id (str): The id of the info line
        version (str): The version of the software used
        date (str): Date when software was run
        command_line (str): The command line that was used for run
    """
    other_line = '##Software=<ID={0},Version={1},Date="{2}",CommandLineOptions="{3}">'.format(
        info_id, version, date, command_line)
    self.other_dict[info_id] = other_line
    return
0.010294
def _parse_country_file(self, cty_file, country_mapping_filename=None):
    """
    Parse the content of a PLIST file from country-files.com and return the
    parsed values in dictionaries. Country-files.com provides Prefixes and
    Exceptions.
    """
    import plistlib

    cty_list = None
    entities = {}
    exceptions = {}
    prefixes = {}
    exceptions_index = {}
    prefixes_index = {}
    exceptions_counter = 0
    prefixes_counter = 0

    mapping = None
    with open(country_mapping_filename, "r") as f:
        mapping = json.loads(f.read(), encoding='UTF-8')

    cty_list = plistlib.readPlist(cty_file)

    for item in cty_list:
        entry = {}
        call = str(item)
        entry[const.COUNTRY] = unicode(cty_list[item]["Country"])
        if mapping:
            entry[const.ADIF] = int(mapping[cty_list[item]["Country"]])
        entry[const.CQZ] = int(cty_list[item]["CQZone"])
        entry[const.ITUZ] = int(cty_list[item]["ITUZone"])
        entry[const.CONTINENT] = unicode(cty_list[item]["Continent"])
        entry[const.LATITUDE] = float(cty_list[item]["Latitude"])
        entry[const.LONGITUDE] = float(cty_list[item]["Longitude"]) * (-1)

        if cty_list[item]["ExactCallsign"]:
            if call in exceptions_index.keys():
                exceptions_index[call].append(exceptions_counter)
            else:
                exceptions_index[call] = [exceptions_counter]
            exceptions[exceptions_counter] = entry
            exceptions_counter += 1
        else:
            if call in prefixes_index.keys():
                prefixes_index[call].append(prefixes_counter)
            else:
                prefixes_index[call] = [prefixes_counter]
            prefixes[prefixes_counter] = entry
            prefixes_counter += 1

    self._logger.debug(str(len(prefixes)) + " Prefixes added")
    self._logger.debug(str(len(prefixes_index)) + " Prefixes in Index")
    self._logger.debug(str(len(exceptions)) + " Exceptions added")
    self._logger.debug(str(len(exceptions_index)) + " Exceptions in Index")

    result = {
        "prefixes": prefixes,
        "exceptions": exceptions,
        "prefixes_index": prefixes_index,
        "exceptions_index": exceptions_index,
    }

    return result
0.003645
def create_objective(dist, abscissas):
    """Create objective function."""
    abscissas_ = numpy.array(abscissas[1:-1])

    def obj(absisa):
        """Local objective function."""
        out = -numpy.sqrt(dist.pdf(absisa))
        out *= numpy.prod(numpy.abs(abscissas_ - absisa))
        return out

    return obj
0.006289
def plotres(psr, deleted=False, group=None, **kwargs):
    """Plot residuals, compute unweighted rms residual."""

    res, t, errs = psr.residuals(), psr.toas(), psr.toaerrs

    if (not deleted) and N.any(psr.deleted != 0):
        res, t, errs = res[psr.deleted == 0], t[psr.deleted == 0], errs[psr.deleted == 0]
        print("Plotting {0}/{1} nondeleted points.".format(len(res), psr.nobs))

    meanres = math.sqrt(N.mean(res**2)) / 1e-6

    if group is None:
        i = N.argsort(t)
        P.errorbar(t[i], res[i] / 1e-6, yerr=errs[i], fmt='x', **kwargs)
    else:
        if (not deleted) and N.any(psr.deleted):
            flagmask = psr.flagvals(group)[~psr.deleted]
        else:
            flagmask = psr.flagvals(group)

        unique = list(set(flagmask))

        for flagval in unique:
            f = (flagmask == flagval)
            flagres, flagt, flagerrs = res[f], t[f], errs[f]
            i = N.argsort(flagt)
            P.errorbar(flagt[i], flagres[i] / 1e-6, yerr=flagerrs[i], fmt='x', **kwargs)

        P.legend(unique, numpoints=1, bbox_to_anchor=(1.1, 1.1))

    P.xlabel('MJD')
    P.ylabel('res [us]')
    P.title("{0} - rms res = {1:.2f} us".format(psr.name, meanres))
0.019786
def signal(sig, action):
    """
    The point of this module and method is to decouple signal handlers
    from each other. Standard way to deal with handlers is to always
    store the old handler and call it. It creates a chain of handlers,
    making it impossible to later remove the handler.

    This method behaves like signal.signal() from standard python library.
    It always returns SIG_DFL indicating that the new handler is not
    supposed to call the old one.
    """
    assert callable(action), ("Second argument of signal() needs to be a "
                              "callable, got %r instead" % (action, ))
    global _handlers

    _install_handler(sig)

    if action in _handlers[sig]:
        log.debug('signal', "Handler for signal %s already registered. %r",
                  sig, action)
        return SIG_DFL

    _handlers[sig][1].append(action)
    return SIG_DFL
0.001115
def publish(self):
    '''
    Runs :func:`cleanup` first, then pushes the changes to the
    :attr:`remote`.
    '''
    self.cleanup
    remote = self.remote
    branch = self.branch
    return self.m(
        'pushing changes to %s/%s' % (remote, branch),
        cmdd=dict(
            cmd='git push -u %s %s' % (remote, branch),
            cwd=self.local
        ),
        more=dict(remote=remote, branch=branch)
    )
0.004107
def use_plenary_assessment_offered_view(self):
    """Pass through to provider AssessmentOfferedLookupSession.use_plenary_assessment_offered_view"""
    self._object_views['assessment_offered'] = PLENARY
    # self._get_provider_session('assessment_offered_lookup_session')  # To make sure the session is tracked
    for session in self._get_provider_sessions():
        try:
            session.use_plenary_assessment_offered_view()
        except AttributeError:
            pass
0.007813
def prune_loop_for_kic(self, loops_segments, search_radius, expected_min_loop_length=None,
                       expected_max_loop_length=None, generate_pymol_session=False):
    '''A wrapper for prune_structure_according_to_loop_definitions suitable for the
       Rosetta kinematic closure (KIC) loop modeling method.'''
    return self.prune_structure_according_to_loop_definitions(
        loops_segments, search_radius,
        expected_min_loop_length=expected_min_loop_length,
        expected_max_loop_length=expected_max_loop_length,
        generate_pymol_session=generate_pymol_session,
        check_sequence=True,
        keep_Ca_buttress_atoms=True)
0.034483
def get_permission(self, username, virtual_host):
    """Get User permissions for the configured virtual host.

    :param str username: Username
    :param str virtual_host: Virtual host name

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :rtype: dict
    """
    virtual_host = quote(virtual_host, '')
    return self.http_client.get(API_USER_VIRTUAL_HOST_PERMISSIONS % (
        virtual_host,
        username
    ))
0.00289
def extract_subsection(im, shape):
    r"""
    Extracts the middle section of an image

    Parameters
    ----------
    im : ND-array
        Image from which to extract the subsection
    shape : array_like
        Can either specify the size of the extracted section or the
        fractional size of the image to extract.

    Returns
    -------
    image : ND-array
        An ND-array of size given by the ``shape`` argument, taken from the
        center of the image.

    Examples
    --------
    >>> import scipy as sp
    >>> from porespy.tools import extract_subsection
    >>> im = sp.array([[1, 1, 1, 1], [1, 2, 2, 2], [1, 2, 3, 3], [1, 2, 3, 4]])
    >>> print(im)
    [[1 1 1 1]
     [1 2 2 2]
     [1 2 3 3]
     [1 2 3 4]]
    >>> im = extract_subsection(im=im, shape=[2, 2])
    >>> print(im)
    [[2 2]
     [2 3]]

    """
    # Check if shape was given as a fraction
    shape = sp.array(shape)
    if shape[0] < 1:
        shape = sp.array(im.shape) * shape
    center = sp.array(im.shape) / 2
    s_im = []
    for dim in range(im.ndim):
        r = shape[dim] / 2
        lower_im = sp.amax((center[dim] - r, 0))
        upper_im = sp.amin((center[dim] + r, im.shape[dim]))
        s_im.append(slice(int(lower_im), int(upper_im)))
    return im[tuple(s_im)]
0.000778
def get_vocab(text, score, max_feats=750, max_feats2=200):
    """
    Uses a fisher test to find words that are significant in that they separate
    high scoring essays from low scoring essays.
    text is a list of input essays.
    score is a list of scores, with score[n] corresponding to text[n]
    max_feats is the maximum number of features to consider in the first pass
    max_feats2 is the maximum number of features to consider in the second (final) pass
    Returns a list of words that constitute the significant vocabulary
    """
    dict = CountVectorizer(ngram_range=(1, 2), max_features=max_feats)
    dict_mat = dict.fit_transform(text)
    set_score = numpy.asarray(score, dtype=numpy.int)
    med_score = numpy.median(set_score)
    new_score = set_score
    if(med_score == 0):
        med_score = 1
    new_score[set_score < med_score] = 0
    new_score[set_score >= med_score] = 1

    fish_vals = []
    for col_num in range(0, dict_mat.shape[1]):
        loop_vec = dict_mat.getcol(col_num).toarray()
        good_loop_vec = loop_vec[new_score == 1]
        bad_loop_vec = loop_vec[new_score == 0]
        good_loop_present = len(good_loop_vec[good_loop_vec > 0])
        good_loop_missing = len(good_loop_vec[good_loop_vec == 0])
        bad_loop_present = len(bad_loop_vec[bad_loop_vec > 0])
        bad_loop_missing = len(bad_loop_vec[bad_loop_vec == 0])
        fish_val = pvalue(good_loop_present, bad_loop_present,
                          good_loop_missing, bad_loop_missing).two_tail
        fish_vals.append(fish_val)

    cutoff = 1
    if(len(fish_vals) > max_feats2):
        cutoff = sorted(fish_vals)[max_feats2]
    good_cols = numpy.asarray([num for num in range(0, dict_mat.shape[1])
                               if fish_vals[num] <= cutoff])

    getVar = lambda searchList, ind: [searchList[i] for i in ind]
    vocab = getVar(dict.get_feature_names(), good_cols)

    return vocab
0.003203
def filter(self, x):
    """
    Filter the signal using second-order sections
    """
    y = signal.sosfilt(self.sos, x)
    return y
0.026144
def child_removed(self, child):
    """ Handle the child removed event from the declaration.

    This handler will unparent the child toolkit widget. Subclasses
    which need more control should reimplement this method.

    """
    super(UiKitView, self).child_removed(child)
    if child.widget is not None:
        child.widget.removeFromSuperview()
0.005236
def psql(self, args):
    r"""Invoke psql, passing the given command-line arguments.

    Typical <args> values: ['-c', <sql_string>] or ['-f', <pathname>].
    Connection parameters are taken from self.  STDIN, STDOUT, and STDERR
    are inherited from the parent.

    WARNING: This method uses the psql(1) program, which ignores SQL errors
    by default.  That hides many real errors, making our software less
    reliable.  To overcome this flaw, add this line to the head of your SQL:
    "\set ON_ERROR_STOP TRUE"

    @return: None.  Raises an exception upon error, but *ignores SQL errors*
    unless "\set ON_ERROR_STOP TRUE" is used.
    """
    argv = [
        PostgresFinder.find_root() / 'psql',
        '--quiet',
        '-U', self.user,
        '-h', self.host,
        '-p', self.port,
    ] + args + [self.db_name]
    subprocess.check_call(argv)
0.002079
def update(self, unique_name=values.unset, callback_method=values.unset,
           callback_url=values.unset, friendly_name=values.unset,
           rate_plan=values.unset, status=values.unset,
           commands_callback_method=values.unset,
           commands_callback_url=values.unset,
           sms_fallback_method=values.unset, sms_fallback_url=values.unset,
           sms_method=values.unset, sms_url=values.unset,
           voice_fallback_method=values.unset, voice_fallback_url=values.unset,
           voice_method=values.unset, voice_url=values.unset):
    """
    Update the SimInstance

    :param unicode unique_name: The unique_name
    :param unicode callback_method: The callback_method
    :param unicode callback_url: The callback_url
    :param unicode friendly_name: The friendly_name
    :param unicode rate_plan: The rate_plan
    :param unicode status: The status
    :param unicode commands_callback_method: The commands_callback_method
    :param unicode commands_callback_url: The commands_callback_url
    :param unicode sms_fallback_method: The sms_fallback_method
    :param unicode sms_fallback_url: The sms_fallback_url
    :param unicode sms_method: The sms_method
    :param unicode sms_url: The sms_url
    :param unicode voice_fallback_method: The voice_fallback_method
    :param unicode voice_fallback_url: The voice_fallback_url
    :param unicode voice_method: The voice_method
    :param unicode voice_url: The voice_url

    :returns: Updated SimInstance
    :rtype: twilio.rest.preview.wireless.sim.SimInstance
    """
    return self._proxy.update(
        unique_name=unique_name,
        callback_method=callback_method,
        callback_url=callback_url,
        friendly_name=friendly_name,
        rate_plan=rate_plan,
        status=status,
        commands_callback_method=commands_callback_method,
        commands_callback_url=commands_callback_url,
        sms_fallback_method=sms_fallback_method,
        sms_fallback_url=sms_fallback_url,
        sms_method=sms_method,
        sms_url=sms_url,
        voice_fallback_method=voice_fallback_method,
        voice_fallback_url=voice_fallback_url,
        voice_method=voice_method,
        voice_url=voice_url,
    )
0.004559
def breeding_wean(request, breeding_id):
    """This view is used to generate a form by which to wean pups which belong to a particular breeding set.

    This view typically is used to wean existing pups.  This includes the MouseID, Cage, Markings, Gender
    and Wean Date fields.  For other fields use the breeding-change page.

    It takes a request in the form /breeding/(breeding_id)/wean/ and returns a form specific to the breeding
    set defined in breeding_id.  breeding_id is the background identification number of the breeding set and
    does not refer to the barcode of any breeding cage.

    This view returns a formset in which one row represents one animal.  To add extra animals to a breeding
    set use /breeding/(breeding_id)/pups/.

    This view is restricted to those with the permission animal.change_animal.
    """
    breeding = Breeding.objects.get(id=breeding_id)
    strain = breeding.Strain
    PupsFormSet = inlineformset_factory(
        Breeding, Animal, extra=0,
        exclude=('Alive', 'Father', 'Mother', 'Breeding', 'Notes', 'Rack', 'Rack_Position',
                 'Strain', 'Background', 'Genotype', 'Death', 'Cause_of_Death', 'Backcross',
                 'Generation'))
    if request.method == "POST":
        formset = PupsFormSet(request.POST, instance=breeding,
                              queryset=Animal.objects.filter(Alive=True, Weaned__isnull=True))
        if formset.is_valid():
            formset.save()
            return HttpResponseRedirect(breeding.get_absolute_url())
    else:
        formset = PupsFormSet(instance=breeding,
                              queryset=Animal.objects.filter(Alive=True, Weaned__isnull=True))
    return render(request, "breeding_wean.html", {"formset": formset, 'breeding': breeding})
0.017032
def eigenvectors_left_samples(self):
    r""" Samples of the left eigenvectors of the hidden transition matrix """
    res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=config.dtype)
    for i in range(self.nsamples):
        res[i, :, :] = self._sampled_hmms[i].eigenvectors_left
    return res
0.012085
def _get_csr_extensions(csr):
    '''
    Returns a list of dicts containing the name, value and critical value of
    any extension contained in a csr object.
    '''
    ret = OrderedDict()

    csrtempfile = tempfile.NamedTemporaryFile()
    csrtempfile.write(csr.as_pem())
    csrtempfile.flush()
    csryaml = _parse_openssl_req(csrtempfile.name)
    csrtempfile.close()

    if csryaml and 'Requested Extensions' in csryaml['Certificate Request']['Data']:
        csrexts = csryaml['Certificate Request']['Data']['Requested Extensions']

        if not csrexts:
            return ret

        for short_name, long_name in six.iteritems(EXT_NAME_MAPPINGS):
            if long_name in csrexts:
                csrexts[short_name] = csrexts[long_name]
                del csrexts[long_name]

        ret = csrexts

    return ret
0.00361
def set_deferred_transfer(self, enable):
    """
    Allow transfers to be delayed and buffered

    By default deferred transfers are turned off.  All reads and
    writes will be completed by the time the function returns.

    When enabled packets are buffered and sent all at once, which
    increases speed.  When memory is written to, the transfer
    might take place immediately, or might take place on a future
    memory write.  This means that an invalid write could cause an
    exception to occur on a later, unrelated write.  To guarantee
    that previous writes are complete call the flush() function.

    The behaviour of read operations is determined by the modes
    READ_START, READ_NOW and READ_END.  The option READ_NOW is the
    default and will cause the read to flush all previous writes,
    and read the data immediately.  To improve performance, multiple
    reads can be made using READ_START and finished later with READ_NOW.
    This allows the reads to be buffered and sent at once.  Note - All
    READ_ENDs must be called before a call using READ_NOW can be made.
    """
    if self._deferred_transfer and not enable:
        self.flush()
    self._deferred_transfer = enable
0.001543
def find_completions_at_cursor(ast_tree, filename, line, col, root_env=gcl.default_env):
    """Find completions at the cursor.

    Return a dict of { name => Completion } objects.
    """
    q = gcl.SourceQuery(filename, line, col - 1)
    rootpath = ast_tree.find_tokens(q)

    if is_identifier_position(rootpath):
        return find_inherited_key_completions(rootpath, root_env)

    try:
        ret = find_deref_completions(rootpath, root_env) or enumerate_scope(rootpath, root_env=root_env)
        assert isinstance(ret, dict)
        return ret
    except gcl.EvaluationError:
        # Probably an unbound value or something--just return an empty list
        return {}
0.014019
def fast_sync_snapshot(working_dir, export_path, private_key, block_number):
    """
    Export all the local state for fast-sync.
    If block_number is given, then the name database
    at that particular block number will be taken.

    The exported tarball will be signed with the given private key,
    and the signature will be appended to the end of the file.

    Return True if we succeed
    Return False if not
    """
    db_paths = None
    found = True
    tmpdir = None
    namedb_path = None

    def _cleanup(path):
        try:
            shutil.rmtree(path)
        except Exception, e:
            log.exception(e)
            log.error("Failed to clear directory {}".format(path))

    def _log_backup(path):
        sb = None
        try:
            sb = os.stat(path)
        except Exception, e:
            log.exception(e)
            log.error("Failed to stat {}".format(path))
            return False

        log.debug("Copy {} ({} bytes)".format(path, sb.st_size))

    def _copy_paths(src_paths, dest_dir):
        for db_path in src_paths:
            dest_path = os.path.join(dest_dir, os.path.basename(db_path))
            try:
                _log_backup(db_path)
                shutil.copy(db_path, dest_path)
            except Exception, e:
                log.exception(e)
                log.error("Failed to copy {} to {}".format(db_path, dest_path))
                return False

        return True

    def _zonefile_copy_progress():
        def inner(src, names):
            # ugly hack to work around the lack of a `nonlocal` keyword in Python 2.x
            for name in names:
                if name.endswith('.txt'):
                    inner.zonefile_count += 1
                    if inner.zonefile_count % 100 == 0:
                        log.debug("{} zone files copied".format(inner.zonefile_count))

            return []

        inner.zonefile_count = 0
        return inner

    _zonefile_copy_progress = _zonefile_copy_progress()

    # make sure we have the appropriate tools
    tools = ['sqlite3']
    for tool in tools:
        rc = os.system("which {} > /dev/null".format(tool))
        if rc != 0:
            log.error("'{}' command not found".format(tool))
            return False

    if not os.path.exists(working_dir):
        log.error("No such directory {}".format(working_dir))
        return False

    if block_number is None:
        # last backup
        all_blocks = BlockstackDB.get_backup_blocks(virtualchain_hooks, working_dir)
        if len(all_blocks) == 0:
            log.error("No backups available")
            return False

        block_number = max(all_blocks)

    log.debug("Snapshot from block {}".format(block_number))

    # use a backup database
    db_paths = BlockstackDB.get_backup_paths(block_number, virtualchain_hooks, working_dir)

    # include namespace keychains
    db = virtualchain_hooks.get_db_state(working_dir)
    namespace_ids = db.get_all_namespace_ids()

    all_namespace_keychain_paths = [os.path.join(working_dir, '{}.keychain'.format(nsid)) for nsid in namespace_ids]
    namespace_keychain_paths = filter(lambda nsp: os.path.exists(nsp), all_namespace_keychain_paths)

    for p in db_paths:
        if not os.path.exists(p):
            log.error("Missing file: '%s'" % p)
            found = False

    if not found:
        return False

    try:
        tmpdir = tempfile.mkdtemp(prefix='.blockstack-export-')
    except Exception, e:
        log.exception(e)
        return False

    # copying from backups
    backups_path = os.path.join(tmpdir, "backups")
    try:
        os.makedirs(backups_path)
    except Exception, e:
        log.exception(e)
        log.error("Failed to make directory {}".format(backups_path))
        _cleanup(tmpdir)
        return False

    rc = _copy_paths(db_paths, backups_path)
    if not rc:
        _cleanup(tmpdir)
        return False

    # copy over zone files
    zonefiles_path = os.path.join(working_dir, "zonefiles")
    dest_path = os.path.join(tmpdir, "zonefiles")
    try:
        shutil.copytree(zonefiles_path, dest_path, ignore=_zonefile_copy_progress)
    except Exception, e:
        log.exception(e)
        log.error('Failed to copy {} to {}'.format(zonefiles_path, dest_path))
        _cleanup(tmpdir)
        return False

    # copy over namespace keychains
    rc = _copy_paths(namespace_keychain_paths, tmpdir)
    if not rc:
        log.error("Failed to copy namespace keychain paths")
        _cleanup(tmpdir)
        return False

    # compress
    export_path = os.path.abspath(export_path)
    res = fast_sync_snapshot_compress(tmpdir, export_path)
    if 'error' in res:
        log.error("Failed to compress {} to {}: {}".format(tmpdir, export_path, res['error']))
        _cleanup(tmpdir)
        return False

    log.debug("Wrote {} bytes".format(os.stat(export_path).st_size))

    # sign
    rc = fast_sync_sign_snapshot(export_path, private_key, first=True)
    if not rc:
        log.error("Failed to sign snapshot {}".format(export_path))
        return False

    _cleanup(tmpdir)
    return True
0.004472
def almost_eq(a, b, bits=32, tol=1, ignore_type=True, pad=0.):
    """
    Almost equal, based on the amount of floating point significand bits.

    Alternative to "a == b" for float numbers and iterables with float numbers,
    and tests for sequence contents (i.e., an elementwise a == b, that also
    works with generators, nested lists, nested generators, etc.). If the type
    of both the contents and the containers should be tested too, set the
    ignore_type keyword arg to False.

    Default version is based on 32 bits IEEE 754 format (23 bits significand).
    Could use 64 bits (52 bits significand) but needs a native float type with
    at least that size in bits.

    If a and b sizes differ, at least one will be padded with the pad input
    value to keep going with the comparison.

    Note
    ----
    Be careful with endless generators!

    """
    if not (ignore_type or type(a) == type(b)):
        return False
    is_it_a = isinstance(a, Iterable)
    is_it_b = isinstance(b, Iterable)
    if is_it_a != is_it_b:
        return False
    if is_it_a:
        return all(almost_eq.bits(ai, bi, bits, tol, ignore_type)
                   for ai, bi in xzip_longest(a, b, fillvalue=pad))
    significand = {32: 23, 64: 52, 80: 63, 128: 112}[bits]  # That doesn't include the sign bit
    power = tol - significand - 1
    return abs(a - b) <= 2 ** power * abs(a + b)
0.008922
def getThings(self):
    """
    Get the things registered in your account

    :return: dict with things registered in the logged in account and API call status
    """
    login_return = self._is_logged_in()
    # raise NameError("Please login first using the login function, with username and password")

    data = {
        "path": "/thing",
        "host": "https://sky.brunt.co"
    }
    resp = self._http.request(data, RequestTypes.GET)
    self._things = resp['things']
    resp.update(login_return)
    return resp
0.011966
def switch_to_rich_text(self):
    """Switch to rich text mode"""
    self.rich_help = True
    self.plain_text.hide()
    self.rich_text.show()
    self.rich_text_action.setChecked(True)
    self.show_source_action.setChecked(False)
0.007605
def set_on_level(self, val):
    """Set on level for the button/group."""
    on_cmd = self._create_set_property_msg("_on_level", 0x06, val)
    self._send_method(on_cmd, self._property_set)
    self._send_method(on_cmd, self._on_message_received)
0.006452
def sequence(self, line_data, child_type=None, reference=None):
    """
    Get the sequence of line_data, according to the columns 'seqid', 'start', 'end', 'strand'.

    Requires fasta reference.
    When used on 'mRNA' type line_data, child_type can be used to specify which kind of sequence to return:
    * child_type=None: pre-mRNA, returns the sequence of line_data from start to end, reverse complement
      according to strand. (default)
    * child_type='exon': mature mRNA, concatenates the sequences of children type 'exon'.
    * child_type='CDS': coding sequence, concatenates the sequences of children type 'CDS'. Use the helper
      function translate(seq) on the returned value to obtain the protein sequence.

    :param line_data: line_data(dict) with line_data['line_index'] or line_index(int)
    :param child_type: None or feature type(string)
    :param reference: If None, will use self.fasta_external or self.fasta_embedded(dict)
    :return: sequence(string)
    """
    # get start node
    reference = reference or self.fasta_external or self.fasta_embedded
    if not reference:
        raise Exception('External or embedded fasta reference needed')

    try:
        line_index = line_data['line_index']
    except TypeError:
        line_index = self.lines[line_data]['line_index']
    ld = self.lines[line_index]
    if ld['type'] != 'feature':
        return None
    seq = reference[ld['seqid']][ld['start'] - 1:ld['end']]
    if ld['strand'] == '-':
        seq = complement(seq[::-1])
    return seq
0.00602
def apis(self):
    """List of API to test"""
    value = self.attributes['apis']
    if isinstance(value, six.string_types):
        value = shlex.split(value)
    return value
0.010152
def update(self):
    """Update the processes stats."""
    # Reset the stats
    self.processlist = []
    self.reset_processcount()

    # Do not process if disable tag is set
    if self.disable_tag:
        return

    # Time since last update (for disk_io rate computation)
    time_since_update = getTimeSinceLastUpdate('process_disk')

    # Grab standard stats
    #####################
    standard_attrs = ['cmdline', 'cpu_percent', 'cpu_times', 'memory_info',
                      'memory_percent', 'name', 'nice', 'pid', 'ppid',
                      'status', 'username', 'status', 'num_threads']
    # io_counters availability: Linux, BSD, Windows, AIX
    if not MACOS and not SUNOS:
        standard_attrs += ['io_counters']
    # gids availability: Unix
    if not WINDOWS:
        standard_attrs += ['gids']

    # and build the processes stats list (psutil>=5.3.0)
    self.processlist = [p.info for p in psutil.process_iter(attrs=standard_attrs,
                                                            ad_value=None)
                        # OS-related processes filter
                        if not (BSD and p.info['name'] == 'idle') and
                        not (WINDOWS and p.info['name'] == 'System Idle Process') and
                        not (MACOS and p.info['name'] == 'kernel_task') and
                        # Kernel threads filter
                        not (self.no_kernel_threads and LINUX and p.info['gids'].real == 0) and
                        # User filter
                        not (self._filter.is_filtered(p.info))]

    # Sort the processes list by the current sort_key
    self.processlist = sort_stats(self.processlist,
                                  sortedby=self.sort_key,
                                  reverse=True)

    # Update the processcount
    self.update_processcount(self.processlist)

    # Loop over processes and add metadata
    first = True
    for proc in self.processlist:
        # Get extended stats, only for top processes (see issue #403).
        if first and not self.disable_extended_tag:
            # - cpu_affinity (Linux, Windows, FreeBSD)
            # - ionice (Linux and Windows > Vista)
            # - num_ctx_switches (not available on Illumos/Solaris)
            # - num_fds (Unix-like)
            # - num_handles (Windows)
            # - memory_maps (only swap, Linux)
            #   https://www.cyberciti.biz/faq/linux-which-process-is-using-swap/
            # - connections (TCP and UDP)
            extended = {}
            try:
                top_process = psutil.Process(proc['pid'])
                extended_stats = ['cpu_affinity', 'ionice', 'num_ctx_switches']
                if LINUX:
                    # num_fds only avalable on Unix system (see issue #1351)
                    extended_stats += ['num_fds']
                if WINDOWS:
                    extended_stats += ['num_handles']

                # Get the extended stats
                extended = top_process.as_dict(attrs=extended_stats,
                                               ad_value=None)

                if LINUX:
                    try:
                        extended['memory_swap'] = sum([v.swap for v in top_process.memory_maps()])
                    except psutil.NoSuchProcess:
                        pass
                    except (psutil.AccessDenied, NotImplementedError):
                        # NotImplementedError: /proc/${PID}/smaps file doesn't exist
                        # on kernel < 2.6.14 or CONFIG_MMU kernel configuration option
                        # is not enabled (see psutil #533/glances #413).
                        extended['memory_swap'] = None
                try:
                    extended['tcp'] = len(top_process.connections(kind="tcp"))
                    extended['udp'] = len(top_process.connections(kind="udp"))
                except (psutil.AccessDenied, psutil.NoSuchProcess):
                    # Manage issue1283 (psutil.AccessDenied)
                    extended['tcp'] = None
                    extended['udp'] = None
            except (psutil.NoSuchProcess, ValueError, AttributeError) as e:
                logger.error('Can not grab extended stats ({})'.format(e))
                extended['extended_stats'] = False
            else:
                logger.debug('Grab extended stats for process {}'.format(proc['pid']))
                extended['extended_stats'] = True
            proc.update(extended)
        first = False
        # /End of extended stats

        # Time since last update (for disk_io rate computation)
        proc['time_since_update'] = time_since_update

        # Process status (only keep the first char)
        proc['status'] = str(proc['status'])[:1].upper()

        # Process IO
        # procstat['io_counters'] is a list:
        # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag]
        # If io_tag = 0 > Access denied or first time (display "?")
        # If io_tag = 1 > No access denied (display the IO rate)
        if 'io_counters' in proc and proc['io_counters'] is not None:
            io_new = [proc['io_counters'].read_bytes,
                      proc['io_counters'].write_bytes]
            # For IO rate computation
            # Append saved IO r/w bytes
            try:
                proc['io_counters'] = io_new + self.io_old[proc['pid']]
                io_tag = 1
            except KeyError:
                proc['io_counters'] = io_new + [0, 0]
                io_tag = 0
            # then save the IO r/w bytes
            self.io_old[proc['pid']] = io_new
        else:
            proc['io_counters'] = [0, 0] + [0, 0]
            io_tag = 0
        # Append the IO tag (for display)
        proc['io_counters'] += [io_tag]

    # Compute the maximum value for keys in self._max_values_list: CPU, MEM
    # Usefull to highlight the processes with maximum values
    for k in self._max_values_list:
        values_list = [i[k] for i in self.processlist if i[k] is not None]
        if values_list != []:
            self.set_max_values(k, max(values_list))
0.001961
def _extract_all_python_namespaces(self, thrift_file_sources_by_target):
    """Extract the python namespace from each thrift source file."""
    py_namespaces_by_target = OrderedDict()
    failing_py_thrift_by_target = defaultdict(list)
    for t, all_content in thrift_file_sources_by_target.items():
        py_namespaces_by_target[t] = []
        for (path, content) in all_content:
            try:
                py_namespaces_by_target[t].append(
                    # File content is provided as a binary string, so we have to decode it.
                    (path, self._extract_py_namespace_from_content(t, path, content.decode('utf-8'))))
            except self.NamespaceParseFailure:
                failing_py_thrift_by_target[t].append(path)

    if failing_py_thrift_by_target:
        # We dump the output to a file here because the output can be very long in some repos.
        no_py_namespace_output_file = os.path.join(self.workdir, 'no-python-namespace-output.txt')

        pretty_printed_failures = '\n'.join(
            '{}: [{}]'.format(t.address.spec, ', '.join(paths))
            for t, paths in failing_py_thrift_by_target.items())
        error = self.NamespaceExtractionError(no_py_namespace_output_file, """\
Python namespaces could not be extracted from some thrift sources. Declaring a `namespace py` in
thrift sources for python thrift library targets will soon become required.

{} python library target(s) contained thrift sources not declaring a python namespace. The targets
and/or files which need to be edited will be dumped to: {}
""".format(len(failing_py_thrift_by_target), no_py_namespace_output_file))

        safe_file_dump(no_py_namespace_output_file, '{}\n'.format(pretty_printed_failures))

        if self.get_options().strict:
            raise error
        else:
            self.context.log.warn(error)

    return py_namespaces_by_target
0.010411
def accumulate(iterable, func=operator.add):
    """
    Cumulative calculations. (Summation, by default.)

    Via: https://docs.python.org/3/library/itertools.html#itertools.accumulate
    """
    it = iter(iterable)
    total = next(it)
    yield total
    for element in it:
        total = func(total, element)
        yield total
0.002994
def render(self, filename):
    """Perform initialization of render, set quality and size video
    attributes and then call template method that is defined in child
    class.
    """
    self.elapsed_time = -time()
    dpi = 100
    fig = figure(figsize=(16, 9), dpi=dpi)
    with self.writer.saving(fig, filename, dpi):
        for frame_id in xrange(self.frames + 1):
            self.renderFrame(frame_id)
            self.writer.grab_frame()
    self.elapsed_time += time()
0.00578
def reformat_python_docstrings(top_dirs: List[str],
                               correct_copyright_lines: List[str],
                               show_only: bool = True,
                               rewrite: bool = False,
                               process_only_filenum: int = None) -> None:
    """
    Walk a directory, finding Python files and rewriting them.

    Args:
        top_dirs: list of directories to descend into
        correct_copyright_lines:
            list of lines (without newlines) representing the copyright
            docstring block, including the transition lines of equals
            symbols
        show_only: show results (to stdout) only; don't rewrite
        rewrite: write the changes
        process_only_filenum: only process this file number (1-based index);
            for debugging only
    """
    filenum = 0
    for top_dir in top_dirs:
        for dirpath, dirnames, filenames in walk(top_dir):
            for filename in filenames:
                fullname = join(dirpath, filename)
                extension = splitext(filename)[1]
                if extension != PYTHON_EXTENSION:
                    # log.debug("Skipping non-Python file: {}", fullname)
                    continue

                filenum += 1
                if process_only_filenum and filenum != process_only_filenum:
                    continue

                log.info("Processing file {}: {}", filenum, fullname)
                proc = PythonProcessor(
                    full_path=fullname,
                    top_dir=top_dir,
                    correct_copyright_lines=correct_copyright_lines)
                if show_only:
                    proc.show()
                elif rewrite:
                    proc.rewrite_file()
0.000565
def refine(video, **kwargs):
    """Refine a video by searching `OMDb API <http://omdbapi.com/>`_.

    Several :class:`~subliminal.video.Episode` attributes can be found:

      * :attr:`~subliminal.video.Episode.series`
      * :attr:`~subliminal.video.Episode.year`
      * :attr:`~subliminal.video.Episode.series_imdb_id`

    Similarly, for a :class:`~subliminal.video.Movie`:

      * :attr:`~subliminal.video.Movie.title`
      * :attr:`~subliminal.video.Movie.year`
      * :attr:`~subliminal.video.Video.imdb_id`

    """
    if isinstance(video, Episode):
        # exit if the information is complete
        if video.series_imdb_id:
            logger.debug('No need to search')
            return

        # search the series
        results = search(video.series, 'series', video.year)
        if not results:
            logger.warning('No results for series')
            return
        logger.debug('Found %d results', len(results))

        # filter the results
        results = [r for r in results if sanitize(r['Title']) == sanitize(video.series)]
        if not results:
            logger.warning('No matching series found')
            return

        # process the results
        found = False
        for result in sorted(results, key=operator.itemgetter('Year')):
            if video.original_series and video.year is None:
                logger.debug('Found result for original series without year')
                found = True
                break
            if video.year == int(result['Year'].split(u'\u2013')[0]):
                logger.debug('Found result with matching year')
                found = True
                break

        if not found:
            logger.warning('No matching series found')
            return

        # add series information
        logger.debug('Found series %r', result)
        video.series = result['Title']
        video.year = int(result['Year'].split(u'\u2013')[0])
        video.series_imdb_id = result['imdbID']

    elif isinstance(video, Movie):
        # exit if the information is complete
        if video.imdb_id:
            return

        # search the movie
        results = search(video.title, 'movie', video.year)
        if not results:
            logger.warning('No results')
            return
        logger.debug('Found %d results', len(results))

        # filter the results
        results = [r for r in results if sanitize(r['Title']) == sanitize(video.title)]
        if not results:
            logger.warning('No matching movie found')
            return

        # process the results
        found = False
        for result in results:
            if video.year is None:
                logger.debug('Found result for movie without year')
                found = True
                break
            if video.year == int(result['Year']):
                logger.debug('Found result with matching year')
                found = True
                break

        if not found:
            logger.warning('No matching movie found')
            return

        # add movie information
        logger.debug('Found movie %r', result)
        video.title = result['Title']
        video.year = int(result['Year'].split(u'\u2013')[0])
        video.imdb_id = result['imdbID']
0.000915
def multiply(self, x1, x2, out=None):
    """Return the pointwise product of ``x1`` and ``x2``.

    Parameters
    ----------
    x1, x2 : `LinearSpaceElement`
        Multiplicands in the product.
    out : `LinearSpaceElement`, optional
        Element to which the result is written.

    Returns
    -------
    out : `LinearSpaceElement`
        Product of the elements. If ``out`` was provided, the returned
        object is a reference to it.
    """
    if out is None:
        out = self.element()

    if out not in self:
        raise LinearSpaceTypeError('`out` {!r} is not an element of '
                                   '{!r}'.format(out, self))
    if x1 not in self:
        raise LinearSpaceTypeError('`x1` {!r} is not an element of '
                                   '{!r}'.format(x1, self))
    if x2 not in self:
        raise LinearSpaceTypeError('`x2` {!r} is not an element of '
                                   '{!r}'.format(x2, self))
    self._multiply(x1, x2, out)
    return out
0.001787
def send(self, data, sample_rate=None):
    '''Send the data over UDP while taking the sample_rate in account

    The sample rate should be a number between `0` and `1` which indicates
    the probability that a message will be sent. The sample_rate is also
    communicated to `statsd` so it knows what multiplier to use.

    :keyword data: The data to send
    :type data: dict
    :keyword sample_rate: The sample rate, defaults to `1` (meaning always)
    :type sample_rate: int
    '''
    if self._disabled:
        self.logger.debug('Connection disabled, not sending data')
        return False

    if sample_rate is None:
        sample_rate = self._sample_rate

    sampled_data = {}
    if sample_rate < 1:
        if random.random() <= sample_rate:
            # Modify the data so statsd knows our sample_rate
            for stat, value in compat.iter_dict(data):
                sampled_data[stat] = '%s|@%s' % (data[stat], sample_rate)
    else:
        sampled_data = data

    try:
        for stat, value in compat.iter_dict(sampled_data):
            send_data = ('%s:%s' % (stat, value)).encode("utf-8")
            self.udp_sock.send(send_data)
        return True
    except Exception as e:
        self.logger.exception('unexpected error %r while sending data', e)
        return False
0.001399
def _ImageDimensions(images, dynamic_shape=False):
    """Returns the dimensions of an image tensor.

    Args:
      images: 4-D Tensor of shape [batch, height, width, channels]
      dynamic_shape: Whether the input image has undetermined shape. If set to
        `True`, shape information will be retrieved at run time. Default to
        `False`.

    Returns:
      list of integers [batch, height, width, channels]
    """
    # A simple abstraction to provide names for each dimension. This abstraction
    # should make it simpler to switch dimensions in the future (e.g. if we ever
    # want to switch height and width.)
    if dynamic_shape:
        return array_ops.unpack(array_ops.shape(images))
    else:
        return images.get_shape().as_list()
0.009629
def parse(cls, s, required=False):
    """
    Parse string to create an instance

    :param str s: String with requirement to parse
    :param bool required: Is this requirement required to be fulfilled? If not, then it is a filter.
    """
    req = pkg_resources.Requirement.parse(s)
    return cls(req, required=required)
0.008333
def getStatus(self):
    """Dumps different debug info about cluster to dict and return it"""

    status = {}
    status['version'] = VERSION
    status['revision'] = REVISION
    status['self'] = self.__selfNode
    status['state'] = self.__raftState
    status['leader'] = self.__raftLeader
    status['partner_nodes_count'] = len(self.__otherNodes)
    for node in self.__otherNodes:
        status['partner_node_status_server_' + node.id] = 2 if node in self.__connectedNodes else 0
    status['readonly_nodes_count'] = len(self.__readonlyNodes)
    for node in self.__readonlyNodes:
        status['readonly_node_status_server_' + node.id] = 2 if node in self.__connectedNodes else 0
    status['log_len'] = len(self.__raftLog)
    status['last_applied'] = self.__raftLastApplied
    status['commit_idx'] = self.__raftCommitIndex
    status['raft_term'] = self.__raftCurrentTerm
    status['next_node_idx_count'] = len(self.__raftNextIndex)
    for node, idx in iteritems(self.__raftNextIndex):
        status['next_node_idx_server_' + node.id] = idx
    status['match_idx_count'] = len(self.__raftMatchIndex)
    for node, idx in iteritems(self.__raftMatchIndex):
        status['match_idx_server_' + node.id] = idx
    status['leader_commit_idx'] = self.__leaderCommitIndex
    status['uptime'] = int(time.time() - self.__startTime)
    status['self_code_version'] = self.__selfCodeVersion
    status['enabled_code_version'] = self.__enabledCodeVersion
    return status
0.002516
def between(min_val,          # type: Any
            max_val,          # type: Any
            open_left=False,  # type: bool
            open_right=False  # type: bool
            ):
    """
    'Is between' validation_function generator.
    Returns a validation_function to check that min_val <= x <= max_val (default). open_right and
    open_left flags allow to transform each side into strict mode. For example setting open_left=True
    will enforce min_val < x <= max_val

    :param min_val: minimum value for x
    :param max_val: maximum value for x
    :param open_left: Boolean flag to turn the left inequality to strict mode
    :param open_right: Boolean flag to turn the right inequality to strict mode
    :return:
    """
    if open_left and open_right:
        def between_(x):
            if (min_val < x) and (x < max_val):
                return True
            else:
                # raise Failure('{} < x < {} does not hold for x={}'.format(min_val, max_val, x))
                raise NotInRange(wrong_value=x, min_value=min_val, left_strict=True,
                                 max_value=max_val, right_strict=True)
    elif open_left:
        def between_(x):
            if (min_val < x) and (x <= max_val):
                return True
            else:
                # raise Failure('between: {} < x <= {} does not hold for x={}'.format(min_val, max_val, x))
                raise NotInRange(wrong_value=x, min_value=min_val, left_strict=True,
                                 max_value=max_val, right_strict=False)
    elif open_right:
        def between_(x):
            if (min_val <= x) and (x < max_val):
                return True
            else:
                # raise Failure('between: {} <= x < {} does not hold for x={}'.format(min_val, max_val, x))
                raise NotInRange(wrong_value=x, min_value=min_val, left_strict=False,
                                 max_value=max_val, right_strict=True)
    else:
        def between_(x):
            if (min_val <= x) and (x <= max_val):
                return True
            else:
                # raise Failure('between: {} <= x <= {} does not hold for x={}'.format(min_val, max_val, x))
                raise NotInRange(wrong_value=x, min_value=min_val, left_strict=False,
                                 max_value=max_val, right_strict=False)

    between_.__name__ = 'between_{}_and_{}'.format(min_val, max_val)
    return between_
0.004506
async def respond_rpc(self, msg, _context):
    """Respond to an RPC previously sent to a service."""

    rpc_id = msg.get('response_uuid')
    result = msg.get('result')
    payload = msg.get('response')

    self.service_manager.send_rpc_response(rpc_id, result, payload)
0.006803
def thaw_decrypt(vault_client, src_file, tmp_dir, opt):
    """Decrypts the encrypted ice file"""

    if not os.path.isdir(opt.secrets):
        LOG.info("Creating secret directory %s", opt.secrets)
        os.mkdir(opt.secrets)

    zip_file = "%s/aomi.zip" % tmp_dir

    if opt.gpg_pass_path:
        gpg_path_bits = opt.gpg_pass_path.split('/')
        gpg_path = '/'.join(gpg_path_bits[0:len(gpg_path_bits) - 1])
        gpg_field = gpg_path_bits[len(gpg_path_bits) - 1]
        resp = vault_client.read(gpg_path)
        gpg_pass = None
        if resp and 'data' in resp and gpg_field in resp['data']:
            gpg_pass = resp['data'][gpg_field]
            if not gpg_pass:
                raise aomi.exceptions.GPG("Unable to retrieve GPG password")

            LOG.debug("Retrieved GPG password from Vault")
            if not decrypt(src_file, zip_file, passphrase=gpg_pass):
                raise aomi.exceptions.GPG("Unable to gpg")
        else:
            raise aomi.exceptions.VaultData("Unable to retrieve GPG password")
    else:
        if not decrypt(src_file, zip_file):
            raise aomi.exceptions.GPG("Unable to gpg")

    return zip_file
0.000852
def get_next_parameters(self, n=None):
    """Gets the next set of ``Parameters`` in this list which must be less
    than or equal to the return from ``available()``.

    arg:    n (cardinal): the number of ``Parameter`` elements requested
            which must be less than or equal to ``available()``
    return: (osid.configuration.Parameter) - an array of ``Parameter``
            elements.  The length of the array is less than or equal to
            the number specified.
    raise:  IllegalState - no more elements available in this list
    raise:  OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for osid.resource.ResourceList.get_next_resources
    if n > self.available():
        # !!! This is not quite as specified (see method docs) !!!
        raise IllegalState('not enough elements available in this list')
    else:
        next_list = []
        x = 0
        while x < n:
            try:
                next_list.append(self.next())
            except:  # Need to specify exceptions here
                raise OperationFailed()
            x = x + 1
        return next_list
0.00382
def __create_safari_driver(self):
    '''
    Creates an instance of Safari webdriver.
    '''
    # Check for selenium jar env file needed for safari driver.
    if not os.getenv(self.__SELENIUM_SERVER_JAR_ENV):
        # If not set, check if we have a config setting for it.
        try:
            selenium_server_path = self._config_reader.get(
                self.SELENIUM_SERVER_LOCATION)
            self._env_vars[
                self.__SELENIUM_SERVER_JAR_ENV] = selenium_server_path
        except KeyError:
            raise RuntimeError(u("Missing selenium server path config {0}.").format(
                self.SELENIUM_SERVER_LOCATION))

    return webdriver.Safari()
0.004032
def attach_volume(volume_id, instance_id, device, region=None, key=None,
                  keyid=None, profile=None):
    '''
    Attach an EBS volume to an EC2 instance.

    volume_id
        (string) – The ID of the EBS volume to be attached.

    instance_id
        (string) – The ID of the EC2 instance to attach the volume to.

    device
        (string) – The device on the instance through which the volume is exposed (e.g. /dev/sdh)

    returns
        (bool) - True on success, False on failure.

    CLI Example:

    .. code-block:: bash

        salt-call boto_ec2.attach_volume vol-12345678 i-87654321 /dev/sdh
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        return conn.attach_volume(volume_id, instance_id, device)
    except boto.exception.BotoServerError as error:
        log.error(error)
        return False
0.00225
def rhyming_part(phones):
    """Get the "rhyming part" of a string with CMUdict phones.

    "Rhyming part" here means everything from the vowel in the stressed
    syllable nearest the end of the word up to the end of the word.

    .. doctest::

        >>> import pronouncing
        >>> phones = pronouncing.phones_for_word("purple")
        >>> pronouncing.rhyming_part(phones[0])
        'ER1 P AH0 L'

    :param phones: a string containing space-separated CMUdict phones
    :returns: a string with just the "rhyming part" of those phones
    """
    phones_list = phones.split()
    for i in range(len(phones_list) - 1, 0, -1):
        if phones_list[i][-1] in '12':
            return ' '.join(phones_list[i:])
    return phones
0.001353
def update_clr(self, aclr, bclr):
    """
    Zip the two sequences together, using "left-greedy" rule

    =============                     seqA
             ||||
             ====(===============)    seqB
    """
    print(aclr, bclr, file=sys.stderr)
    otype = self.otype

    if otype == 1:
        if aclr.orientation == '+':
            aclr.end = self.qstop
        else:
            aclr.start = self.qstart
        if bclr.orientation == '+':
            bclr.start = self.sstop + 1
        else:
            bclr.end = self.sstart - 1
    elif otype == 3:
        aclr.start = aclr.end
    elif otype == 4:
        bclr.start = bclr.end

    print(aclr, bclr, file=sys.stderr)
0.002581
def split_query(query):
    """
    Handle the query as a WWW HTTP 1630 query, as this is how people
    usually think of URI queries in general. We do not decode anything
    in split operations, neither percent nor the terrible plus-to-space
    conversion.

    Return:

    >>> split_query("k1=v1&k2=v+2%12&k3=&k4&&&k5==&=k&==")
    (('k1', 'v1'), ('k2', 'v+2%12'), ('k3', ''), ('k4', None), ('k5', '='), ('', 'k'), ('', '='))
    """
    def split_assignment(a):
        sa = a.split('=', 1)
        return len(sa) == 2 and tuple(sa) or (sa[0], None)
    assignments = query.split('&')
    return tuple([split_assignment(a) for a in assignments if a])
0.003058
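As an illustrative sketch (not part of the module), the undecoded pairs returned by split_query can be grouped into a dict of lists while keeping repeated keys; this assumes split_query from the snippet above is in scope, and query_to_multidict is a hypothetical helper name.

from collections import defaultdict

def query_to_multidict(query):
    grouped = defaultdict(list)
    for key, value in split_query(query):
        grouped[key].append(value)   # values stay undecoded; None means "no '=' present"
    return dict(grouped)

# query_to_multidict("a=1&a=2&b=v+2%12&c")
# -> {'a': ['1', '2'], 'b': ['v+2%12'], 'c': [None]}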
def upload_from_stream(self, filename, source, chunk_size_bytes=None, metadata=None, session=None): """Uploads a user file to a GridFS bucket. Reads the contents of the user file from `source` and uploads it to the file `filename`. Source can be a string or file-like object. For example:: my_db = MongoClient().test fs = GridFSBucket(my_db) file_id = fs.upload_from_stream( "test_file", "data I want to store!", chunk_size_bytes=4, metadata={"contentType": "text/plain"}) Returns the _id of the uploaded file. Raises :exc:`~gridfs.errors.NoFile` if no such version of that file exists. Raises :exc:`~ValueError` if `filename` is not a string. :Parameters: - `filename`: The name of the file to upload. - `source`: The source stream of the content to be uploaded. Must be a file-like object that implements :meth:`read` or a string. - `chunk_size_bytes` (options): The number of bytes per chunk of this file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`. - `metadata` (optional): User data for the 'metadata' field of the files collection document. If not provided the metadata field will be omitted from the files collection document. - `session` (optional): a :class:`~pymongo.client_session.ClientSession` .. versionchanged:: 3.6 Added ``session`` parameter. """ with self.open_upload_stream( filename, chunk_size_bytes, metadata, session=session) as gin: gin.write(source) return gin._id
0.001688
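A hedged end-to-end sketch of calling this method through the public pymongo/gridfs API, assuming a MongoDB server is reachable on localhost; the database name is arbitrary and chosen for the example.

from pymongo import MongoClient
from gridfs import GridFSBucket

db = MongoClient().gridfs_example
bucket = GridFSBucket(db)

file_id = bucket.upload_from_stream(
    "test_file",
    b"data I want to store!",
    chunk_size_bytes=4,
    metadata={"contentType": "text/plain"})

# Read the stored bytes back using the _id returned by upload_from_stream.
contents = bucket.open_download_stream(file_id).read()
assert contents == b"data I want to store!"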
def wrap_list(item): """ Returns an object as a list. If the object is a list, it is returned directly. If it is a tuple or set, it is returned as a list. If it is another object, it is wrapped in a list and returned. """ if item is None: return [] elif isinstance(item, list): return item elif isinstance(item, (tuple, set)): return list(item) else: return [item]
0.004577
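A quick illustration of the normalisation behaviour, assuming wrap_list from the snippet above is in scope (plain Python, no dependencies).

assert wrap_list(None) == []
assert wrap_list([1, 2]) == [1, 2]
assert wrap_list((1, 2)) == [1, 2]
assert wrap_list({"x"}) == ["x"]
assert wrap_list("abc") == ["abc"]   # strings are wrapped whole, not split into characters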
def solution_path(self, min_lambda, max_lambda, lambda_bins, verbose=0): '''Follows the solution path to find the best lambda value.''' lambda_grid = np.exp(np.linspace(np.log(max_lambda), np.log(min_lambda), lambda_bins)) aic_trace = np.zeros(lambda_grid.shape) # The AIC score for each lambda value aicc_trace = np.zeros(lambda_grid.shape) # The AICc score for each lambda value (correcting for finite sample size) bic_trace = np.zeros(lambda_grid.shape) # The BIC score for each lambda value dof_trace = np.zeros(lambda_grid.shape) # The degrees of freedom of each final solution log_likelihood_trace = np.zeros(lambda_grid.shape) beta_trace = [] best_idx = None best_plateaus = None # Solve the series of lambda values with warm starts at each point for i, lam in enumerate(lambda_grid): if verbose: print('#{0} Lambda = {1}'.format(i, lam)) # Fit to the final values beta = self.solve(lam) if verbose: print('Calculating degrees of freedom') # Count the number of free parameters in the grid (dof) plateaus = calc_plateaus(beta, self.edges) dof_trace[i] = len(plateaus) if verbose: print('Calculating AIC') # Get the negative log-likelihood log_likelihood_trace[i] = self.log_likelihood(beta) # Calculate AIC = 2k - 2ln(L) aic_trace[i] = 2. * dof_trace[i] - 2. * log_likelihood_trace[i] # Calculate AICc = AIC + 2k * (k+1) / (n - k - 1) aicc_trace[i] = aic_trace[i] + 2 * dof_trace[i] * (dof_trace[i]+1) / (len(beta) - dof_trace[i] - 1.) # Calculate BIC = -2ln(L) + k * (ln(n) - ln(2pi)) bic_trace[i] = -2 * log_likelihood_trace[i] + dof_trace[i] * (np.log(len(beta)) - np.log(2 * np.pi)) # Track the best model thus far if best_idx is None or bic_trace[i] < bic_trace[best_idx]: best_idx = i best_plateaus = plateaus # Save the trace of all the resulting parameters beta_trace.append(np.array(beta)) if verbose: print('DoF: {0} AIC: {1} AICc: {2} BIC: {3}'.format(dof_trace[i], aic_trace[i], aicc_trace[i], bic_trace[i])) if verbose: print('Best setting (by BIC): lambda={0} [DoF: {1}, AIC: {2}, AICc: {3} BIC: {4}]'.format(lambda_grid[best_idx], dof_trace[best_idx], aic_trace[best_idx], aicc_trace[best_idx], bic_trace[best_idx])) return {'aic': aic_trace, 'aicc': aicc_trace, 'bic': bic_trace, 'dof': dof_trace, 'loglikelihood': log_likelihood_trace, 'beta': np.array(beta_trace), 'lambda': lambda_grid, 'best_idx': best_idx, 'best': beta_trace[best_idx], 'plateaus': best_plateaus}
0.005583
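For reference, the information criteria computed inside the loop can be reproduced stand-alone; the sketch below mirrors the snippet's own formulas (note its BIC uses k*(ln n - ln 2*pi) rather than the textbook k*ln n term), with made-up numbers for the log-likelihood, degrees of freedom k, and sample size n.

import numpy as np

def information_criteria(log_l, k, n):
    aic = 2.0 * k - 2.0 * log_l
    aicc = aic + 2.0 * k * (k + 1) / (n - k - 1.0)
    bic = -2.0 * log_l + k * (np.log(n) - np.log(2 * np.pi))
    return aic, aicc, bic

# Illustrative values only.
print(information_criteria(log_l=-120.5, k=7, n=200))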
def turtle_to_texture(turtle_program, turn_amount=DEFAULT_TURN, initial_angle=DEFAULT_INITIAL_ANGLE, resolution=1): """Makes a texture from a turtle program. Args: turtle_program (str): a string representing the turtle program; see the docstring of `branching_turtle_generator` for more details turn_amount (float): amount to turn in degrees initial_angle (float): initial orientation of the turtle resolution (int): if provided, interpolation amount for visible lines Returns: texture: A texture. """ generator = branching_turtle_generator( turtle_program, turn_amount, initial_angle, resolution) return texture_from_generator(generator)
0.00134
def _term(g): """Applies the TERM rule on 'g' (see top comment).""" all_t = {x for rule in g.rules for x in rule.rhs if isinstance(x, T)} t_rules = {t: Rule(NT('__T_%s' % str(t)), [t], weight=0, alias='Term') for t in all_t} new_rules = [] for rule in g.rules: if len(rule.rhs) > 1 and any(isinstance(x, T) for x in rule.rhs): new_rhs = [t_rules[x].lhs if isinstance(x, T) else x for x in rule.rhs] new_rules.append(Rule(rule.lhs, new_rhs, weight=rule.weight, alias=rule.alias)) new_rules.extend(v for k, v in t_rules.items() if k in rule.rhs) else: new_rules.append(rule) return Grammar(new_rules)
0.005831
def update_scale(self, overflow):
        """dynamically update loss scale"""
        iter_since_rescale = self._num_steps - self._last_rescale_iter
        if overflow:
            self._last_overflow_iter = self._num_steps
            self._overflows_since_rescale += 1
            percentage = self._overflows_since_rescale / float(iter_since_rescale)
            # we tolerate a certain amount of NaNs before actually scaling it down
            if percentage >= self.tolerance:
                self.loss_scale /= self.scale_factor
                self._last_rescale_iter = self._num_steps
                self._overflows_since_rescale = 0
                logging.info('DynamicLossScaler: overflow detected. set loss_scale = %s',
                             self.loss_scale)
        elif (self._num_steps - self._last_overflow_iter) % self.scale_window == 0:
            self.loss_scale *= self.scale_factor
            self._last_rescale_iter = self._num_steps
        self._num_steps += 1
0.006024
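A minimal self-contained sketch of the same dynamic loss-scaling rule, written as a toy class rather than the one above: halve the scale when overflows become too frequent, and grow it again after a quiet window of steps. Names and constants are illustrative only.

class ToyLossScaler:
    def __init__(self, init_scale=2.0 ** 15, scale_factor=2.0,
                 scale_window=2000, tolerance=0.05):
        self.loss_scale = init_scale
        self.scale_factor = scale_factor
        self.scale_window = scale_window
        self.tolerance = tolerance
        self._step = 0
        self._last_overflow = -1
        self._last_rescale = 0
        self._overflows = 0

    def update(self, overflow):
        since_rescale = max(self._step - self._last_rescale, 1)
        if overflow:
            self._last_overflow = self._step
            self._overflows += 1
            if self._overflows / float(since_rescale) >= self.tolerance:
                self.loss_scale /= self.scale_factor     # too many overflows: shrink
                self._last_rescale = self._step
                self._overflows = 0
        elif (self._step - self._last_overflow) % self.scale_window == 0:
            self.loss_scale *= self.scale_factor         # long overflow-free stretch: grow
            self._last_rescale = self._step
        self._step += 1

scaler = ToyLossScaler(scale_window=4)
for has_overflow in [False, True, True, False, False, False, False, False]:
    scaler.update(has_overflow)
print(scaler.loss_scale)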
def has_no_jumps(neuron, max_distance=30.0, axis='z'): '''Check if there are jumps (large movements in the `axis`) Arguments: neuron(Neuron): The neuron object to test max_distance(float): value above which consecutive z-values are considered a jump axis(str): one of x/y/z, which axis to check for jumps Returns: CheckResult with result list of ids of bad sections ''' bad_ids = [] axis = {'x': COLS.X, 'y': COLS.Y, 'z': COLS.Z, }[axis.lower()] for neurite in iter_neurites(neuron): section_segment = ((sec, seg) for sec in iter_sections(neurite) for seg in iter_segments(sec)) for sec, (p0, p1) in islice(section_segment, 1, None): # Skip neurite root segment if max_distance < abs(p0[axis] - p1[axis]): bad_ids.append((sec.id, [p0, p1])) return CheckResult(len(bad_ids) == 0, bad_ids)
0.002144
def _get_group_no(self, tag_name): """ Takes tag name and returns the number of the group to which tag belongs """ if tag_name in self.full: return self.groups.index(self.full[tag_name]["parent"]) else: return len(self.groups)
0.006873
def _config(self, **kargs): """ ReConfigure Package """ for key, value in kargs.items(): setattr(self, key, value)
0.014085
def getVersionString():
    """
    Return a string with version information.
    It is obtained by one of three procedures: ``git describe``,
    a file in the .git dir, or the file __VERSION__.
    """
    version_string = None
    try:
        version_string = subprocess.check_output(['git', 'describe'])
    except:
        logger.warning('Command "git describe" is not working')

    if version_string == None:  # noqa
        try:
            path_to_version = os.path.join(path_to_script,
                                           '../.git/refs/heads/master')
            with file(path_to_version) as f:
                version_string = f.read()
        except:
            logger.warning('Problem with reading file ".git/refs/heads/master"')

    if version_string == None:  # noqa
        try:
            path_to_version = os.path.join(path_to_script,
                                           '../__VERSION__')
            with file(path_to_version) as f:
                version_string = f.read()
            path_to_version = path_to_version + \
                ' version number is created manually'
        except:
            logger.warning('Problem with reading file "__VERSION__"')

    return version_string
0.005
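A hedged Python 3 sketch of the same fallback chain, illustrative only and not the project's code; get_version_string and repo_root are hypothetical names, and it simply returns None when all three sources fail.

import os
import subprocess

def get_version_string(repo_root):
    # 1) Ask git directly.
    try:
        return subprocess.check_output(
            ['git', 'describe'], cwd=repo_root).decode().strip()
    except (OSError, subprocess.CalledProcessError):
        pass
    # 2) Fall back to the git ref file, then a manually maintained __VERSION__ file.
    for relative in ('.git/refs/heads/master', '__VERSION__'):
        path = os.path.join(repo_root, relative)
        try:
            with open(path) as handle:
                return handle.read().strip()
        except OSError:
            continue
    return None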
def _appendTrlFile(trlfile,drizfile): """ Append drizfile to already existing trlfile from CALXXX. """ if not os.path.exists(drizfile): return # Open already existing CALWF3 trailer file for appending ftrl = open(trlfile,'a') # Open astrodrizzle trailer file fdriz = open(drizfile) # Read in drizzle comments _dlines = fdriz.readlines() # Append them to CALWF3 trailer file ftrl.writelines(_dlines) # Close all files ftrl.close() fdriz.close() # Now, clean up astrodrizzle trailer file os.remove(drizfile)
0.005164
def list_associated_storage_groups( self, full_properties=False, filter_args=None): """ Return the :term:`storage groups <storage group>` that are associated to this CPC. If the CPC does not support the "dpm-storage-management" feature, or does not have it enabled, an empty list is returned. Storage groups for which the authenticated user does not have object-access permission are not included. Authorization requirements: * Object-access permission to any storage groups to be included in the result. Parameters: full_properties (bool): Controls that the full set of resource properties for each returned storage group is being retrieved, vs. only the following short set: "object-uri", "cpc-uri", "name", "fulfillment-state", and "type". filter_args (dict): Filter arguments that narrow the list of returned resources to those that match the specified filter arguments. For details, see :ref:`Filtering`. `None` causes no filtering to happen. The 'cpc-uri' property is automatically added to the filter arguments and must not be specified in this parameter. Returns: : A list of :class:`~zhmcclient.StorageGroup` objects. Raises: ValueError: The filter_args parameter specifies the 'cpc-uri' property :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ if filter_args is None: filter_args = {} else: filter_args = filter_args.copy() if 'cpc-uri' in filter_args: raise ValueError( "The filter_args parameter specifies the 'cpc-uri' property " "with value: %s" % filter_args['cpc-uri']) filter_args['cpc-uri'] = self.uri sg_list = self.manager.console.storage_groups.list( full_properties, filter_args) return sg_list
0.000919
def send_exception_to_sentry(self, exc_info): """Send an exception to Sentry if enabled. :param tuple exc_info: exception information as returned from :func:`sys.exc_info` """ if not self.sentry_client: LOGGER.debug('No sentry_client, aborting') return message = dict(self.active_message) try: duration = math.ceil(time.time() - self.delivery_time) * 1000 except TypeError: duration = 0 kwargs = {'extra': { 'consumer_name': self.consumer_name, 'env': dict(os.environ), 'message': message}, 'time_spent': duration} LOGGER.debug('Sending exception to sentry: %r', kwargs) self.sentry_client.captureException(exc_info, **kwargs)
0.002345
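For context, a hedged illustration of the underlying legacy raven client API that captureException comes from; the DSN below is a placeholder, not a real project.

from raven import Client

sentry_client = Client('https://<key>@sentry.example.com/<project-id>')

try:
    1 / 0
except ZeroDivisionError:
    # captureException() picks up sys.exc_info() itself; extra context is
    # attached the same way the consumer code above passes its kwargs.
    sentry_client.captureException(extra={'consumer_name': 'example-consumer'})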
def agent_service_register(consul_url=None, token=None, **kwargs):
    '''
    Used to add a new service, with an optional health check,
    to the local agent.

    :param consul_url: The Consul server URL.
    :param name: A name describing the service.
    :param address: The address used by the service, defaults
                    to the address of the agent.
    :param port: The port used by the service.
    :param id: Unique ID to identify the service, if not
               provided the value of the name parameter is used.
    :param tags: Identifying tags for service, string or list.
    :param script: If script is provided, the check type is a script,
                   and Consul will evaluate that script based on the
                   interval parameter.
    :param http: Check will perform an HTTP GET request against the
                 value of HTTP (expected to be a URL) based on the
                 interval parameter.
    :param check_ttl: If a TTL type is used, then the TTL update
                      endpoint must be used periodically to update
                      the state of the check.
    :param check_interval: Interval at which the check should run.
    :return: Boolean and message indicating success or failure.

    CLI Example:

    .. code-block:: bash

        salt '*' consul.agent_service_register name='redis' tags='["master", "v1"]' address="127.0.0.1" port="8080" check_script="/usr/local/bin/check_redis.py" interval="10s"

    '''
    ret = {}
    data = {}
    if not consul_url:
        consul_url = _get_config()
        if not consul_url:
            log.error('No Consul URL found.')
            ret['message'] = 'No Consul URL found.'
            ret['res'] = False
            return ret

    lc_kwargs = dict()
    for k, v in six.iteritems(kwargs):
        lc_kwargs[k.lower()] = v

    if 'name' in lc_kwargs:
        data['Name'] = lc_kwargs['name']
    else:
        raise SaltInvocationError('Required argument "name" is missing.')

    if 'address' in lc_kwargs:
        data['Address'] = lc_kwargs['address']

    if 'port' in lc_kwargs:
        data['Port'] = lc_kwargs['port']

    if 'id' in lc_kwargs:
        data['ID'] = lc_kwargs['id']

    if 'tags' in lc_kwargs:
        _tags = lc_kwargs['tags']
        if not isinstance(_tags, list):
            _tags = [_tags]
        data['Tags'] = _tags

    if 'enabletagoverride' in lc_kwargs:
        data['EnableTagOverride'] = lc_kwargs['enabletagoverride']

    if 'check' in lc_kwargs:
        dd = dict()
        for k, v in six.iteritems(lc_kwargs['check']):
            dd[k.lower()] = v
        interval_required = False
        check_dd = dict()

        if 'script' in dd:
            interval_required = True
            check_dd['Script'] = dd['script']
        if 'http' in dd:
            interval_required = True
            check_dd['HTTP'] = dd['http']
        if 'ttl' in dd:
            check_dd['TTL'] = dd['ttl']
        if 'interval' in dd:
            check_dd['Interval'] = dd['interval']

        if interval_required:
            if 'Interval' not in check_dd:
                ret['message'] = 'Required parameter "interval" is missing.'
                ret['res'] = False
                return ret
        else:
            if 'Interval' in check_dd:
                del check_dd['Interval']  # not required, so ignore it

        if check_dd:
            data['Check'] = check_dd  # if empty, ignore it

    function = 'agent/service/register'
    res = _query(consul_url=consul_url,
                 function=function,
                 token=token,
                 method='PUT',
                 data=data)
    if res['res']:
        ret['res'] = True
        ret['message'] = 'Service {0} registered on agent.'.format(kwargs['name'])
    else:
        ret['res'] = False
        ret['message'] = 'Unable to register service {0}.'.format(kwargs['name'])
    return ret
0.001018
def row_completed(self, index):
        """Mark the row at index as completed.

        .. seealso:: :meth:`completed_row_indices`

        This method notifies the observers from :meth:`on_row_completed`.
        """
        self._completed_rows.append(index)
        for row_completed in self._on_row_completed:
            row_completed(index)
0.00578
def cleanup(self): """ removes inactive clients (will be run in its own thread, about once every second) """ while self.inactive_timeout > 0: self.time = time.time() keys = [] for key, client in self.clients.items.items(): t = getattr(client, "active_at", 0) if t > 0 and self.time - t > self.inactive_timeout: client.kwargs["link"].disconnect() keys.append(key) for key in keys: self.log_debug("Removing client (inactive): %s" % key) del self.clients[key] time.sleep(1)
0.002946
def get_broadcast(self, broadcast_id):
        """
        Use this method to get details on a broadcast that is in-progress.

        :param String broadcast_id: The ID of the broadcast you want to retrieve

        :rtype A Broadcast object, which contains information of the broadcast: id, sessionId
        projectId, createdAt, updatedAt, resolution, broadcastUrls and status
        """
        endpoint = self.endpoints.broadcast_url(broadcast_id)

        response = requests.get(
            endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
        )

        if response.status_code == 200:
            return Broadcast(response.json())
        elif response.status_code == 400:
            raise BroadcastError(
                'Invalid request. This response may indicate that data in your request '
                'data is invalid JSON.')
        elif response.status_code == 403:
            raise AuthError('Authentication error.')
        elif response.status_code == 409:
            raise BroadcastError('No matching broadcast found (with the specified ID).')
        else:
            raise RequestError('OpenTok server error.', response.status_code)
0.004052
def reduce_log_sum(attrs, inputs, proto_obj): """Reduce the array along a given axis by log sum value""" keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims') sum_op = symbol.sum(inputs[0], axis=attrs.get('axes'), keepdims=keep_dims) log_sym = symbol.log(sum_op) return log_sym, attrs, inputs
0.002833
def fileserver(opts, backends): ''' Returns the file server modules ''' return LazyLoader(_module_dirs(opts, 'fileserver'), opts, tag='fileserver', whitelist=backends, pack={'__utils__': utils(opts)})
0.003289
def rank(self, X, algorithm=None): """ Returns the feature ranking. Parameters ---------- X : ndarray or DataFrame of shape n x m A matrix of n instances with m features algorithm : str or None The ranking mechanism to use, or None for the default Returns ------- ranks : ndarray An n-dimensional, symmetric array of rank scores, where n is the number of features. E.g. for 1D ranking, it is (n,), for a 2D ranking it is (n,n) and so forth. """ algorithm = algorithm or self.ranking_ algorithm = algorithm.lower() if algorithm not in self.ranking_methods: raise YellowbrickValueError( "'{}' is unrecognized ranking method".format(algorithm) ) # Extract matrix from dataframe if necessary if is_dataframe(X): X = X.values return self.ranking_methods[algorithm](X)
0.001984
def update(self):
        """Update core stats.

        Stats is a dict (with both physical and log cpu number) instead of an
        integer.
        """
        # Init new stats
        stats = self.get_init_value()

        if self.input_method == 'local':
            # Update stats using the standard system lib

            # psutil 2.0 includes psutil.cpu_count() and psutil.cpu_count(logical=False)
            # Return a dict with:
            # - phys: physical cores only (hyper thread CPUs are excluded)
            # - log: logical CPUs in the system
            # Return None if undefined
            try:
                stats["phys"] = psutil.cpu_count(logical=False)
                stats["log"] = psutil.cpu_count()
            except NameError:
                self.reset()

        elif self.input_method == 'snmp':
            # Update stats using SNMP
            # http://stackoverflow.com/questions/5662467/how-to-find-out-the-number-of-cpus-using-snmp
            pass

        # Update the stats
        self.stats = stats

        return self.stats
0.003742
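The psutil calls behind the 'local' branch can be exercised directly (assumes psutil >= 2.0 is installed; the physical count may be None on some platforms).

import psutil

stats = {
    "phys": psutil.cpu_count(logical=False),   # physical cores, hyper-threads excluded
    "log": psutil.cpu_count(),                 # logical CPUs
}
print(stats)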
def get_atom_name(self, atom): """Look up the name of atom, returning it as a string. Will raise BadAtom if atom does not exist.""" r = request.GetAtomName(display = self.display, atom = atom) return r.name
0.022222
def subscriptlist(self, subscripts): """subscriptlist: subscript (',' subscript)* [',']""" if len(subscripts) == 1: return ast.Subscript(slice=subscripts[0], ctx=None, loc=None) elif all([isinstance(x, ast.Index) for x in subscripts]): elts = [x.value for x in subscripts] loc = subscripts[0].loc.join(subscripts[-1].loc) index = ast.Index(value=ast.Tuple(elts=elts, ctx=None, begin_loc=None, end_loc=None, loc=loc), loc=loc) return ast.Subscript(slice=index, ctx=None, loc=None) else: extslice = ast.ExtSlice(dims=subscripts, loc=subscripts[0].loc.join(subscripts[-1].loc)) return ast.Subscript(slice=extslice, ctx=None, loc=None)
0.006961
def reply_to(self, message, text, **kwargs): """ Convenience function for `send_message(message.chat.id, text, reply_to_message_id=message.message_id, **kwargs)` """ return self.send_message(message.chat.id, text, reply_to_message_id=message.message_id, **kwargs)
0.013559
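A hedged minimal echo bot showing reply_to in context, assuming the pyTelegramBotAPI package; the token string is a placeholder.

import telebot

bot = telebot.TeleBot('<your-bot-token>')

@bot.message_handler(func=lambda message: True)
def echo(message):
    # reply_to quotes the incoming message by setting reply_to_message_id.
    bot.reply_to(message, message.text)

bot.polling()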
def project_process(index, start, end):
    """Compute the metrics for the project process section of the enriched
    github issues index.

    Returns a dictionary containing "bmi_metrics", "time_to_close_metrics",
    "time_to_close_review_metrics" and "patchsets_metrics" as the keys and
    the related Metrics as the values.
    time_to_close_title and time_to_close_review_title contain the file names
    to be used for time_to_close_metrics and time_to_close_review_metrics
    metrics data.

    :param index: index object
    :param start: start date to get the data from
    :param end: end date to get the data up to
    :return: dictionary with the value of the metrics
    """
    results = {
        "bmi_metrics": [BMIPR(index, start, end)],
        "time_to_close_metrics": [],
        "time_to_close_review_metrics": [DaysToClosePRAverage(index, start, end),
                                         DaysToClosePRMedian(index, start, end)],
        "patchsets_metrics": []
    }

    return results
0.002956
def find_near_matches_levenshtein(subsequence, sequence, max_l_dist): """Find near-matches of the subsequence in the sequence. This chooses a suitable fuzzy search implementation according to the given parameters. Returns a list of fuzzysearch.Match objects describing the matching parts of the sequence. """ if not subsequence: raise ValueError('Given subsequence is empty!') if max_l_dist < 0: raise ValueError('Maximum Levenshtein distance must be >= 0!') if max_l_dist == 0: return [ Match(start_index, start_index + len(subsequence), 0) for start_index in search_exact(subsequence, sequence) ] elif len(subsequence) // (max_l_dist + 1) >= 3: return find_near_matches_levenshtein_ngrams(subsequence, sequence, max_l_dist) else: matches = find_near_matches_levenshtein_linear_programming(subsequence, sequence, max_l_dist) match_groups = group_matches(matches) best_matches = [get_best_match_in_group(group) for group in match_groups] return sorted(best_matches)
0.001484
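A hedged usage sketch through the package's public entry point, which dispatches to helpers like the one above; assumes the fuzzysearch package is installed.

from fuzzysearch import find_near_matches

# Allow at most one edit (insert/delete/substitute) per match.
for m in find_near_matches('PATTERN', 'xxPATERNzz---PATTERNN', max_l_dist=1):
    print(m.start, m.end, m.dist)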
def append(self, item): """ Append to object, if object is list. """ if self.meta_type == 'dict': raise AssertionError('Cannot append to object of `dict` base type!') if self.meta_type == 'list': self._list.append(item) return
0.010067
def deserialize(self, content_type, strdata): """Deserialize string of given content type. `self` unused in this implementation. >>> s = teststore() >>> s.deserialize('application/json', '{"id": "1", "name": "Toto"}') {u'id': u'1', u'name': u'Toto'} >>> s.deserialize('text/plain', 'id: 1, name: Toto') Traceback (most recent call last): ... ValueError: Unsupported content type "text/plain" """ if content_type != 'application/json': raise ValueError('Unsupported content type "' + content_type + '"') return json.loads(strdata)
0.00311
def _default_step_sizes(reference_vertex): """Chooses default step sizes according to [Gao and Han(2010)][3].""" # Step size to choose when the coordinate is zero. small_sizes = tf.ones_like(reference_vertex) * 0.00025 # Step size to choose when the coordinate is non-zero. large_sizes = reference_vertex * 0.05 return tf.where(tf.abs(reference_vertex) < _EPSILON, small_sizes, large_sizes)
0.015982
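A NumPy analogue of the same rule, illustrative rather than the TF code: a fixed small step where a coordinate is (near) zero, otherwise 5% of its value. The function name and the epsilon default are assumptions.

import numpy as np

def default_step_sizes(reference_vertex, epsilon=1e-8):
    reference_vertex = np.asarray(reference_vertex, dtype=float)
    small = np.full_like(reference_vertex, 0.00025)   # used where the coordinate is ~0
    large = reference_vertex * 0.05                   # 5% of the coordinate otherwise
    return np.where(np.abs(reference_vertex) < epsilon, small, large)

print(default_step_sizes([0.0, 2.0, -4.0]))   # values: 0.00025, 0.1, -0.2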
def calc_contours(data, num_contours): """Get sets of contour points for numpy array `data`. `num_contours` specifies the number (int) of contours to make. Returns a list of numpy arrays of points--each array makes a polygon if plotted as such. """ mn = np.nanmean(data) top = np.nanmax(data) levels = np.linspace(mn, top, num_contours) return get_contours(data, levels)
0.002463
def update_log_type(self, logType, name=None, level=None, stdoutFlag=None, fileFlag=None, color=None, highlight=None, attributes=None): """ update a logtype. :Parameters: #. logType (string): The logtype. #. name (None, string): The logtype name. If None, name will be set to logtype. #. level (number): The level of logging. #. stdoutFlag (None, boolean): Force standard output logging flag. If None, flag will be set according to minimum and maximum levels. #. fileFlag (None, boolean): Force file logging flag. If None, flag will be set according to minimum and maximum levels. #. color (None, string): The logging text color. The defined colors are:\n black , red , green , orange , blue , magenta , cyan , grey , dark grey , light red , light green , yellow , light blue , pink , light cyan #. highlight (None, string): The logging text highlight color. The defined highlights are:\n black , red , green , orange , blue , magenta , cyan , grey #. attributes (None, string): The logging text attribute. The defined attributes are:\n bold , underline , blink , invisible , strike through **N.B** *logging color, highlight and attributes are not allowed on all types of streams.* """ # check logType assert logType in self.__logTypeStdoutFlags.keys(), "logType '%s' is not defined" %logType # get None updates if name is None: name = self.__logTypeNames[logType] if level is None: level = self.__logTypeLevels[logType] if stdoutFlag is None: stdoutFlag = self.__logTypeStdoutFlags[logType] if fileFlag is None: fileFlag = self.__logTypeFileFlags[logType] if color is None: color = self.__logTypeColor[logType] if highlight is None: highlight = self.__logTypeHighlight[logType] if attributes is None: attributes = self.__logTypeAttributes[logType] # update log type self.__set_log_type(logType=logType, name=name, level=level, stdoutFlag=stdoutFlag, fileFlag=fileFlag, color=color, highlight=highlight, attributes=attributes)
0.011159
def parse_chains(data): """ Parse the chain definitions. """ chains = odict() for line in data.splitlines(True): m = re_chain.match(line) if m: policy = None if m.group(2) != '-': policy = m.group(2) chains[m.group(1)] = { 'policy': policy, 'packets': int(m.group(3)), 'bytes': int(m.group(4)), } return chains
0.002165
def clean(context, days_ago, yes): """Clean up files from "old" analyses runs.""" number_of_days_ago = dt.datetime.now() - dt.timedelta(days=days_ago) analyses = context.obj['store'].analyses( status='completed', before=number_of_days_ago, deleted=False, ) for analysis_obj in analyses: LOG.debug(f"checking analysis: {analysis_obj.family} ({analysis_obj.id})") latest_analysis = context.obj['store'].analyses(family=analysis_obj.family).first() if analysis_obj != latest_analysis: print(click.style(f"{analysis_obj.family}: family has been re-started", fg='yellow')) else: print(f"delete analysis: {analysis_obj.family} ({analysis_obj.id})") context.invoke(delete, analysis_id=analysis_obj.id, yes=yes)
0.006135
def _prepare_fetch(self, request: Request, response: Response): '''Prepare for a fetch. Coroutine. ''' self._request = request self._response = response yield from self._init_stream() connection_closed = self._control_connection.closed() if connection_closed: self._login_table.pop(self._control_connection, None) yield from self._control_stream.reconnect() request.address = self._control_connection.address connection_reused = not connection_closed self.event_dispatcher.notify(self.Event.begin_control, request, connection_reused=connection_reused) if connection_closed: yield from self._commander.read_welcome_message() yield from self._log_in() self._response.request = request
0.003576
def config_oauth(app):
    " Configure oauth support. "
    for name in PROVIDERS:
        config = app.config.get('OAUTH_%s' % name.upper())
        if not config:
            continue

        if name not in oauth.remote_apps:
            remote_app = oauth.remote_app(name, **config)
        else:
            remote_app = oauth.remote_apps[name]

        client_class = CLIENTS.get(name)
        client_class(app, remote_app)
0.00463