def get_network_addresses(self):
    """For each network configured, return corresponding address and
       hostname or vip (if available).

    Returns a list of tuples of the form:

        [(address_in_net_a, hostname_in_net_a),
         (address_in_net_b, hostname_in_net_b),
         ...]

    or, if no hostname(s) available:

        [(address_in_net_a, vip_in_net_a),
         (address_in_net_b, vip_in_net_b),
         ...]

    or, if no vip(s) available:

        [(address_in_net_a, address_in_net_a),
         (address_in_net_b, address_in_net_b),
         ...]
    """
    addresses = []
    for net_type in [INTERNAL, ADMIN, PUBLIC]:
        net_config = config(ADDRESS_MAP[net_type]['config'])
        # NOTE(jamespage): Fallback must always be private address
        #                  as this is used to bind services on the
        #                  local unit.
        fallback = unit_get("private-address")
        if net_config:
            addr = get_address_in_network(net_config, fallback)
        else:
            try:
                addr = network_get_primary_address(
                    ADDRESS_MAP[net_type]['binding']
                )
            except (NotImplementedError, NoNetworkBinding):
                addr = fallback
        endpoint = resolve_address(net_type)
        addresses.append((addr, endpoint))
    return sorted(set(addresses))
def get_real_stored_key(self, session_key):
    """Return the real key name in redis storage

    @return string
    """
    prefix = settings.SESSION_REDIS_PREFIX
    if not prefix:
        return session_key
    return ':'.join([prefix, session_key])
def resolve_label_components(self, module=None, function=None, offset=None):
    """
    Resolve the memory address of the given module, function and/or offset.

    @note:
        If multiple modules with the same name are loaded, the label may be
        resolved at any of them. For a more precise way to resolve functions
        use the base address to get the L{Module} object (see
        L{Process.get_module}) and then call L{Module.resolve}.

        If no module name is specified in the label, the function may be
        resolved in any loaded module. If you want to resolve all functions
        with that name in all processes, call L{Process.iter_modules} to
        iterate through all loaded modules, and then try to resolve the
        function in each one of them using L{Module.resolve}.

    @type  module: None or str
    @param module: (Optional) Module name.

    @type  function: None, str or int
    @param function: (Optional) Function name or ordinal.

    @type  offset: None or int
    @param offset: (Optional) Offset value.

        If C{function} is specified, offset from the function.
        If C{function} is C{None}, offset from the module.

    @rtype:  int
    @return: Memory address pointed to by the label.

    @raise ValueError: The label is malformed or impossible to resolve.
    @raise RuntimeError: Cannot resolve the module or function.
    """

    # Default address if no module or function are given.
    # An offset may be added later.
    address = 0

    # Resolve the module.
    # If the module is not found, check for the special symbol "main".
    if module:
        modobj = self.get_module_by_name(module)
        if not modobj:
            if module == "main":
                modobj = self.get_main_module()
            else:
                raise RuntimeError("Module %r not found" % module)

        # Resolve the exported function or debugging symbol.
        # If all else fails, check for the special symbol "start".
        if function:
            address = modobj.resolve(function)
            if address is None:
                address = modobj.resolve_symbol(function)
                if address is None:
                    if function == "start":
                        address = modobj.get_entry_point()
                    if address is None:
                        msg = "Symbol %r not found in module %s"
                        raise RuntimeError(msg % (function, module))

        # No function, use the base address.
        else:
            address = modobj.get_base()

    # Resolve the function in any module.
    # If all else fails, check for the special symbols "main" and "start".
    elif function:
        for modobj in self.iter_modules():
            address = modobj.resolve(function)
            if address is not None:
                break
        if address is None:
            if function == "start":
                modobj = self.get_main_module()
                address = modobj.get_entry_point()
            elif function == "main":
                modobj = self.get_main_module()
                address = modobj.get_base()
            else:
                msg = "Function %r not found in any module" % function
                raise RuntimeError(msg)

    # Return the address plus the offset.
    if offset:
        address = address + offset
    return address
def generate_if_then_else(self):
    """
    Implementation of if-then-else.

    .. code-block:: python

        {
            'if': {
                'exclusiveMaximum': 0,
            },
            'then': {
                'minimum': -10,
            },
            'else': {
                'multipleOf': 2,
            },
        }

    Valid values are any between -10 and 0 or any multiple of two.
    """
    with self.l('try:'):
        self.generate_func_code_block(
            self._definition['if'],
            self._variable,
            self._variable_name,
            clear_variables=True
        )
    with self.l('except JsonSchemaException:'):
        if 'else' in self._definition:
            self.generate_func_code_block(
                self._definition['else'],
                self._variable,
                self._variable_name,
                clear_variables=True
            )
        else:
            self.l('pass')
    if 'then' in self._definition:
        with self.l('else:'):
            self.generate_func_code_block(
                self._definition['then'],
                self._variable,
                self._variable_name,
                clear_variables=True
            )
def to_segmentlistdict(self):
    """
    Return a segmentlistdict object describing the instruments and times
    spanned by the entries in this Cache.  The return value is coalesced.
    """
    d = segments.segmentlistdict()
    for entry in self:
        d |= entry.segmentlistdict
    return d
def expand_envvars(env):
    '''
    Expand all environment variables in an environment dict

    :param env: Environment dict
    '''
    out_env = {}

    for k, v in env.items():
        out_env[k] = Template(v).safe_substitute(env)

    # Expand twice to make sure we expand everything we possibly can
    for k, v in out_env.items():
        out_env[k] = Template(v).safe_substitute(out_env)

    return out_env
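# A minimal usage sketch for expand_envvars (made-up values, not from the
# original source; assumes string.Template is in scope as above). It shows
# why the second expansion pass matters for variables that reference other
# expanded variables:
from string import Template

env = {
    'ROOT': '/opt/app',
    'BIN': '$ROOT/bin',    # depends on ROOT
    'TOOL': '$BIN/tool',   # depends on BIN, only resolved on the second pass
}
print(expand_envvars(env))
# {'ROOT': '/opt/app', 'BIN': '/opt/app/bin', 'TOOL': '/opt/app/bin/tool'}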
def parse(cls, j):
    """
    :param dict j: JSON response from TryHaskell.
    :rtype: TryHaskell.Result
    """
    error = j.get('error')
    if error:
        return cls._bad_result(error)
    success = j.get('success')
    if success:
        try:
            return cls.Result(ok=True, **j.get('success'))
        except (TypeError, ValueError) as e:
            raise cls.Error(e)
    # If there was neither a success nor an error, the service
    # is probably expecting something from stdin, which is not
    # currently implemented.
    # TODO: Implement stdin.
    return cls._bad_result('Unsupported operation.')
def plot_dropout_rate_heterogeneity(
    model,
    suptitle="Heterogeneity in Dropout Probability",
    xlabel="Dropout Probability p",
    ylabel="Density",
    suptitle_fontsize=14,
    **kwargs
):
    """
    Plot the estimated beta distribution of p.

    p - (customers' probability of dropping out immediately after a
    transaction).

    Parameters
    ----------
    model: lifetimes model
        A fitted lifetimes model, for now only for BG/NBD
    suptitle: str, optional
        Figure suptitle
    xlabel: str, optional
        Figure xlabel
    ylabel: str, optional
        Figure ylabel
    kwargs
        Passed into the matplotlib.pyplot.plot command.

    Returns
    -------
    axes: matplotlib.AxesSubplot
    """
    from matplotlib import pyplot as plt

    a, b = model._unload_params("a", "b")
    beta_mean = a / (a + b)
    beta_var = a * b / ((a + b) ** 2) / (a + b + 1)

    rv = stats.beta(a, b)
    lim = rv.ppf(0.99)
    x = np.linspace(0, lim, 100)

    fig, ax = plt.subplots(1)
    fig.suptitle(suptitle, fontsize=suptitle_fontsize, fontweight="bold")
    ax.set_title("mean: {:.3f}, var: {:.3f}".format(beta_mean, beta_var))
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.plot(x, rv.pdf(x), **kwargs)
    return ax
def candle_lighting(self):
    """Return the time for candle lighting, or None if not applicable."""
    today = HDate(gdate=self.date, diaspora=self.location.diaspora)
    tomorrow = HDate(gdate=self.date + dt.timedelta(days=1),
                     diaspora=self.location.diaspora)

    # If today is a Yom Tov or Shabbat, and tomorrow is a Yom Tov or
    # Shabbat return the havdalah time as the candle lighting time.
    if ((today.is_yom_tov or today.is_shabbat)
            and (tomorrow.is_yom_tov or tomorrow.is_shabbat)):
        return self._havdalah_datetime

    # Otherwise, if today is Friday or erev Yom Tov, return candle
    # lighting.
    if tomorrow.is_shabbat or tomorrow.is_yom_tov:
        return (self.zmanim["sunset"]
                - dt.timedelta(minutes=self.candle_lighting_offset))
    return None
def setCollapsed(self, state):
    """
    Sets whether or not this toolbar is in a collapsed state.

    :return     <bool> changed
    """
    if state == self._collapsed:
        return False

    self._collapsed = state
    self.refreshButton()

    if not self.signalsBlocked():
        self.collapseToggled.emit(state)

    return True
def as_dash_app(self):
    '''
    Return a DjangoDash instance of the dash application
    '''
    stateless_dash_app = getattr(self, '_stateless_dash_app_instance', None)
    if not stateless_dash_app:
        stateless_dash_app = get_stateless_by_name(self.app_name)
        setattr(self, '_stateless_dash_app_instance', stateless_dash_app)
    return stateless_dash_app
async def parse_user_results(soup):
    """
    Parse a page of user results

    :param soup: Bs4 Class object
    :return: A list of dictionaries containing a name and join date
    """
    soup = list(soup.find_all('table', class_='stripe')[0].children)[1:]
    users = []
    for item in soup:
        t_u = {'name': None, 'joined': None}
        t_u['name'] = list(item.children)[0].a.string
        t_u['joined'] = list(item.children)[1].string
        users.append(t_u)
        del t_u
    return users
def stop_all(self, run_order=-1):
    """Runs stop method on all modules less than the passed-in run_order.
    Used when target is exporting itself mid-build, so we clean up state
    before committing run files etc.
    """
    shutit_global.shutit_global_object.yield_to_draw()
    # sort them so they're stopped in reverse order
    for module_id in self.module_ids(rev=True):
        shutit_module_obj = self.shutit_map[module_id]
        if run_order == -1 or shutit_module_obj.run_order <= run_order:
            if self.is_installed(shutit_module_obj):
                if not shutit_module_obj.stop(self):
                    self.fail(
                        'failed to stop: ' + module_id,
                        shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').shutit_pexpect_child
                    )
def list_stack(profile=None):
    '''
    Return a list of available stacks (heat stack-list)

    profile
        Profile to use

    CLI Example:

    .. code-block:: bash

        salt '*' heat.list_stack profile=openstack1
    '''
    ret = {}
    h_client = _auth(profile)
    for stack in h_client.stacks.list():
        links = {}
        for link in stack.links:
            links[link['rel']] = link['href']
        ret[stack.stack_name] = {
            'status': stack.stack_status,
            'id': stack.id,
            'name': stack.stack_name,
            'creation': stack.creation_time,
            'owner': stack.stack_owner,
            'reason': stack.stack_status_reason,
            'links': links,
        }
    return ret
def opendocs(where='index', how='default'):
    '''
    Rebuilds documentation and opens it in your browser.

    Use the first argument to specify how it should be opened:

        `d` or `default`: Open in new tab or new window, using the default
            method of your browser.

        `t` or `tab`: Open documentation in new tab.

        `n`, `w` or `window`: Open documentation in new window.
    '''
    import webbrowser
    docs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'docs')
    index = os.path.join(docs_dir, '_build/html/%s.html' % where)
    builddocs('html')
    url = 'file://%s' % os.path.abspath(index)
    if how in ('d', 'default'):
        webbrowser.open(url)
    elif how in ('t', 'tab'):
        webbrowser.open_new_tab(url)
    elif how in ('n', 'w', 'window'):
        webbrowser.open_new(url)
def __is_control_flow(self):
    """
    Private method to tell if the instruction pointed to by the program
    counter is a control flow instruction.

    Currently only works for x86 and amd64 architectures.
    """
    jump_instructions = (
        'jmp', 'jecxz', 'jcxz',
        'ja', 'jnbe', 'jae', 'jnb', 'jb', 'jnae', 'jbe', 'jna', 'jc', 'je',
        'jz', 'jnc', 'jne', 'jnz', 'jnp', 'jpo', 'jp', 'jpe', 'jg', 'jnle',
        'jge', 'jnl', 'jl', 'jnge', 'jle', 'jng', 'jno', 'jns', 'jo', 'js'
    )
    call_instructions = ('call', 'ret', 'retn')
    loop_instructions = ('loop', 'loopz', 'loopnz', 'loope', 'loopne')
    control_flow_instructions = call_instructions + loop_instructions + \
                                jump_instructions
    isControlFlow = False
    instruction = None
    if self.pc is not None and self.faultDisasm:
        for disasm in self.faultDisasm:
            if disasm[0] == self.pc:
                instruction = disasm[2].lower().strip()
                break
    if instruction:
        for x in control_flow_instructions:
            if x in instruction:
                isControlFlow = True
                break
    return isControlFlow
def process_objects(kls):
    """
    Applies default Meta properties.
    """
    # first add a Meta object if not exists
    if 'Meta' not in kls.__dict__:
        kls.Meta = type('Meta', (object,), {})

    if 'unique_together' not in kls.Meta.__dict__:
        kls.Meta.unique_together = []

    # set verbose_name(s) if not already set
    if 'verbose_name' not in kls.Meta.__dict__:
        kls.Meta.verbose_name = kls.__name__
    if 'verbose_name_plural' not in kls.Meta.__dict__:
        kls.Meta.verbose_name_plural = kls.Meta.verbose_name + 's'
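# A minimal usage sketch for process_objects (the Person class below is a
# hypothetical model, not from the original source), showing the Meta
# defaults that get filled in:
class Person:
    pass

process_objects(Person)
print(Person.Meta.verbose_name)         # 'Person'
print(Person.Meta.verbose_name_plural)  # 'Persons'
print(Person.Meta.unique_together)      # []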
def end(self):
    """Close the V interface.

    Args::

        No argument

    Returns::

        None

    C library equivalent : Vend
    """
    # Note: Vend is just a macro; use 'Vfinish' instead
    # Note also that the same C function is used to end
    # the VS interface
    _checkErr('vend', _C.Vfinish(self._hdf_inst._id),
              "cannot terminate V interface")
    self._hdf_inst = None
def bucket(self, bucket_name, user_project=None):
    """Factory constructor for bucket object.

    .. note::
        This will not make an HTTP request; it simply instantiates
        a bucket object owned by this client.

    :type bucket_name: str
    :param bucket_name: The name of the bucket to be instantiated.

    :type user_project: str
    :param user_project: (Optional) the project ID to be billed for API
                         requests made via the bucket.

    :rtype: :class:`google.cloud.storage.bucket.Bucket`
    :returns: The bucket object created.
    """
    return Bucket(client=self, name=bucket_name, user_project=user_project)
def clear(self):
    """
    Remove all cache entries.
    """
    db = sqlite3.connect(self.path)
    c = db.cursor()
    c.execute("DELETE FROM dirhashcache")
    db.commit()
    db.close()
def sum(self, values, axis=0, dtype=None):
    """compute the sum over each group

    Parameters
    ----------
    values : array_like, [keys, ...]
        values to sum per group
    axis : int, optional
        alternative reduction axis for values
    dtype : output dtype

    Returns
    -------
    unique: ndarray, [groups]
        unique keys
    reduced : ndarray, [groups, ...]
        value array, reduced over groups
    """
    values = np.asarray(values)
    return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def _add_namespaces(self):
    """
    Add namespaces to NIDM document.
    """
    self.doc.add_namespace(NIDM)
    self.doc.add_namespace(NIIRI)
    self.doc.add_namespace(CRYPTO)
    self.doc.add_namespace(DCT)
    self.doc.add_namespace(DC)
    self.doc.add_namespace(NFO)
    self.doc.add_namespace(OBO)
    self.doc.add_namespace(SCR)
    self.doc.add_namespace(NIF)
def is_valid(arxiv_id):
    """
    Check that a given arXiv ID is a valid one.

    :param arxiv_id: The arXiv ID to be checked.
    :returns: Boolean indicating whether the arXiv ID is valid or not.

    >>> is_valid('1506.06690')
    True

    >>> is_valid('1506.06690v1')
    True

    >>> is_valid('arXiv:1506.06690')
    True

    >>> is_valid('arXiv:1506.06690v1')
    True

    >>> is_valid('arxiv:1506.06690')
    True

    >>> is_valid('arxiv:1506.06690v1')
    True

    >>> is_valid('math.GT/0309136')
    True

    >>> is_valid('abcdf')
    False

    >>> is_valid('bar1506.06690foo')
    False

    >>> is_valid('mare.GG/0309136')
    False
    """
    match = REGEX.match(arxiv_id)
    return (match is not None) and (match.group(0) == arxiv_id)
def run(xmin, ymin, xmax, ymax, step, range_, range_x, range_y, t):
    pt = zeros((range_x, range_y, 3))
    "omp parallel for private(i,j,k,tmp)"
    for i in xrange(range_x):
        for j in xrange(range_y):
            pt[i, j, 0], pt[i, j, 1] = (xmin + step * i) * 180 / math.pi, \
                                       (ymin + step * j) * 180 / math.pi
            for k in xrange(t.shape[0]):
                tmp = 6368. * math.acos(
                    math.cos(xmin + step * i) * math.cos(t[k, 0]) *
                    math.cos((ymin + step * j) - t[k, 1]) +
                    math.sin(xmin + step * i) * math.sin(t[k, 0]))
                if tmp < range_:
                    pt[i, j, 2] += t[k, 2] / (1 + tmp)
    return pt
def transform_metadata(blob):
    """
    Transforms metadata types about channels / users / bots / etc. into a
    dict rather than a list in order to enable faster lookup.
    """
    o = {}
    for e in blob:
        i = e[u'id']
        o[i] = e
    return o
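# A minimal usage sketch for transform_metadata (made-up Slack-style
# records, not from the original source): index a list of objects by
# their 'id' field for O(1) lookup instead of a list scan.
users = [
    {u'id': u'U1', u'name': u'alice'},
    {u'id': u'U2', u'name': u'bob'},
]
by_id = transform_metadata(users)
print(by_id[u'U2'][u'name'])  # 'bob'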
def detach(self, attachments):
    """Remove an attachment, or a list of attachments, from this item. If
    the item has already been saved, the attachments will be deleted on the
    server immediately. If the item has not yet been saved, the attachments
    will simply not be created on the server when the item is saved.

    Removing attachments from an existing item will update the changekey of
    the item.
    """
    if not is_iterable(attachments, generators_allowed=True):
        attachments = [attachments]
    for a in attachments:
        if a.parent_item is not self:
            raise ValueError('Attachment does not belong to this item')
        if self.id:
            # Item is already created. Detach the attachment server-side now
            a.detach()
        if a in self.attachments:
            self.attachments.remove(a)
def search(self, paths_rows=None, lat=None, lon=None, address=None, start_date=None, end_date=None,
           cloud_min=None, cloud_max=None, limit=1, geojson=False):
    """
    The main method of Search class. It searches Development Seed's Landsat API.

    :param paths_rows:
        A string in this format: "003,003,004,004". Must be in pairs and separated by comma.
    :type paths_rows:
        String
    :param lat:
        The latitude
    :type lat:
        String, float, integer
    :param lon:
        The longitude
    :type lon:
        String, float, integer
    :param address:
        The address
    :type address:
        String
    :param start_date:
        Date string. format: YYYY-MM-DD
    :type start_date:
        String
    :param end_date:
        date string. format: YYYY-MM-DD
    :type end_date:
        String
    :param cloud_min:
        float specifying the minimum percentage. e.g. 4.3
    :type cloud_min:
        float
    :param cloud_max:
        float specifying the maximum percentage. e.g. 78.9
    :type cloud_max:
        float
    :param limit:
        integer specifying the maximum results to return.
    :type limit:
        integer
    :param geojson:
        boolean specifying whether to return a geojson object
    :type geojson:
        boolean

    :returns:
        dict

    :example:
        >>> search = Search()
        >>> search('003,003', '2014-01-01', '2014-06-01')
        >>> {
                'status': u'SUCCESS',
                'total_returned': 1,
                'total': 1,
                'limit': 1,
                'results': [
                    {
                        'sat_type': u'L8',
                        'sceneID': u'LC80030032014142LGN00',
                        'date': u'2014-05-22',
                        'path': u'003',
                        'thumbnail': u'http://....../landsat_8/2014/003/003/LC80030032014142LGN00.jpg',
                        'cloud': 33.36,
                        'row': u'003'
                    }
                ]
            }
    """

    search_string = self.query_builder(paths_rows, lat, lon, address, start_date, end_date,
                                       cloud_min, cloud_max)

    # Have to manually build the URI to bypass requests URI encoding
    # The api server doesn't accept encoded URIs
    r = requests.get('%s?search=%s&limit=%s' % (self.api_url, search_string, limit))

    r_dict = json.loads(r.text)
    result = {}

    if 'error' in r_dict:
        result['status'] = u'error'
        result['code'] = r_dict['error']['code']
        result['message'] = r_dict['error']['message']

    elif 'meta' in r_dict:
        if geojson:
            result = {
                'type': 'FeatureCollection',
                'features': []
            }
            for r in r_dict['results']:
                feature = {
                    'type': 'Feature',
                    'properties': {
                        'sceneID': r['sceneID'],
                        'row': three_digit(r['row']),
                        'path': three_digit(r['path']),
                        'thumbnail': r['browseURL'],
                        'date': r['acquisitionDate'],
                        'cloud': r['cloud_coverage']
                    },
                    'geometry': {
                        'type': 'Polygon',
                        'coordinates': [
                            [
                                [r['upperLeftCornerLongitude'], r['upperLeftCornerLatitude']],
                                [r['lowerLeftCornerLongitude'], r['lowerLeftCornerLatitude']],
                                [r['lowerRightCornerLongitude'], r['lowerRightCornerLatitude']],
                                [r['upperRightCornerLongitude'], r['upperRightCornerLatitude']],
                                [r['upperLeftCornerLongitude'], r['upperLeftCornerLatitude']]
                            ]
                        ]
                    }
                }
                result['features'].append(feature)

        else:
            result['status'] = u'SUCCESS'
            result['total'] = r_dict['meta']['found']
            result['limit'] = r_dict['meta']['limit']
            result['total_returned'] = len(r_dict['results'])
            result['results'] = [{'sceneID': i['sceneID'],
                                  'sat_type': u'L8',
                                  'path': three_digit(i['path']),
                                  'row': three_digit(i['row']),
                                  'thumbnail': i['browseURL'],
                                  'date': i['acquisitionDate'],
                                  'cloud': i['cloud_coverage']}
                                 for i in r_dict['results']]

    return result
def validate_cmd_response_nonce(got, used):
    """
    Check that the returned nonce matches nonce used in request.

    A request nonce of 000000000000 means the HSM should generate a nonce
    internally though, so if 'used' is all zeros we actually check that
    'got' does NOT match 'used'.
    """
    if used == '000000000000'.decode('hex'):
        if got == used:
            raise pyhsm.exception.YHSM_Error(
                "Bad nonce in response (got %s, expected HSM generated nonce)"
                % (got.encode('hex')))
        return got
    return validate_cmd_response_str('nonce', got, used)
def format_version(version, build_number=None, build_tag=BUILD_TAG):
    """
    Format a version string for use in packaging.

    >>> format_version([0,3,5])
    '0.3.5'

    >>> format_version([8, 8, 9], 23676)
    '8.8.9-jenkins-23676'

    >>> format_version([8, 8, 9], 23676, 'koekjes')
    '8.8.9-koekjes-23676'
    """
    formatted_version = ".".join(map(str, version))

    if build_number is not None:
        return "{formatted_version}-{build_tag}-{build_number}".format(**locals())

    return formatted_version
def get_unique_scan_parameter_combinations(meta_data_array, scan_parameters=None,
                                           scan_parameter_columns_only=False):
    '''Takes the numpy meta data array and returns the first rows with unique
    combinations of different scan parameter values for selected scan
    parameters. If scan_parameter_columns_only is true, the returned
    histogram only contains the selected columns.

    Parameters
    ----------
    meta_data_array : numpy.ndarray
    scan_parameters : list of string, None
        Scan parameter names to use. If None all are used.
    scan_parameter_columns_only : bool

    Returns
    -------
    numpy.Histogram
    '''
    try:
        last_not_parameter_column = meta_data_array.dtype.names.index('error_code')  # for interpreted meta_data
    except ValueError:
        last_not_parameter_column = meta_data_array.dtype.names.index('error')  # for raw data file meta_data
    if last_not_parameter_column == len(meta_data_array.dtype.names) - 1:  # no meta_data found
        return
    if scan_parameters is None:
        return unique_row(meta_data_array,
                          use_columns=range(4, len(meta_data_array.dtype.names)),
                          selected_columns_only=scan_parameter_columns_only)
    else:
        use_columns = []
        for scan_parameter in scan_parameters:
            try:
                use_columns.append(meta_data_array.dtype.names.index(scan_parameter))
            except ValueError:
                logging.error('No scan parameter ' + scan_parameter + ' found')
                raise RuntimeError('Scan parameter not found')
        return unique_row(meta_data_array, use_columns=use_columns,
                          selected_columns_only=scan_parameter_columns_only)
def api_version_elb_backend(*args, **kwargs):
    """
    ELB and ELBV2 (Classic and Application load balancers) use the same
    hostname and url space. To differentiate them we must read the
    `Version` parameter out of the url-encoded request body.

    TODO: There has _got_ to be a better way to do this. Please help us
    think of one.
    """
    request = args[0]

    if hasattr(request, 'values'):
        # boto3
        version = request.values.get('Version')
    elif isinstance(request, AWSPreparedRequest):
        # boto in-memory
        version = parse_qs(request.body).get('Version')[0]
    else:
        # boto in server mode
        request.parse_request()
        version = request.querystring.get('Version')[0]

    if '2012-06-01' == version:
        return ELBResponse.dispatch(*args, **kwargs)
    elif '2015-12-01' == version:
        return ELBV2Response.dispatch(*args, **kwargs)
    else:
        raise Exception("Unknown ELB API version: {}".format(version))
def update_priority(self, tree_idx_list, priority_list):
    """ Update priorities of the elements in the tree """
    for tree_idx, priority, segment_tree in zip(tree_idx_list, priority_list,
                                                self.segment_trees):
        segment_tree.update(tree_idx, priority)
def query_framebuffer(self, screen_id):
    """Queries the graphics updates targets for a screen.

    in screen_id of type int

    return framebuffer of type :class:`IFramebuffer`
    """
    if not isinstance(screen_id, baseinteger):
        raise TypeError("screen_id can only be an instance of type baseinteger")
    framebuffer = self._call("queryFramebuffer", in_p=[screen_id])
    framebuffer = IFramebuffer(framebuffer)
    return framebuffer
def log_value(self, name, value, step=None):
    """Log new value for given name on given step.

    Args:
        name (str): name of the variable (it will be converted to a valid
            tensorflow summary name).
        value (float): this is a real number to be logged as a scalar.
        step (int): non-negative integer used for visualization: you can
            log several different variables on one step, but should not log
            different values of the same variable on the same step (this is
            not checked).
    """
    if isinstance(value, six.string_types):
        raise TypeError('"value" should be a number, got {}'
                        .format(type(value)))
    value = float(value)
    self._check_step(step)
    tf_name = self._ensure_tf_name(name)
    summary = self._scalar_summary(tf_name, value, step)
    self._log_summary(tf_name, summary, value, step=step)
def get_credentials(options, environment):
    """ Get credentials or prompt for them from options """
    if options['--username'] or options['--auth']:
        if not options['--username']:
            options['<username>'] = lib.prompt("Please enter the username for %s..." % environment)
        if not options['--password']:
            options['<password>'] = lib.prompt("Please enter the password for %s..." % environment,
                                               secret=True)
    return options
def set_environment(self, environment):
    """Set the environment for all nodes."""
    todo = deque([self])
    while todo:
        node = todo.popleft()
        node.environment = environment
        todo.extend(node.iter_child_nodes())
    return self
def delete_sql(table, filter):
    '''
    >>> delete_sql('tbl', {'foo': 10, 'bar': 'baz'})
    ('DELETE FROM tbl WHERE bar=$1 AND foo=$2', ['baz', 10])
    '''
    keys, values = _split_dict(filter)
    where = _pairs(keys)
    sql = 'DELETE FROM {} WHERE {}'.format(table, where)
    return sql, values
def swagger_ui_script_template(request, **kwargs):
    """
    :param request:
    :return:

    Generates the <script> code that bootstraps Swagger UI, it will be
    injected into index template
    """
    swagger_spec_url = request.route_url('cornice_swagger.open_api_path')
    template = pkg_resources.resource_string(
        'cornice_swagger',
        'templates/index_script_template.html'
    ).decode('utf8')
    return Template(template).safe_substitute(
        swagger_spec_url=swagger_spec_url,
    )
def get(self, hook_id):
    """Get a webhook."""
    path = '/'.join(['notification', 'webhook', hook_id])
    return self.rachio.get(path)
def record_to_objects(self):
    """Create config records to match the file metadata"""
    from ..util import AttrDict

    fr = self.record
    contents = fr.unpacked_contents
    if not contents:
        return

    ad = AttrDict(contents)

    # Get time that filesystem was synchronized to the File record.
    # Maybe use this to avoid overwriting configs that changed by bundle program.
    # fs_sync_time = self._dataset.config.sync[self.file_const][self.file_to_record]

    self._dataset.config.metadata.set(ad)
    self._dataset._database.commit()
    return ad
def _set_m2ms(self, old_m2ms):
    """
    Creates the same m2m relationships that the old object had.
    """
    for k, v in old_m2ms.items():
        if v:
            setattr(self, k, v)
def _set_avg_session_metrics(session_group):
    """Sets the metrics for the group to be the average of its sessions.

    The resulting session group metrics consist of the union of metrics
    across the group's sessions. The value of each session group metric is
    the average of that metric's values across the sessions in the group.
    The 'step' and 'wall_time_secs' fields of the resulting MetricValue
    field in the session group are populated with the corresponding
    averages (truncated for 'step') as well.

    Args:
      session_group: A SessionGroup protobuffer.
    """
    assert session_group.sessions, 'SessionGroup cannot be empty.'
    # Algorithm: Iterate over all (session, metric) pairs and maintain a
    # dict from _MetricIdentifier to _MetricStats objects.
    # Then use the final dict state to compute the average for each metric.
    metric_stats = collections.defaultdict(_MetricStats)
    for session in session_group.sessions:
        for metric_value in session.metric_values:
            metric_name = _MetricIdentifier(group=metric_value.name.group,
                                            tag=metric_value.name.tag)
            stats = metric_stats[metric_name]
            stats.total += metric_value.value
            stats.count += 1
            stats.total_step += metric_value.training_step
            stats.total_wall_time_secs += metric_value.wall_time_secs

    del session_group.metric_values[:]
    for (metric_name, stats) in six.iteritems(metric_stats):
        session_group.metric_values.add(
            name=api_pb2.MetricName(group=metric_name.group,
                                    tag=metric_name.tag),
            value=float(stats.total) / float(stats.count),
            training_step=stats.total_step // stats.count,
            wall_time_secs=stats.total_wall_time_secs / stats.count)
def tuple_type_compare(types0, types1):
    """doufo.tuple_type_compare: compare two types.

    If `types0` is 'bigger' than `types1`, return negative (<0);
    otherwise, return positive (>0). Here 'bigger' is defined by whether
    they are 'parent and child', or intuitively bigger.

    Args:
        types0 (`type`): types0
        types1 (`type`): types1
    Returns:
        return (`int`): comparison results
    Raises:
    """
    compares = [single_type_compare(types0[0], types1[0]),
                single_type_compare(types0[1], types1[1])]
    if compares[0] != 0:
        return compares[0]
    if compares[1] != 0:
        return compares[1]
    if types0[0] is types1[0] and types0[1] is types1[1]:
        return 0
    return hash(types1) - hash(types0)
def _shellcomplete(cli, prog_name, complete_var=None):
    """Internal handler for the bash completion support.

    Parameters
    ----------
    cli : click.Command
        The main click Command of the program
    prog_name : str
        The program name on the command line
    complete_var : str
        The environment variable name used to control the completion
        behavior (Default value = None)
    """
    if complete_var is None:
        complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper()
    complete_instr = os.environ.get(complete_var)
    if not complete_instr:
        return

    if complete_instr == 'source':
        echo(get_code(prog_name=prog_name, env_name=complete_var))
    elif complete_instr == 'source-bash':
        echo(get_code('bash', prog_name, complete_var))
    elif complete_instr == 'source-fish':
        echo(get_code('fish', prog_name, complete_var))
    elif complete_instr == 'source-powershell':
        echo(get_code('powershell', prog_name, complete_var))
    elif complete_instr == 'source-zsh':
        echo(get_code('zsh', prog_name, complete_var))
    elif complete_instr in ['complete', 'complete-bash']:
        # keep 'complete' for bash for backward compatibility
        do_bash_complete(cli, prog_name)
    elif complete_instr == 'complete-fish':
        do_fish_complete(cli, prog_name)
    elif complete_instr == 'complete-powershell':
        do_powershell_complete(cli, prog_name)
    elif complete_instr == 'complete-zsh':
        do_zsh_complete(cli, prog_name)
    elif complete_instr == 'install':
        shell, path = install(prog_name=prog_name, env_name=complete_var)
        click.echo('%s completion installed in %s' % (shell, path))
    elif complete_instr == 'install-bash':
        shell, path = install(shell='bash', prog_name=prog_name, env_name=complete_var)
        click.echo('%s completion installed in %s' % (shell, path))
    elif complete_instr == 'install-fish':
        shell, path = install(shell='fish', prog_name=prog_name, env_name=complete_var)
        click.echo('%s completion installed in %s' % (shell, path))
    elif complete_instr == 'install-zsh':
        shell, path = install(shell='zsh', prog_name=prog_name, env_name=complete_var)
        click.echo('%s completion installed in %s' % (shell, path))
    elif complete_instr == 'install-powershell':
        shell, path = install(shell='powershell', prog_name=prog_name, env_name=complete_var)
        click.echo('%s completion installed in %s' % (shell, path))
    sys.exit()
def accept_key(pki_dir, pub, id_):
    '''
    If the master config was available then we will have a pki_dir key in
    the opts directory, this method places the pub key in the accepted
    keys dir and removes it from the unaccepted keys dir if that is the
    case.
    '''
    for key_dir in 'minions', 'minions_pre', 'minions_rejected':
        key_path = os.path.join(pki_dir, key_dir)
        if not os.path.exists(key_path):
            os.makedirs(key_path)

    key = os.path.join(pki_dir, 'minions', id_)
    with salt.utils.files.fopen(key, 'w+') as fp_:
        fp_.write(salt.utils.stringutils.to_str(pub))

    oldkey = os.path.join(pki_dir, 'minions_pre', id_)
    if os.path.isfile(oldkey):
        with salt.utils.files.fopen(oldkey) as fp_:
            if fp_.read() == pub:
                os.remove(oldkey)
def get_declared_items(self):
    """ Override to do it manually """
    for k, v in super(AndroidListView, self).get_declared_items():
        if k == 'layout':
            yield k, v
            break
def incrby(self, key, increment):
    """Increments the number stored at key by increment. If the key does
    not exist, it is set to 0 before performing the operation. An error is
    returned if the key contains a value of the wrong type or contains a
    string that can not be represented as integer. This operation is
    limited to 64 bit signed integers.

    See :meth:`~tredis.RedisClient.incr` for extra information on
    increment/decrement operations.

    .. versionadded:: 0.2.0

    .. note:: **Time complexity**: ``O(1)``

    :param key: The key to increment
    :type key: :class:`str`, :class:`bytes`
    :param int increment: The amount to increment by
    :returns: The value of key after the increment
    :rtype: int
    :raises: :exc:`~tredis.exceptions.RedisError`
    """
    return self._execute([b'INCRBY', key, ascii(increment)])
def _parse_contract(self, player_info):
    """
    Parse the player's contract.

    Depending on the player's contract status, a contract table is located
    at the bottom of the stats page and includes player wages by season. If
    found, create a dictionary housing the wages by season.

    Parameters
    ----------
    player_info : PyQuery object
        A PyQuery object containing the HTML from the player's stats page.
    """
    tables = player_info('table').items()
    for table in tables:
        id_attr = table.attr('id')
        if id_attr:
            if id_attr.startswith('contracts_'):
                years = self._parse_contract_headers(table)
                wages = self._parse_contract_wages(table)
                contract = self._combine_contract(years, wages)
                # If the contract is empty, the player likely doesn't have
                # a contract and should have a value of None instead.
                if contract == {}:
                    contract = None
                setattr(self, '_contract', contract)
                break
def get_state(self):
    """
    Return the sampler and step methods current state in order to
    restart sampling at a later time.
    """
    self.step_methods = set()
    for s in self.stochastics:
        self.step_methods |= set(self.step_method_dict[s])

    state = Sampler.get_state(self)
    state['step_methods'] = {}

    # The state of each StepMethod.
    for sm in self.step_methods:
        state['step_methods'][sm._id] = sm.current_state().copy()

    return state
def execute_catch(c, sql, vars=None):
    """Run a query, but ignore any errors. For error recovery paths where
    the error handler should not raise another."""
    try:
        c.execute(sql, vars)
    except Exception as err:
        cmd = sql.split(' ', 1)[0]
        log.error("Error executing %s: %s", cmd, err)
def safe_decode(text, incoming=None, errors='strict'):
    """Decodes incoming text/bytes string using `incoming` if they're not
    already unicode.

    This function was copied from novaclient.openstack.strutils

    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be decoded" % type(text))

    if isinstance(text, six.text_type):
        return text

    if not incoming:
        incoming = (sys.stdin.encoding or sys.getdefaultencoding())

    try:
        return text.decode(incoming, errors)
    except UnicodeDecodeError:
        # Note(flaper87) If we get here, it means that
        # sys.stdin.encoding / sys.getdefaultencoding
        # didn't return a suitable encoding to decode
        # text. This happens mostly when global LANG
        # var is not set correctly and there's no
        # default encoding. In this case, most likely
        # python will use ASCII or ANSI encoders as
        # default encodings but they won't be capable
        # of decoding non-ASCII characters.
        #
        # Also, UTF-8 is being used since it's an ASCII
        # extension.
        return text.decode('utf-8', errors)
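# A minimal usage sketch for safe_decode (assumes six is importable, as in
# the function above): bytes are decoded with the given encoding, while
# text that is already unicode passes through untouched.
print(safe_decode(b'caf\xc3\xa9', incoming='utf-8'))  # 'café'
print(safe_decode(u'already unicode'))                # returned as-is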
def start(self):
    """Start the sensor.
    """
    if rospy.get_name() == '/unnamed':
        raise ValueError('Weight sensor must be run inside a ros node!')
    self._weight_subscriber = rospy.Subscriber('weight_sensor/weights',
                                               Float32MultiArray,
                                               self._weights_callback)
    self._running = True
def parse_frame(self, buf: bytes) -> List[Tuple[bool, Optional[int],
                                                bytearray, Optional[bool]]]:
    """Return the next frame from the socket."""
    frames = []
    if self._tail:
        buf, self._tail = self._tail + buf, b''

    start_pos = 0
    buf_length = len(buf)

    while True:
        # read header
        if self._state == WSParserState.READ_HEADER:
            if buf_length - start_pos >= 2:
                data = buf[start_pos:start_pos+2]
                start_pos += 2
                first_byte, second_byte = data

                fin = (first_byte >> 7) & 1
                rsv1 = (first_byte >> 6) & 1
                rsv2 = (first_byte >> 5) & 1
                rsv3 = (first_byte >> 4) & 1
                opcode = first_byte & 0xf

                # frame-fin = %x0 ; more frames of this message follow
                #           / %x1 ; final frame of this message
                # frame-rsv1 = %x0 ;
                #    1 bit, MUST be 0 unless negotiated otherwise
                # frame-rsv2 = %x0 ;
                #    1 bit, MUST be 0 unless negotiated otherwise
                # frame-rsv3 = %x0 ;
                #    1 bit, MUST be 0 unless negotiated otherwise
                #
                # Remove rsv1 from this test for deflate development
                if rsv2 or rsv3 or (rsv1 and not self._compress):
                    raise WebSocketError(
                        WSCloseCode.PROTOCOL_ERROR,
                        'Received frame with non-zero reserved bits')

                if opcode > 0x7 and fin == 0:
                    raise WebSocketError(
                        WSCloseCode.PROTOCOL_ERROR,
                        'Received fragmented control frame')

                has_mask = (second_byte >> 7) & 1
                length = second_byte & 0x7f

                # Control frames MUST have a payload
                # length of 125 bytes or less
                if opcode > 0x7 and length > 125:
                    raise WebSocketError(
                        WSCloseCode.PROTOCOL_ERROR,
                        'Control frame payload cannot be '
                        'larger than 125 bytes')

                # Set compress status if last package is FIN
                # OR set compress status if this is first fragment
                # Raise error if not first fragment with rsv1 = 0x1
                if self._frame_fin or self._compressed is None:
                    self._compressed = True if rsv1 else False
                elif rsv1:
                    raise WebSocketError(
                        WSCloseCode.PROTOCOL_ERROR,
                        'Received frame with non-zero reserved bits')

                self._frame_fin = bool(fin)
                self._frame_opcode = opcode
                self._has_mask = bool(has_mask)
                self._payload_length_flag = length
                self._state = WSParserState.READ_PAYLOAD_LENGTH
            else:
                break

        # read payload length
        if self._state == WSParserState.READ_PAYLOAD_LENGTH:
            length = self._payload_length_flag
            if length == 126:
                if buf_length - start_pos >= 2:
                    data = buf[start_pos:start_pos+2]
                    start_pos += 2
                    length = UNPACK_LEN2(data)[0]
                    self._payload_length = length
                    self._state = (
                        WSParserState.READ_PAYLOAD_MASK
                        if self._has_mask
                        else WSParserState.READ_PAYLOAD)
                else:
                    break
            elif length > 126:
                if buf_length - start_pos >= 8:
                    data = buf[start_pos:start_pos+8]
                    start_pos += 8
                    length = UNPACK_LEN3(data)[0]
                    self._payload_length = length
                    self._state = (
                        WSParserState.READ_PAYLOAD_MASK
                        if self._has_mask
                        else WSParserState.READ_PAYLOAD)
                else:
                    break
            else:
                self._payload_length = length
                self._state = (
                    WSParserState.READ_PAYLOAD_MASK
                    if self._has_mask
                    else WSParserState.READ_PAYLOAD)

        # read payload mask
        if self._state == WSParserState.READ_PAYLOAD_MASK:
            if buf_length - start_pos >= 4:
                self._frame_mask = buf[start_pos:start_pos+4]
                start_pos += 4
                self._state = WSParserState.READ_PAYLOAD
            else:
                break

        if self._state == WSParserState.READ_PAYLOAD:
            length = self._payload_length
            payload = self._frame_payload

            chunk_len = buf_length - start_pos
            if length >= chunk_len:
                self._payload_length = length - chunk_len
                payload.extend(buf[start_pos:])
                start_pos = buf_length
            else:
                self._payload_length = 0
                payload.extend(buf[start_pos:start_pos+length])
                start_pos = start_pos + length

            if self._payload_length == 0:
                if self._has_mask:
                    assert self._frame_mask is not None
                    _websocket_mask(self._frame_mask, payload)

                frames.append((
                    self._frame_fin,
                    self._frame_opcode,
                    payload,
                    self._compressed))

                self._frame_payload = bytearray()
                self._state = WSParserState.READ_HEADER
            else:
                break

    self._tail = buf[start_pos:]

    return frames
def plot(self, **kwargs):
    """Plot xvg file data.

    The first column of the data is always taken as the abscissa X.
    Additional columns are plotted as ordinates Y1, Y2, ...

    In the special case that there is only a single column then this
    column is plotted against the index, i.e. (N, Y).

    :Keywords:
      *columns* : list
           Select the columns of the data to be plotted; the list
           is used as a numpy.array extended slice. The default is
           to use all columns. Columns are selected *after* a transform.
      *transform* : function
           function ``transform(array) -> array`` which transforms
           the original array; must return a 2D numpy array of
           shape [X, Y1, Y2, ...] where X, Y1, ... are column
           vectors. By default the transformation is the
           identity [``lambda x: x``].
      *maxpoints* : int
           limit the total number of data points; matplotlib has issues
           processing png files with >100,000 points and pdfs take forever
           to display. Set to ``None`` if really all data should be
           displayed. At the moment we simply decimate the data at regular
           intervals. [10000]
      *method*
           method to decimate the data to *maxpoints*, see
           :meth:`XVG.decimate` for details
      *color*
           single color (used for all plots); sequence of colors
           (will be repeated as necessary); or a matplotlib colormap
           (e.g. "jet", see :mod:`matplotlib.cm`). The default is to
           use the :attr:`XVG.default_color_cycle`.
      *ax*
           plot into given axes or create new one if ``None`` [``None``]
      *kwargs*
           All other keyword arguments are passed on to
           :func:`matplotlib.pyplot.plot`.

    :Returns:
      *ax*
           axes instance
    """
    columns = kwargs.pop('columns', Ellipsis)         # slice for everything
    maxpoints = kwargs.pop('maxpoints', self.maxpoints_default)
    transform = kwargs.pop('transform', lambda x: x)  # default is identity transformation
    method = kwargs.pop('method', "mean")
    ax = kwargs.pop('ax', None)

    if columns is Ellipsis or columns is None:
        columns = numpy.arange(self.array.shape[0])

    if len(columns) == 0:
        raise MissingDataError("plot() needs at least one column of data")

    if len(self.array.shape) == 1 or self.array.shape[0] == 1:
        # special case: plot against index; plot would do this automatically but
        # we'll just produce our own xdata and pretend that this was X all along
        a = numpy.ravel(self.array)
        X = numpy.arange(len(a))
        a = numpy.vstack((X, a))
        columns = [0] + [c+1 for c in columns]
    else:
        a = self.array

    color = kwargs.pop('color', self.default_color_cycle)
    try:
        cmap = matplotlib.cm.get_cmap(color)
        colors = cmap(matplotlib.colors.Normalize()(numpy.arange(len(columns[1:]), dtype=float)))
    except TypeError:
        colors = cycle(utilities.asiterable(color))

    if ax is None:
        ax = plt.gca()

    # (decimate/smooth o slice o transform)(array)
    a = self.decimate(method, numpy.asarray(transform(a))[columns],
                      maxpoints=maxpoints)

    # now deal with infs, nans etc AFTER all transformations
    # (needed for plotting across inf/nan)
    ma = numpy.ma.MaskedArray(a, mask=numpy.logical_not(numpy.isfinite(a)))

    # finally plot (each column separately to catch empty sets)
    for column, color in zip(range(1, len(columns)), colors):
        if len(ma[column]) == 0:
            warnings.warn("No data to plot for column {column:d}".format(**vars()),
                          category=MissingDataWarning)
        kwargs['color'] = color
        ax.plot(ma[0], ma[column], **kwargs)  # plot all other columns in parallel

    return ax
def set_theme(self, theme_name):
    """
    Set new theme to use. Uses a direct tk call to allow usage
    of the themes supplied with this package.

    :param theme_name: name of theme to activate
    """
    package = theme_name if theme_name not in self.PACKAGES else self.PACKAGES[theme_name]
    self.tk.call("package", "require", "ttk::theme::{}".format(package))
    self.tk.call("ttk::setTheme", theme_name)
def _agent_is_gene(agent, specific_only):
    """Returns whether an agent is for a gene.

    Parameters
    ----------
    agent: Agent
        The agent to evaluate
    specific_only : Optional[bool]
        If True, only elementary genes/proteins evaluate as genes and
        families will be filtered out. If False, families are also included.

    Returns
    -------
    is_gene: bool
        Whether the agent is a gene
    """
    if not specific_only:
        if not (agent.db_refs.get('HGNC') or
                agent.db_refs.get('UP') or
                agent.db_refs.get('FPLX')):
            return False
    else:
        if not (agent.db_refs.get('HGNC') or
                agent.db_refs.get('UP')):
            return False
    return True
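# A minimal usage sketch for _agent_is_gene. The SimpleNamespace objects
# below are stand-ins for the real Agent class (hypothetical, not from the
# original source); all the function needs is a 'db_refs' dict:
from types import SimpleNamespace

family = SimpleNamespace(db_refs={'FPLX': 'RAS'})
protein = SimpleNamespace(db_refs={'HGNC': '6407'})

print(_agent_is_gene(family, specific_only=False))  # True  (families allowed)
print(_agent_is_gene(family, specific_only=True))   # False (family filtered out)
print(_agent_is_gene(protein, specific_only=True))  # True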
def bbox_rot90(bbox, factor, rows, cols):
    """Rotates a bounding box by 90 degrees CCW (see np.rot90)

    Args:
        bbox (tuple): A tuple (x_min, y_min, x_max, y_max).
        factor (int): Number of CCW rotations. Must be in range [0;3].
            See np.rot90.
        rows (int): Image rows.
        cols (int): Image cols.
    """
    if factor < 0 or factor > 3:
        raise ValueError('Parameter n must be in range [0;3]')
    x_min, y_min, x_max, y_max = bbox
    if factor == 1:
        bbox = [y_min, 1 - x_max, y_max, 1 - x_min]
    if factor == 2:
        bbox = [1 - x_max, 1 - y_max, 1 - x_min, 1 - y_min]
    if factor == 3:
        bbox = [1 - y_max, x_min, 1 - y_min, x_max]
    return bbox
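# A minimal usage sketch for bbox_rot90 (made-up coordinates, not from the
# original source). Coordinates are normalized to [0, 1], which is why
# rows/cols don't appear in the arithmetic; four quarter-turns should
# return the box to where it started:
box = (0.1, 0.2, 0.4, 0.5)
for _ in range(4):
    box = bbox_rot90(box, 1, rows=100, cols=100)
print(box)  # [0.1, 0.2, 0.4, 0.5] (up to float rounding)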
def _compare_frame_rankings(ref, est, transitive=False):
    '''Compute the number of ranking disagreements in two lists.

    Parameters
    ----------
    ref : np.ndarray, shape=(n,)
    est : np.ndarray, shape=(n,)
        Reference and estimate ranked lists.
        `ref[i]` is the relevance score for point `i`.

    transitive : bool
        If true, all pairs of reference levels are compared.
        If false, only adjacent pairs of reference levels are compared.

    Returns
    -------
    inversions : int
        The number of pairs of indices `i, j` where
        `ref[i] < ref[j]` but `est[i] >= est[j]`.

    normalizer : float
        The total number of pairs (i, j) under consideration.
        If transitive=True, then this is |{(i,j) : ref[i] < ref[j]}|
        If transitive=False, then this is |{(i,j) : ref[i] + 1 = ref[j]}|
    '''
    idx = np.argsort(ref)
    ref_sorted = ref[idx]
    est_sorted = est[idx]

    # Find the break-points in ref_sorted
    levels, positions, counts = np.unique(ref_sorted,
                                          return_index=True,
                                          return_counts=True)

    positions = list(positions)
    positions.append(len(ref_sorted))

    index = collections.defaultdict(lambda: slice(0))
    ref_map = collections.defaultdict(lambda: 0)

    for level, cnt, start, end in zip(levels, counts,
                                      positions[:-1], positions[1:]):
        index[level] = slice(start, end)
        ref_map[level] = cnt

    # Now that we have values sorted, apply the inversion-counter to
    # pairs of reference values
    if transitive:
        level_pairs = itertools.combinations(levels, 2)
    else:
        level_pairs = [(i, i+1) for i in levels]

    level_pairs, lcounter = itertools.tee(level_pairs)

    normalizer = float(sum([ref_map[i] * ref_map[j] for (i, j) in lcounter]))

    if normalizer == 0:
        return 0, 0.0

    inversions = 0
    for level_1, level_2 in level_pairs:
        inversions += _count_inversions(est_sorted[index[level_1]],
                                        est_sorted[index[level_2]])

    return inversions, float(normalizer)
def make_config_file(guided=False):
    """
    Options:  --auto, --guided, --manual
    Places for the file: --inplace, --user
    """
    config_path = _make_config_location(guided=guided)
    config_data = make_config_data(guided=guided)
    write_config_file(config_path, config_data)
def _determine_unfiltered_package_names(self):
    """
    Return a list of package names to be filtered based on the
    configuration file.
    """
    # This plugin only processes packages, if the line in the packages
    # configuration contains a PEP440 specifier it will be processed by the
    # blacklist release filter.  So we need to remove any packages that
    # are not applicable for this plugin.
    unfiltered_packages = set()
    try:
        lines = self.configuration["whitelist"]["packages"]
        package_lines = lines.split("\n")
    except KeyError:
        package_lines = []
    for package_line in package_lines:
        package_line = package_line.strip()
        if not package_line or package_line.startswith("#"):
            continue
        unfiltered_packages.add(package_line)

    return list(unfiltered_packages)
def dropout_no_scaling(x, keep_prob):
    """Like tf.nn.dropout, but does not scale up.  Works on integers also.

    Args:
        x: a Tensor
        keep_prob: a floating point number

    Returns:
        Tensor of the same shape as x.
    """
    if keep_prob == 1.0:
        return x
    mask = tf.less(tf.random_uniform(tf.shape(x)), keep_prob)
    return x * cast_like(mask, x)
def parse_config_output(self, output):
    """
    This method will parse a string containing FortiOS config and will
    load it into the current :class:`~pyFG.forticonfig.FortiConfig`
    object.

    Args:
        - **output** (string) - A string containing a supported version of
          FortiOS config
    """
    regexp = re.compile('^(config |edit |set |end$|next$)(.*)')
    current_block = self

    if isinstance(output, py23_compat.string_types):
        output = output.splitlines()

    for line in output:
        if 'uuid' in line:
            continue
        if 'snmp-index' in line:
            continue
        line = line.strip()
        result = regexp.match(line)

        if result is not None:
            action = result.group(1).strip()
            data = result.group(2).strip()

            if action == 'config' or action == 'edit':
                data = data.replace('"', '')
                if data not in current_block.get_block_names():
                    config_block = FortiConfig(data, action, current_block)
                    current_block[data] = config_block
                else:
                    config_block = current_block[data]
                current_block = config_block
            elif action == 'end' or action == 'next':
                current_block = current_block.get_parent()
            elif action == 'set':
                split_data = data.split(' ')
                parameter = split_data[0]
                data = split_data[1:]
                current_block.set_param(parameter, ' '.join(data))
def get_ir_reciprocal_mesh(self, mesh=(10, 10, 10), is_shift=(0, 0, 0)):
    """
    k-point mesh of the Brillouin zone generated taking into account
    symmetry. The method returns the irreducible kpoints of the mesh
    and their weights.

    Args:
        mesh (3x1 array): The number of kpoint for the mesh needed in
            each direction
        is_shift (3x1 array): Whether to shift the kpoint grid. (1, 1, 1)
            means all points are shifted by 0.5, 0.5, 0.5.

    Returns:
        A list of irreducible kpoints and their weights as a list of
        tuples [(ir_kpoint, weight)], with ir_kpoint given
        in fractional coordinates
    """
    shift = np.array([1 if i else 0 for i in is_shift])
    mapping, grid = spglib.get_ir_reciprocal_mesh(
        np.array(mesh), self._cell, is_shift=shift, symprec=self._symprec)

    results = []
    for i, count in zip(*np.unique(mapping, return_counts=True)):
        results.append(((grid[i] + shift * (0.5, 0.5, 0.5)) / mesh, count))
    return results
def connect(self, interface, event, handler):
    """Connect to a DBus signal. Returns subscription id (int)."""
    object_path = self.object_path
    return self.bus.connect(interface, event, object_path, handler)
def get_program(self, program_path, controller=None):
    """
    Find the program within this manifest. If key is found, and it contains
    a list, iterate over the list and return the program that matches
    the controller tag. NOTICE: program_path must have a leading slash.
    """
    if not program_path or program_path[0] != '/':
        raise ValueError("program_path must be a full path with leading slash")
    items = program_path[1:].split('/')
    result = self
    for item in items:
        result = result[item]

    if hasattr(result, "lower"):
        # string redirect
        return self.get_program(result)
    elif type(result) is Manifest:
        return result.get_program('/')
    elif hasattr(result, 'append'):
        matching_blank = []
        for program in result:
            if controller in program.controllers:
                return program
            if not program.controllers or not controller:
                # no exact matching controllers for this program.
                # Use the first program with no controllers defined.
                matching_blank.append(program)
        if matching_blank:
            return matching_blank[0]
        else:
            raise ProgramNotFound("No matching program for %s controller"
                                  % controller)

    return result
def extract_attrs(x, n):
    """Extracts attributes from element list 'x' beginning at index 'n'.

    The elements encapsulating the attributes (typically a series of Str and
    Space elements) are removed from 'x'.  Items before index 'n' are left
    unchanged.

    Returns the attributes in pandoc format.  A ValueError is raised if
    attributes aren't found.  An IndexError is raised if the index 'n' is
    out of range."""

    # Check for the start of the attributes string
    if not (x[n]['t'] == 'Str' and x[n]['c'].startswith('{')):
        raise ValueError('Attributes not found.')

    # It starts with {, so this *may* be an attributes list.  Search for
    # where the attributes end.  Do not consider } in quoted elements.

    seq = []          # A sequence of saved values
    quotechar = None  # Used to keep track of quotes in strings
    flag = False      # Flags that an attributes list was found
    i = 0             # Initialization

    for i, v in enumerate(x[n:]):  # Scan through the list
        if v and v['t'] == 'Str':
            # Scan for } outside of a quote
            for j, c in enumerate(v['c']):
                if c == quotechar:  # This is an end quote
                    quotechar = None
                elif c in ['"', "'"]:  # This is an open quote
                    quotechar = c
                elif c == '}' and quotechar is None:  # The attributes end here
                    # Split the string at the } and save the pieces
                    head, tail = v['c'][:j+1], v['c'][j+1:]
                    x[n+i] = copy.deepcopy(v)
                    x[n+i]['c'] = tail
                    v['c'] = head
                    flag = True
                    break
        seq.append(v)
        if flag:
            break

    if flag:  # Attributes string was found, so process it

        # Delete empty and extracted elements
        if x[n+i]['t'] == 'Str' and not x[n+i]['c']:
            del x[n+i]
        del x[n:n+i]

        # Process the attrs
        attrstr = stringify(dollarfy(quotify(seq))).strip()
        attrs = PandocAttributes(attrstr, 'markdown').to_pandoc()

        # Remove extraneous quotes from kvs
        for i, (k, v) in enumerate(attrs[2]):  # pylint: disable=unused-variable
            if v[0] == v[-1] == '"' or v[0] == "'" == v[-1] == "'":
                attrs[2][i][1] = attrs[2][i][1][1:-1]

        # We're done
        return attrs

    # Attributes not found
    raise ValueError('Attributes not found.')
def find(self, state, recp):
    """WARNING: This function does not return partners currently being
    initialized."""
    if recipient.IRecipient.providedBy(recp):
        agent_id = recipient.IRecipient(recp).key
    else:
        agent_id = recp
    desc = state.agent.get_descriptor()
    match = [x for x in desc.partners if x.recipient.key == agent_id]
    if len(match) == 0:
        return None
    elif len(match) > 1:
        raise FindPartnerError('More than one partner was matched by the '
                               'recipient %r! Matched: %r' % (recp, match, ))
    else:
        return match[0]
def show_doc_from_name(mod_name, ft_name: str, doc_string: bool = True,
                       arg_comments: dict = {}, alt_doc_string: str = ''):
    "Show documentation for `ft_name`, see `show_doc`."
    mod = import_mod(mod_name)
    splits = str.split(ft_name, '.')
    assert hasattr(mod, splits[0]), print(f"Module {mod_name} doesn't have a function named {splits[0]}.")
    elt = getattr(mod, splits[0])
    for i, split in enumerate(splits[1:]):
        assert hasattr(elt, split), print(f"Class {'.'.join(splits[:i+1])} doesn't have a function named {split}.")
        elt = getattr(elt, split)
    show_doc(elt, doc_string, ft_name, arg_comments, alt_doc_string)
def process(self, state,
            irsb=None,
            skip_stmts=0,
            last_stmt=99999999,
            whitelist=None,
            inline=False,
            force_addr=None,
            insn_bytes=None,
            size=None,
            num_inst=None,
            traceflags=0,
            thumb=False,
            extra_stop_points=None,
            opt_level=None,
            **kwargs):
    """
    :param state:       The state with which to execute
    :param irsb:        The PyVEX IRSB object to use for execution. If not provided one will be lifted.
    :param skip_stmts:  The number of statements to skip in processing
    :param last_stmt:   Do not execute any statements after this statement
    :param whitelist:   Only execute statements in this set
    :param inline:      This is an inline execution. Do not bother copying the state.
    :param force_addr:  Force execution to pretend that we're working at this concrete address
    :param thumb:       Whether the block should be lifted in ARM's THUMB mode.
    :param extra_stop_points:
                        An extra set of points at which to break basic blocks
    :param opt_level:   The VEX optimization level to use.
    :param insn_bytes:  A string of bytes to use for the block instead of the project.
    :param size:        The maximum size of the block, in bytes.
    :param num_inst:    The maximum number of instructions.
    :param traceflags:  traceflags to be passed to VEX. (default: 0)
    :returns:           A SimSuccessors object categorizing the block's successors
    """
    if 'insn_text' in kwargs:
        if insn_bytes is not None:
            raise SimEngineError("You cannot provide both 'insn_bytes' and 'insn_text'!")

        insn_bytes = self.project.arch.asm(kwargs['insn_text'],
                                           addr=kwargs.get('addr', 0),
                                           thumb=thumb,
                                           as_bytes=True)

        if insn_bytes is None:
            raise AngrAssemblyError("Assembling failed. Please make sure keystone is installed, "
                                    "and the assembly string is correct.")

    return super(SimEngineVEX, self).process(state, irsb,
                                             skip_stmts=skip_stmts,
                                             last_stmt=last_stmt,
                                             whitelist=whitelist,
                                             inline=inline,
                                             force_addr=force_addr,
                                             insn_bytes=insn_bytes,
                                             size=size,
                                             num_inst=num_inst,
                                             traceflags=traceflags,
                                             thumb=thumb,
                                             extra_stop_points=extra_stop_points,
                                             opt_level=opt_level)
def render_template(template):
    """
    takes a template to render to and returns a function that takes an
    object to render the data for this template.

    If callable_or_dict is callable, it will be called with the request and
    any additional arguments to produce the template parameters. This is
    useful for a view-like function that returns a dict-like object instead
    of an HttpResponse.

    Otherwise, callable_or_dict is used as the parameters for the rendered
    response.
    """
    def outer_wrapper(callable_or_dict=None, statuscode=None, **kwargs):
        def wrapper(request, *args, **wrapper_kwargs):
            if callable(callable_or_dict):
                params = callable_or_dict(request, *args, **wrapper_kwargs)
            else:
                params = callable_or_dict

            # If we want to return some other response type we can,
            # that simply overrides the default behavior
            if params is None or isinstance(params, dict):
                resp = render(request, template, params, **kwargs)
            else:
                resp = params

            if statuscode:
                resp.status_code = statuscode

            return resp
        return wrapper
    return outer_wrapper
0.000793
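A usage sketch of the factory above, assuming a Django-style `render`; the template name and view body are hypothetical:

@render_template('profile.html')
def profile(request):
    # returning a dict renders profile.html with it; returning an
    # HttpResponse would be passed through unchanged
    return {'user': request.user}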
def nl_list_del(obj):
    """https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/list.h#L49.

    Positional arguments:
    obj -- nl_list_head class instance.
    """
    # Unlink `obj` from the circular doubly-linked list. The Python port
    # names the forward pointer `next_`; the original mixed `next` and
    # `next_`, which would raise AttributeError.
    obj.next_.prev = obj.prev
    obj.prev.next_ = obj.next_
0.004149
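A self-contained sketch of the circular list the helper unlinks from; this stand-in class only mirrors the next_/prev fields of the libnl port:

class ListHead(object):
    def __init__(self):
        self.next_ = self.prev = self

head, node = ListHead(), ListHead()
head.next_ = head.prev = node     # two-element circular list
node.next_ = node.prev = head
nl_list_del(node)                 # head.next_ and head.prev point back at head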
def dicom_to_nifti(dicom_input, output_file=None):
    """
    This is the main dicom to nifti conversion function for siemens images.
    As input siemens images are required. It will then determine the type of images and do the correct conversion

    :param output_file: filepath to the output nifti
    :param dicom_input: directory with dicom files for 1 scan
    """

    assert common.is_siemens(dicom_input)

    if _is_4d(dicom_input):
        logger.info('Found sequence type: MOSAIC 4D')
        return _mosaic_4d_to_nifti(dicom_input, output_file)

    grouped_dicoms = _classic_get_grouped_dicoms(dicom_input)
    if _is_classic_4d(grouped_dicoms):
        logger.info('Found sequence type: CLASSIC 4D')
        return _classic_4d_to_nifti(grouped_dicoms, output_file)

    logger.info('Assuming anatomical data')
    return convert_generic.dicom_to_nifti(dicom_input, output_file)
0.002262
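A hedged call sketch; the scan directory and output path are placeholders:

result = dicom_to_nifti('/data/siemens_scan_001', output_file='/tmp/scan.nii.gz')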
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a plist file-like object.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): a file-like object.

    Raises:
      UnableToParseFile: when the file cannot be parsed.
    """
    filename = parser_mediator.GetFilename()
    file_size = file_object.get_size()

    if file_size <= 0:
      raise errors.UnableToParseFile(
          'File size: {0:d} bytes is less than or equal to 0.'.format(
              file_size))

    # 50MB is 10x larger than any plist seen to date.
    if file_size > 50000000:
      raise errors.UnableToParseFile(
          'File size: {0:d} bytes is larger than 50 MB.'.format(file_size))

    top_level_object = self.GetTopLevel(file_object)
    if not top_level_object:
      raise errors.UnableToParseFile(
          'Unable to parse: {0:s} skipping.'.format(filename))

    # TODO: add a parser filter.
    matching_plugin = None
    for plugin in self._plugins:
      try:
        plugin.UpdateChainAndProcess(
            parser_mediator, plist_name=filename, top_level=top_level_object)
        matching_plugin = plugin
      except errors.WrongPlistPlugin as exception:
        logger.debug('Wrong plugin: {0:s} for: {1:s}'.format(
            exception.args[0], exception.args[1]))

    if not matching_plugin and self._default_plugin:
      self._default_plugin.UpdateChainAndProcess(
          parser_mediator, plist_name=filename, top_level=top_level_object)
0.004447
def Cleanse(obj, encoding='utf-8'): """Makes Python object appropriate for JSON serialization. - Replaces instances of Infinity/-Infinity/NaN with strings. - Turns byte strings into unicode strings. - Turns sets into sorted lists. - Turns tuples into lists. Args: obj: Python data structure. encoding: Charset used to decode byte strings. Returns: Unicode JSON data structure. """ if isinstance(obj, int): return obj elif isinstance(obj, float): if obj == _INFINITY: return 'Infinity' elif obj == _NEGATIVE_INFINITY: return '-Infinity' elif math.isnan(obj): return 'NaN' else: return obj elif isinstance(obj, bytes): return tf.compat.as_text(obj, encoding) elif isinstance(obj, (list, tuple)): return [Cleanse(i, encoding) for i in obj] elif isinstance(obj, set): return [Cleanse(i, encoding) for i in sorted(obj)] elif isinstance(obj, dict): return {Cleanse(k, encoding): Cleanse(v, encoding) for k, v in obj.items()} else: return obj
0.012452
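A few illustrative inputs for the sanitizer above (byte-string decoding goes through tf.compat.as_text, so TensorFlow is assumed to be importable):

Cleanse(float('nan'))       # 'NaN'
Cleanse({'xs': {3, 1, 2}})  # {'xs': [1, 2, 3]} -- sets become sorted lists
Cleanse((b'a', 1.0))        # [u'a', 1.0] -- tuples become lists, bytes become text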
def get_dos(self):
    '''Find the total DOS shifted by the Fermi energy'''
    # find the dos file; the with-statement closes each file, so no
    # explicit close() calls are needed
    fildos = ''
    for f in self._files:
        with open(f, 'r') as fp:
            first_line = next(fp)
            if "E (eV)" in first_line and "Int dos(E)" in first_line:
                fildos = f
                ndoscol = len(next(fp).split())-2 # number of spin channels
                break
    if not fildos: return None # cannot find DOS

    # get the Fermi energy
    line = self._get_line('the Fermi energy is', self.outputf)
    efermi = float(line.split('is')[-1].split()[0])

    # grab the DOS
    energy = [] ; dos = []
    with open(fildos, 'r') as fp:
        next(fp) # comment line
        for line in fp:
            ls = line.split()
            energy.append(Scalar(value=float(ls[0])-efermi))
            dos.append(Scalar(value=sum([float(i) for i in ls[1:1+ndoscol]])))
    return Property(scalars=dos, units='number of states per unit cell', conditions=Value(name='energy', scalars=energy, units='eV'))
0.006832
def get_random_condcount(mode):
    """
    HITs can be in one of three states:
        - jobs that are finished
        - jobs that are started but not finished
        - jobs that are never going to finish (user decided not to do it)
    Our count should be based on the first two, so we count any tasks finished
    or any tasks not finished that were started in the last cutoff_time
    minutes, as specified in the cutoff_time variable in the config file.

    Returns a tuple: (cond, condition)
    """
    cutofftime = datetime.timedelta(minutes=-CONFIG.getint('Server Parameters', 'cutoff_time'))
    starttime = datetime.datetime.now() + cutofftime

    try:
        conditions = json.load(open(os.path.join(app.root_path, 'conditions.json')))
        numconds = len(conditions.keys())
        numcounts = 1
    except IOError:
        numconds = CONFIG.getint('Task Parameters', 'num_conds')
        numcounts = CONFIG.getint('Task Parameters', 'num_counters')

    participants = Participant.query.\
                   filter(Participant.codeversion == \
                          CONFIG.get('Task Parameters', 'experiment_code_version')).\
                   filter(Participant.mode == mode).\
                   filter(or_(Participant.status == COMPLETED,
                              Participant.status == CREDITED,
                              Participant.status == SUBMITTED,
                              Participant.status == BONUSED,
                              Participant.beginhit > starttime)).all()
    counts = Counter()
    for cond in range(numconds):
        for counter in range(numcounts):
            counts[(cond, counter)] = 0
    for participant in participants:
        condcount = (participant.cond, participant.counterbalance)
        if condcount in counts:
            counts[condcount] += 1
    mincount = min(counts.values())
    minima = [hsh for hsh, count in counts.iteritems() if count == mincount]
    chosen = choice(minima)
    app.logger.info("given %(a)s chose %(b)s" % {'a': counts, 'b': chosen})

    return chosen
0.00282
def audio_set_format(self, format, rate, channels): '''Set decoded audio format. This only works in combination with L{audio_set_callbacks}(), and is mutually exclusive with L{audio_set_format_callbacks}(). @param format: a four-characters string identifying the sample format (e.g. "S16N" or "FL32"). @param rate: sample rate (expressed in Hz). @param channels: channels count. @version: LibVLC 2.0.0 or later. ''' return libvlc_audio_set_format(self, str_to_bytes(format), rate, channels)
0.007117
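A usage sketch with the python-vlc bindings; the callback setup via audio_set_callbacks is assumed to happen elsewhere:

player = vlc.MediaPlayer('track.mp3')
player.audio_set_format('S16N', 44100, 2)   # 16-bit native-endian stereo at 44.1 kHz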
def create(self, configuration, name, description): """ Creates a data view from the search template and ml template given :param configuration: Information to construct the data view from (eg descriptors, datasets etc) :param name: Name of the data view :param description: Description for the data view :return: The data view id """ data = { "configuration": configuration, "name": name, "description": description } failure_message = "Dataview creation failed" result = self._get_success_json(self._post_json( 'v1/data_views', data, failure_message=failure_message)) data_view_id = result['data']['id'] return data_view_id
0.003614
def validate(cls, mapper_spec):
    """Validates mapper spec and all mapper parameters.

    Args:
      mapper_spec: The MapperSpec for this InputReader.

    Raises:
      BadReaderParamsError: required parameters are missing or invalid.
    """
    if mapper_spec.input_reader_class() != cls:
      raise BadReaderParamsError("Input reader class mismatch")
    params = _get_params(mapper_spec)
    if cls.ENTITY_KIND_PARAM not in params:
      raise BadReaderParamsError("Missing mapper parameter 'entity_kind'")
    if cls.BATCH_SIZE_PARAM in params:
      try:
        batch_size = int(params[cls.BATCH_SIZE_PARAM])
        if batch_size < 1:
          raise BadReaderParamsError("Bad batch size: %s" % batch_size)
      except ValueError as e:
        raise BadReaderParamsError("Bad batch size: %s" % e)
    if cls.NAMESPACE_PARAM in params:
      if not isinstance(params[cls.NAMESPACE_PARAM],
                        (str, unicode, type(None))):
        raise BadReaderParamsError(
            "Expected a single namespace string")
    if cls.NAMESPACES_PARAM in params:
      raise BadReaderParamsError("Multiple namespaces are no longer supported")
    if cls.FILTERS_PARAM in params:
      filters = params[cls.FILTERS_PARAM]
      if not isinstance(filters, list):
        raise BadReaderParamsError("Expected list for filters parameter")
      for f in filters:
        if not isinstance(f, (tuple, list)):
          raise BadReaderParamsError("Filter should be a tuple or list: %s" % (f,))
        if len(f) != 3:
          raise BadReaderParamsError("Filter should be a 3-tuple: %s" % (f,))
        if not isinstance(f[0], basestring):
          raise BadReaderParamsError("First element should be string: %s" % (f,))
        if f[1] != "=":
          raise BadReaderParamsError(
              "Only equality filters are supported: %s" % (f,))
0.008143
def getLaplaceCovar(self):
    """
    Use the Laplace approximation to calculate the covariance matrix of the optimized parameters
    """
    assert self.init, 'GP not initialised'
    assert not self.fast, 'Not supported for fast implementation'

    if self.cache['Sigma'] is None:
        self.cache['Sigma'] = SP.linalg.inv(self.getHessian())
    return self.cache['Sigma']
0.018868
def from_decimal(number, width=1):
    """
    Takes a decimal and returns base91 char string.
    With optional parameter for fixed-width output
    """
    text = []
    if not isinstance(number, int_type):
        # use the % operator; passing args after a comma never interpolates
        raise TypeError("Expected number to be int, got %s" % type(number))
    elif not isinstance(width, int_type):
        raise TypeError("Expected width to be int, got %s" % type(width))
    elif number < 0:
        raise ValueError("Expected number to be positive integer")
    elif number > 0:
        max_n = ceil(log(number) / log(91))

        for n in _range(int(max_n), -1, -1):
            quotient, number = divmod(number, 91**n)
            text.append(chr(33 + quotient))

    return "".join(text).lstrip('!').rjust(max(1, width), '!')
0.001321
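A few worked values for the base91 encoder above (chr(33), '!', is the zero digit):

from_decimal(0)            # '!'
from_decimal(100)          # '"*'  (100 == 1*91 + 9 -> chr(34), chr(42))
from_decimal(1, width=3)   # '!!"' (zero-padded on the left with '!')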
def on_connect(self, connection): "Re-subscribe to any channels and patterns previously subscribed to" # NOTE: for python3, we can't pass bytestrings as keyword arguments # so we need to decode channel/pattern names back to unicode strings # before passing them to [p]subscribe. self.pending_unsubscribe_channels.clear() self.pending_unsubscribe_patterns.clear() if self.channels: channels = {} for k, v in iteritems(self.channels): channels[self.encoder.decode(k, force=True)] = v self.subscribe(**channels) if self.patterns: patterns = {} for k, v in iteritems(self.patterns): patterns[self.encoder.decode(k, force=True)] = v self.psubscribe(**patterns)
0.00243
def _load_config(self, path):
    """Return YAML values from given config file.

    @param path file to load
    """
    try:
        with open(path) as f:
            values = yaml.safe_load(f)
            if isinstance(values, dict):
                return values
            else:
                raise yaml.YAMLError('Unable to parse/load {}'.format(path))
    except (IOError, yaml.YAMLError):
        if self.ignore_errors:
            return None
        else:
            # bare raise re-raises the active exception with its traceback
            raise
0.005435
def address(self): """ IP Address using bacpypes Address format """ port = "" if self._port: port = ":{}".format(self._port) return Address( "{}/{}{}".format( self.interface.ip.compressed, self.interface.exploded.split("/")[-1], port, ) )
0.005291
def get_existing_pipelines(self): """Get existing pipeline configs for specific application. Returns: str: Pipeline config json """ url = "{0}/applications/{1}/pipelineConfigs".format(API_URL, self.app_name) resp = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT) assert resp.ok, 'Failed to lookup pipelines for {0}: {1}'.format(self.app_name, resp.text) return resp.json()
0.008715
def get_metadata(self, path, include_entities=False, **kwargs): """Return metadata found in JSON sidecars for the specified file. Args: path (str): Path to the file to get metadata for. include_entities (bool): If True, all available entities extracted from the filename (rather than JSON sidecars) are included in the returned metadata dictionary. kwargs (dict): Optional keyword arguments to pass onto get_nearest(). Returns: A dictionary of key/value pairs extracted from all of the target file's associated JSON sidecars. Notes: A dictionary containing metadata extracted from all matching .json files is returned. In cases where the same key is found in multiple files, the values in files closer to the input filename will take precedence, per the inheritance rules in the BIDS specification. """ f = self.get_file(path) # For querying efficiency, store metadata in the MetadataIndex cache self.metadata_index.index_file(f.path) if include_entities: entities = f.entities results = entities else: results = {} results.update(self.metadata_index.file_index[path]) return results
0.001463
def id(self):
    """Get the user id, i.e. the last path segment of the profile URL.

    :return: user id
    :rtype: str
    """
    return re.match(r'^.*/([^/]+)/$', self.url).group(1) \
        if self.url is not None else ''
0.009756
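The regex keeps the last path segment of a trailing-slash URL; the URL here is a made-up example:

re.match(r'^.*/([^/]+)/$', 'https://www.zhihu.com/people/some-user/').group(1)
# -> 'some-user'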
def mark_checked(self, institute, case, user, link, unmark=False):
    """Mark a case as checked from an analysis point of view.

    Arguments:
        institute (dict): A Institute object
        case (dict): Case object
        user (dict): A User object
        link (str): The url to be used in the event
        unmark (bool): If case should be unmarked

    Return:
        updated_case
    """

    LOG.info("Updating checked status of {}"
             .format(case['display_name']))

    status = 'not checked' if unmark else 'checked'
    self.create_event(
        institute=institute,
        case=case,
        user=user,
        link=link,
        category='case',
        verb='check_case',
        subject=status
    )

    LOG.info("Updating {0}'s checked status {1}"
             .format(case['display_name'], status))
    analysis_checked = not unmark
    updated_case = self.case_collection.find_one_and_update(
        {'_id': case['_id']},
        {
            '$set': {'analysis_checked': analysis_checked}
        },
        return_document=pymongo.ReturnDocument.AFTER
    )

    LOG.debug("Case updated")
    return updated_case
0.003754
def is_true(entity, prop, name): "bool: True if the value of a property is True." return is_not_empty(entity, prop, name) and name in entity._data and bool(getattr(entity, name))
0.010753
def is_syscall_addr(self, addr): """ Return whether or not the given address corresponds to a syscall implementation. """ if self.kernel_base is None or addr < self.kernel_base: return False addr -= self.kernel_base if addr % self.syscall_addr_alignment != 0: return False addr //= self.syscall_addr_alignment return addr <= self.unknown_syscall_number
0.006772
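A small worked example of the address arithmetic above, borrowing the method onto a stub with hypothetical layout values:

class _Stub(object):
    kernel_base = 0x8000000
    syscall_addr_alignment = 8
    unknown_syscall_number = 0x200

_Stub.is_syscall_addr = is_syscall_addr   # reuse the method above
_Stub().is_syscall_addr(0x8000010)        # True: 0x10 // 8 == 2 <= 0x200
_Stub().is_syscall_addr(0x8000011)        # False: not 8-byte aligned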
def validate_email(self, email): """ Validate the provided email address. The email address is first modified to match the RFC spec. Namely, the domain portion of the email is lowercased. Returns: The validated email address. Raises: serializers.ValidationError: If the serializer is bound and the provided email doesn't match the existing address. """ user, domain = email.rsplit("@", 1) email = "@".join([user, domain.lower()]) if self.instance and email and self.instance.email != email: raise serializers.ValidationError( _( "Existing emails may not be edited. Create a new one " "instead." ) ) return email
0.002336
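The normalization only lowercases the domain part; the local part keeps its case:

user, domain = 'John.Doe@EXAMPLE.COM'.rsplit('@', 1)
'@'.join([user, domain.lower()])   # 'John.Doe@example.com'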
def seek_file_end(file): '''Seek to the end of the file.''' try: file.seek(0, 2) except ValueError: # gzip files don't support seek from end while True: data = file.read(4096) if not data: break
0.003704
def distance_to_arc(alon, alat, aazimuth, plons, plats):
    """
    Calculate a closest distance between a great circle arc and a point
    (or a collection of points).

    :param float alon, alat:
        Arc reference point longitude and latitude, in decimal degrees.
    :param aazimuth:
        Arc azimuth (an angle between direction to a north and arc in clockwise
        direction), measured in a reference point, in decimal degrees.
    :param float plons, plats:
        Longitudes and latitudes of points to measure distance. Either scalar
        values or numpy arrays of decimal degrees.
    :returns:
        Distance in km, a scalar value or numpy array depending on ``plons``
        and ``plats``. A distance is negative if the target point lies on the
        right hand side of the arc.

    Solves a spherical triangle formed by reference point, target point and
    a projection of target point to a reference great circle arc.
    """
    azimuth_to_target = azimuth(alon, alat, plons, plats)
    distance_to_target = geodetic_distance(alon, alat, plons, plats)

    # find an angle between an arc and a great circle arc connecting
    # arc's reference point and a target point
    t_angle = (azimuth_to_target - aazimuth + 360) % 360

    # in a spherical right triangle cosine of the angle of a cathetus
    # augmented to pi/2 is equal to sine of an opposite angle times
    # sine of hypotenuse, see
    # http://en.wikipedia.org/wiki/Spherical_trigonometry#Napier.27s_Pentagon
    angle = numpy.arccos(
        (numpy.sin(numpy.radians(t_angle))
         * numpy.sin(distance_to_target / EARTH_RADIUS))
    )
    return (numpy.pi / 2 - angle) * EARTH_RADIUS
0.000593
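A hedged call sketch against the function above; the coordinates are arbitrary illustrative values:

d = distance_to_arc(0.0, 0.0, 90.0, numpy.array([0.5]), numpy.array([1.0]))
# scalar plons/plats work too; the sign encodes which side of the arc the point is on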
def get_memberships(self): """Fetches all group memberships. Returns: dict: key: group name value: (array of users, array of groups) """ response = self._get_xml(self.rest_url + "/group/membership") if not response.ok: return None xmltree = etree.fromstring(response.content) memberships = {} for mg in xmltree.findall('membership'): # coerce values to unicode in a python 2 and 3 compatible way group = u'{}'.format(mg.get('group')) users = [u'{}'.format(u.get('name')) for u in mg.find('users').findall('user')] groups = [u'{}'.format(g.get('name')) for g in mg.find('groups').findall('group')] memberships[group] = {u'users': users, u'groups': groups} return memberships
0.004711
def delete_peer(self, peer_address, stale=False): """ Remove the Nomad server with given address from the Raft configuration. The return code signifies success or failure. https://www.nomadproject.io/docs/http/operator.html arguments: - peer_address, The address specifies the server to remove and is given as an IP:port optional arguments: - stale, (defaults to False), Specifies if the cluster should respond without an active leader. This is specified as a querystring parameter. returns: Boolean raises: - nomad.api.exceptions.BaseNomadException - nomad.api.exceptions.URLNotFoundNomadException """ params = {"address": peer_address, "stale": stale} return self.request("raft", "peer", params=params, method="delete").ok
0.006445
def frommatrix(cls, apart, dpart, init_matrix, **kwargs):
    """Create an instance of `Parallel3dAxisGeometry` using a matrix.

    This alternative constructor uses a matrix to rotate and
    translate the default configuration. It is most useful when
    the transformation to be applied is already given as a matrix.

    Parameters
    ----------
    apart : 1-dim. `RectPartition`
        Partition of the parameter interval.
    dpart : 2-dim. `RectPartition`
        Partition of the detector parameter set.
    init_matrix : `array_like`, shape ``(3, 3)`` or ``(3, 4)``
        Transformation matrix whose left ``(3, 3)`` block is multiplied
        with the default ``det_pos_init`` and ``det_axes_init`` to
        determine the new vectors. If present, the fourth column acts
        as a translation after the initial transformation.
        The resulting ``det_axes_init`` will be normalized.
    kwargs :
        Further keyword arguments passed to the class constructor.

    Returns
    -------
    geometry : `Parallel3dAxisGeometry`

    Examples
    --------
    Map unit vectors ``e_y -> e_z`` and ``e_z -> -e_y``, keeping the
    right-handedness:

    >>> apart = odl.uniform_partition(0, np.pi, 10)
    >>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20))
    >>> matrix = np.array([[1, 0, 0],
    ...                    [0, 0, -1],
    ...                    [0, 1, 0]])
    >>> geom = Parallel3dAxisGeometry.frommatrix(
    ...     apart, dpart, init_matrix=matrix)
    >>> geom.axis
    array([ 0., -1.,  0.])
    >>> geom.det_pos_init
    array([ 0.,  0.,  1.])
    >>> geom.det_axes_init
    array([[ 1.,  0.,  0.],
           [ 0., -1.,  0.]])

    Adding a translation with a fourth matrix column:

    >>> matrix = np.array([[0, 0, -1, 0],
    ...                    [0, 1, 0, 1],
    ...                    [1, 0, 0, 1]])
    >>> geom = Parallel3dAxisGeometry.frommatrix(apart, dpart, matrix)
    >>> geom.translation
    array([ 0.,  1.,  1.])
    >>> geom.det_pos_init  # (0, 1, 0) + (0, 1, 1)
    array([ 0.,  2.,  1.])
    """
    # Get transformation and translation parts from `init_matrix`
    init_matrix = np.asarray(init_matrix, dtype=float)
    if init_matrix.shape not in ((3, 3), (3, 4)):
        raise ValueError('`matrix` must have shape (3, 3) or (3, 4), '
                         'got array with shape {}'
                         ''.format(init_matrix.shape))
    trafo_matrix = init_matrix[:, :3]
    translation = init_matrix[:, 3:].squeeze()

    # Transform the default vectors
    default_axis = cls._default_config['axis']
    default_det_pos_init = cls._default_config['det_pos_init']
    default_det_axes_init = cls._default_config['det_axes_init']
    vecs_to_transform = (default_det_pos_init,) + default_det_axes_init
    transformed_vecs = transform_system(
        default_axis, None, vecs_to_transform, matrix=trafo_matrix)

    # Use the standard constructor with these vectors
    axis, det_pos, det_axis_0, det_axis_1 = transformed_vecs
    if translation.size != 0:
        kwargs['translation'] = translation

    return cls(apart, dpart, axis,
               det_pos_init=det_pos,
               det_axes_init=[det_axis_0, det_axis_1],
               **kwargs)
0.000565
def extract(self, html_contents, css_contents=None, base_url=None): """ Extracts the cleaned html tree as a string and only css rules matching the cleaned html tree :param html_contents: The HTML contents to parse :type html_contents: str :param css_contents: The CSS contents to parse :type css_contents: str :param base_url: The base page URL to use for relative to absolute links :type base_url: str :returns: cleaned HTML contents, cleaned CSS contents :rtype: str or tuple """ # Clean HTML html_extractor = self.html_extractor( html_contents, self._xpaths_to_keep, self._xpaths_to_discard) has_matches = html_extractor.parse() if has_matches: # Relative to absolute URLs if base_url is not None: html_extractor.rel_to_abs(base_url) # Convert ElementTree to string cleaned_html = html_extractor.to_string() else: cleaned_html = None # Clean CSS if css_contents is not None: if cleaned_html is not None: css_extractor = self.css_extractor(css_contents, cleaned_html) css_extractor.parse() # Relative to absolute URLs if base_url is not None: css_extractor.rel_to_abs(base_url) cleaned_css = css_extractor.to_string() else: cleaned_css = None else: return cleaned_html return (cleaned_html, cleaned_css)
0.001844
def pipeline(*functions, funcs=None):
    """like pipe, but curried:
    pipeline(f, g, h)(*args, **kwargs) == h(g(f(*args, **kwargs)))
    """
    if funcs:
        functions = funcs
    head, *tail = functions
    return lambda *args, **kwargs: pipe(head(*args, **kwargs), funcs=tail)
0.003448
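A tiny usage sketch, assuming `pipe` applies `funcs` left to right as the docstring implies:

add_one = lambda x: x + 1
double = lambda x: x * 2
pipeline(add_one, double)(3)   # double(add_one(3)) == 8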
def execution_engine_model_changed(self, model, prop_name, info):
    """Actively observe the state machines' execution engines and show or hide the widget accordingly."""
    if not self._view_initialized:
        return
    active_sm_id = rafcon.gui.singleton.state_machine_manager_model.state_machine_manager.active_state_machine_id
    if active_sm_id is None:
        # relieve all state machines that have no active execution and hide the widget
        self.disable()
    else:
        # observe all state machines that have an active execution and show the widget
        self.check_configuration()
0.008143
def get_top_n_action_types(self, top_n):
    """Returns the top N actions by count."""

    # Count action types
    action_type_to_counts = dict()
    for action in self.actions:
        actiontype = action.actiontype
        if actiontype not in action_type_to_counts:
            action_type_to_counts[actiontype] = 1
        else:
            action_type_to_counts[actiontype] = \
                action_type_to_counts[actiontype] + 1

    # Convert the dictionary representation into a pair of lists
    action_types = list()
    counts = list()
    for actiontype in action_type_to_counts.keys():
        action_types.append(actiontype)
        counts.append(action_type_to_counts[actiontype])

    # How many actions in total?
    num_actions = len(self.actions)
    num_actions2 = 0
    for count in counts:
        num_actions2 = num_actions2 + count
    if num_actions != num_actions2:
        raise Exception('Problem counting everything up!')

    # Sort action types by count (lowest to highest)
    sorted_inds = np.argsort(counts)
    last_ind = len(sorted_inds)-1

    # Return the top N actions
    top_actions = list()
    if top_n > len(sorted_inds):
        # format the whole message; the original applied % only to the
        # second string literal, which raised a TypeError at raise time
        raise Exception('Asked for top %d action types, '
                        'but there are only %d action types'
                        % (top_n, len(sorted_inds)))
    for i in range(top_n):
        top_actions.append(action_types[sorted_inds[last_ind-i]])

    return top_actions
0.001248
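The tally-and-sort above can be expressed more compactly with collections.Counter; a sketch, not a drop-in replacement (most_common breaks ties differently than argsort):

from collections import Counter
counts = Counter(action.actiontype for action in self.actions)
top_actions = [t for t, _ in counts.most_common(top_n)]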