Columns: text (string, lengths 78 to 104k characters) and score (float64, range 0 to 0.18).
Each code sample below is followed by its score on the next line.
def is_child_of(self, parent):
    """Asserts that val is an existing path to a file and that file is a child of parent."""
    self.is_file()
    if not isinstance(parent, str_types):
        raise TypeError('given parent directory arg must be a path')
    val_abspath = os.path.abspath(self.val)
    parent_abspath = os.path.abspath(parent)
    if not val_abspath.startswith(parent_abspath):
        self._err('Expected file <%s> to be a child of <%s>, but was not.' % (val_abspath, parent_abspath))
    return self
0.007233
def WaitForReport(self, report_job):
    """Runs a report, then waits (blocks) for the report to finish generating.

    Args:
        report_job: The report job to wait for. This may be a dictionary or an
            instance of the SOAP ReportJob class.

    Returns:
        The completed report job's ID as a string.

    Raises:
        An AdManagerReportError if the report job fails to complete.
    """
    service = self._GetReportService()
    report_job_id = service.runReportJob(report_job)['id']

    if self._version > 'v201502':
        status = service.getReportJobStatus(report_job_id)
    else:
        status = service.getReportJob(report_job_id)['reportJobStatus']

    while status != 'COMPLETED' and status != 'FAILED':
        _data_downloader_logger.debug('Report job status: %s', status)
        time.sleep(30)
        if self._version > 'v201502':
            status = service.getReportJobStatus(report_job_id)
        else:
            status = service.getReportJob(report_job_id)['reportJobStatus']

    if status == 'FAILED':
        raise googleads.errors.AdManagerReportError(report_job_id)
    else:
        _data_downloader_logger.debug('Report has completed successfully')
        return report_job_id
0.008326
def scan_results(self, obj):
    """Get the AP list after scanning."""
    bsses = []
    bsses_summary = self._send_cmd_to_wpas(obj['name'], 'SCAN_RESULTS', True)
    bsses_summary = bsses_summary[:-1].split('\n')
    if len(bsses_summary) == 1:
        return bsses

    for l in bsses_summary[1:]:
        values = l.split('\t')
        bss = Profile()
        bss.bssid = values[0]
        bss.freq = int(values[1])
        bss.signal = int(values[2])
        bss.ssid = values[4]
        bss.akm = []
        if 'WPA-PSK' in values[3]:
            bss.akm.append(AKM_TYPE_WPAPSK)
        if 'WPA2-PSK' in values[3]:
            bss.akm.append(AKM_TYPE_WPA2PSK)
        if 'WPA-EAP' in values[3]:
            bss.akm.append(AKM_TYPE_WPA)
        if 'WPA2-EAP' in values[3]:
            bss.akm.append(AKM_TYPE_WPA2)
        bss.auth = AUTH_ALG_OPEN
        bsses.append(bss)

    return bsses
0.004004
def get_vault_form_for_update(self, vault_id):
    """Gets the vault form for updating an existing vault.

    A new vault form should be requested for each update transaction.

    arg:    vault_id (osid.id.Id): the ``Id`` of the ``Vault``
    return: (osid.authorization.VaultForm) - the vault form
    raise:  NotFound - ``vault_id`` is not found
    raise:  NullArgument - ``vault_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinAdminSession.get_bin_form_for_update_template
    if self._catalog_session is not None:
        return self._catalog_session.get_catalog_form_for_update(catalog_id=vault_id)
    collection = JSONClientValidated('authorization',
                                     collection='Vault',
                                     runtime=self._runtime)
    if not isinstance(vault_id, ABCId):
        raise errors.InvalidArgument('the argument is not a valid OSID Id')
    result = collection.find_one({'_id': ObjectId(vault_id.get_identifier())})

    cat_form = objects.VaultForm(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
    self._forms[cat_form.get_id().get_identifier()] = not UPDATED

    return cat_form
0.003401
def log_transition(self, transition, from_state, instance, *args, **kwargs):
    """Generic transition logging."""
    save = kwargs.pop('save', True)
    log = kwargs.pop('log', True)
    super(Workflow, self).log_transition(
        transition, from_state, instance, *args, **kwargs)
    if save:
        instance.save()
    if log:
        self.db_log(transition, from_state, instance, *args, **kwargs)
0.004535
def _t_update_b(self):
    r"""
    A method to update 'b' array at each time step according to 't_scheme'
    and the source term value
    """
    network = self.project.network
    phase = self.project.phases()[self.settings['phase']]
    Vi = network['pore.volume']
    dt = self.settings['t_step']
    s = self.settings['t_scheme']
    if (s == 'implicit'):
        f1, f2, f3 = 1, 1, 0
    elif (s == 'cranknicolson'):
        f1, f2, f3 = 0.5, 1, 0
    elif (s == 'steady'):
        f1, f2, f3 = 1, 0, 1
    x_old = self[self.settings['quantity']]
    b = (f2 * (1 - f1) * (-self._A_steady) * x_old +
         f2 * (Vi / dt) * x_old +
         f3 * np.zeros(shape=(self.Np, ), dtype=float))
    self._update_physics()
    for item in self.settings['sources']:
        Ps = self.pores(item)
        # Update b
        b[Ps] = b[Ps] - f2 * (1 - f1) * (phase[item + '.' + 'rate'][Ps])
    self._b = b
    return b
0.00201
def reset_for_retry(self):
    """Reset self for shard retry."""
    self.retries += 1
    self.last_work_item = ""
    self.active = True
    self.result_status = None
    self.input_finished = False
    self.counters_map = CountersMap()
    self.slice_id = 0
    self.slice_start_time = None
    self.slice_request_id = None
    self.slice_retries = 0
    self.acquired_once = False
0.002604
def _parse_ranges(ranges):
    """ Converts a list of string ranges to a list of [low, high] tuples. """
    for txt in ranges:
        if '-' in txt:
            low, high = txt.split('-')
        else:
            low, high = txt, txt
        yield int(low), int(high)
0.003704
def find_effect_class(self, path) -> Type[Effect]:
    """
    Find an effect class by class name or full python path to class

    Args:
        path (str): effect class name or full python path to effect class

    Returns:
        Effect class

    Raises:
        EffectError if no class is found
    """
    package_name, class_name = parse_package_string(path)

    if package_name:
        package = self.get_package(package_name)
        return package.find_effect_class(class_name, raise_for_error=True)

    for package in self.packages:
        effect_cls = package.find_effect_class(class_name)
        if effect_cls:
            return effect_cls

    raise EffectError("No effect class '{}' found in any packages".format(class_name))
0.003663
def remove(self, path):
    """Remove the FakeFile object at the specified file path.

    Args:
        path: Path to file to be removed.

    Raises:
        OSError: if path points to a directory.
        OSError: if path does not exist.
        OSError: if removal failed.
    """
    norm_path = self.absnormpath(path)
    if self.ends_with_path_separator(path):
        self._handle_broken_link_with_trailing_sep(norm_path)
    if self.exists(norm_path):
        obj = self.resolve(norm_path)
        if S_IFMT(obj.st_mode) == S_IFDIR:
            link_obj = self.lresolve(norm_path)
            if S_IFMT(link_obj.st_mode) != S_IFLNK:
                if self.is_windows_fs:
                    error = errno.EACCES
                elif self.is_macos:
                    error = errno.EPERM
                else:
                    error = errno.EISDIR
                self.raise_os_error(error, norm_path)
            norm_path = make_string_path(norm_path)
        if path.endswith(self.path_separator):
            if self.is_windows_fs:
                error = errno.EACCES
            elif self.is_macos:
                error = errno.EPERM
            else:
                error = errno.ENOTDIR
            self.raise_os_error(error, norm_path)
        else:
            self.raise_for_filepath_ending_with_separator(path, obj)

    try:
        self.remove_object(norm_path)
    except IOError as exc:
        self.raise_os_error(exc.errno, exc.filename)
0.001208
def realtime_learning_curves(runs):
    """
    example how to extract a different kind of learning curve.

    The x values are now the time the runs finished, not the budget anymore.
    We no longer plot the validation loss on the y axis, but now the test accuracy.

    This is just to show how to get different information into the interactive plot.
    """
    sr = sorted(runs, key=lambda r: r.budget)
    lc = list(filter(lambda t: not t[1] is None,
                     [(r.time_stamps['finished'], r.info['test accuracy']) for r in sr]))
    return([lc, ])
0.038388
def login(self, username, *, token=None):
    """Log in to Google Music.

    Parameters:
        username (str, Optional): Your Google Music username.
            Used for keeping stored OAuth tokens for multiple accounts separate.
        device_id (str, Optional): A mobile device ID or music manager uploader ID.
            Default: MAC address is used.
        token (dict, Optional): An OAuth token compatible with ``requests-oauthlib``.

    Returns:
        bool: ``True`` if successfully authenticated, ``False`` if not.
    """
    self._username = username
    self._oauth(username, token=token)

    return self.is_authenticated
0.027027
def _process_counter_example(self, mma, w_string):
    """Process a counterexample in the Rivest-Schapire way.

    Args:
        mma (DFA): The hypothesis automaton
        w_string (str): The examined string to be consumed
    Returns:
        None
    """
    w_string = self._find_bad_transition(mma, w_string)

    diff = len(w_string)
    same = 0
    while True:
        i = (same + diff) // 2  # integer midpoint, so it can be used as a string index
        access_string = self._run_in_hypothesis(mma, w_string, i)
        is_diff = self._check_suffix(w_string, access_string, i)
        if is_diff:
            diff = i
        else:
            same = i
        if diff - same == 1:
            break
    exp = w_string[diff:]

    self.observation_table.em_vector.append(exp)
    for row in self.observation_table.sm_vector + self.observation_table.smi_vector:
        self._fill_table_entry(row, exp)
0.003145
def start_consuming(self, to_tuple=False, auto_decode=True):
    """Start consuming messages.

    :param bool to_tuple: Should incoming messages be converted to a
                          tuple before delivery.
    :param bool auto_decode: Auto-decode strings when possible.

    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :return:
    """
    while not self.is_closed:
        self.process_data_events(
            to_tuple=to_tuple,
            auto_decode=auto_decode
        )
        if self.consumer_tags:
            sleep(IDLE_WAIT)
            continue
        break
0.002519
def wait_for_simulation_stop(self, timeout=None):
    """Block until the simulation is done or timeout seconds exceeded.

    If the simulation stops before timeout, siminfo is returned.
    """
    start = datetime.now()
    while self.get_is_sim_running():
        sleep(0.5)
        if timeout is not None:
            if (datetime.now() - start).seconds >= timeout:
                ret = None
                break
    else:
        ret = self.simulation_info()
    return ret
0.003759
def catch(func, *args, **kwargs):
    """
    Call the supplied function with the supplied arguments,
    catching and returning any exception that it throws.

    Arguments:
        func: the function to run.
        *args: positional arguments to pass into the function.
        **kwargs: keyword arguments to pass into the function.

    Returns:
        If the function throws an exception, return the exception.
        If the function does not throw an exception, return None.
    """
    try:
        func(*args, **kwargs)
    except Exception as e:
        return e
0.001748
def substitute_globals(config_dict):
    """
    Set global variables to values defined in `config_dict`.

    Args:
        config_dict (dict): dictionary with data, which are used to set
            `globals`.

    Note:
        `config_dict` have to be dictionary, or it is ignored. Also all
        variables, that are not already in globals, or are not types defined
        in :attr:`_ALLOWED` (str, int, float) or starts with ``_`` are
        silently ignored.
    """
    constants = get_all_constants()

    if type(config_dict) != dict:
        return

    for key in config_dict.keys():
        if key in constants and type(config_dict[key]) in _ALLOWED:
            globals()[key] = config_dict[key]
0.00137
def get_series_by_name(self, series_name):
    """Perform lookup for series

    :param str series_name: series name found within filename
    :returns: instance of series
    :rtype: object
    """
    try:
        return self.api.search_series(name=series_name), None
    except exceptions.TVDBRequestException as err:
        LOG.exception('search for series %s failed', series_name)
        return None, _as_str(err)
0.004348
def _Rforce(self, R, phi=0., t=0.):
    """
    NAME:
       _Rforce
    PURPOSE:
       evaluate the radial force
    INPUT:
       R
       phi
       t
    OUTPUT:
       F_R(R(,\phi,t))
    HISTORY:
       2010-07-13 - Written - Bovy (NYU)
    """
    return self._Pot.Rforce(R, 0., t=t, use_physical=False)
0.024725
def str2chars(strings) -> numpy.ndarray:
    """Return |numpy.ndarray| containing the byte characters (second axis) of
    all given strings (first axis).

    >>> from hydpy.core.netcdftools import str2chars
    >>> str2chars(['zeros', 'ones'])
    array([[b'z', b'e', b'r', b'o', b's'],
           [b'o', b'n', b'e', b's', b'']],
          dtype='|S1')

    >>> str2chars([])
    array([], shape=(0, 0), dtype='|S1')
    """
    maxlen = 0
    for name in strings:
        maxlen = max(maxlen, len(name))
    # noinspection PyTypeChecker
    chars = numpy.full(
        (len(strings), maxlen), b'', dtype='|S1')
    for idx, name in enumerate(strings):
        for jdx, char in enumerate(name):
            chars[idx, jdx] = char.encode('utf-8')
    return chars
0.001292
def add_caption(self, image, caption, colour=None):
    """ Add a caption to the image """
    if colour is None:
        colour = "white"
    width, height = image.size
    draw = ImageDraw.Draw(image)
    draw.font = self.font
    draw.text((width // 10, height // 20), caption, fill=colour)
    return image
0.007557
def add(self, handler, priority=10):
    """
    Add handler.

    :param callable handler: callable handler
    :return callable: The handler you added is given back so this can be
        used as a decorator.
    """
    self.container.add_handler(handler, priority=priority)
    return handler
0.009494
def _apply(self, ctx: ExtensionContext) -> AugmentedDict:
    """
    Replaces any {{env::*}} directives with its actual environment variable value or a default.

    Args:
        ctx: The processing context.

    Returns:
        Returns the altered node key and value.
    """
    node_key, node_value = ctx.node

    def process(pattern: Pattern[str], _str: str) -> str:
        _match = pattern.match(_str)
        if _match is None:
            return _str

        # We got a match
        # Group 0: Whole match; Group 1: Our placeholder; Group 2: The environment variable
        placeholder, envvar = _match.group(1), _match.group(2)
        envvalue = os.environ.get(envvar, None)
        if envvalue is None and self.fail_on_unset:
            raise ExtensionError("Environment variable '{}' is unset.".format(envvar))

        return _str.replace(placeholder, envvalue or self.default)

    _pattern = re.compile(self.__pattern__)
    node_key = process(_pattern, node_key)
    node_value = process(_pattern, node_value)

    return {node_key: node_value}
0.00431
def generate_salt_cmd(target, module, args=None, kwargs=None):
    """
    Generates a command (the arguments) for the `salt` or `salt-ssh` CLI
    """
    args = args or []
    kwargs = kwargs or {}
    target = target or '*'
    target = '"%s"' % target
    cmd = [target, module]
    for arg in args:
        cmd.append(arg)
    for key in kwargs:
        cmd.append('{0}={1}'.format(key, kwargs[key]))
    return cmd
0.002381
def vol_tehrahedron(poly):
    """Volume of an irregular tetrahedron."""
    p_a = np.array(poly[0])
    p_b = np.array(poly[1])
    p_c = np.array(poly[2])
    p_d = np.array(poly[3])
    return abs(np.dot(
        np.subtract(p_a, p_d),
        np.cross(
            np.subtract(p_b, p_d),
            np.subtract(p_c, p_d))) / 6)
0.00303
def _collect_dirty_tabs(self, exept=None):
    """
    Collects the list of dirty tabs
    """
    widgets = []
    filenames = []
    for i in range(self.count()):
        widget = self.widget(i)
        try:
            if widget.dirty and widget != exept:
                widgets.append(widget)
                filenames.append(widget.file.path)
        except AttributeError:
            pass
    return widgets, filenames
0.004149
def get_nagios_unit_name(relation_name='nrpe-external-master'):
    """
    Return the nagios unit name prepended with host_context if needed

    :param str relation_name: Name of relation nrpe sub joined to
    """
    host_context = get_nagios_hostcontext(relation_name)
    if host_context:
        unit = "%s:%s" % (host_context, local_unit())
    else:
        unit = local_unit()
    return unit
0.002488
def connect(host=DEFAULT_HOST, port=DEFAULT_PORT, base=DEFAULT_BASE,
            chunk_size=multipart.default_chunk_size, **defaults):
    """Create a new :class:`~ipfsapi.Client` instance and connect to the
    daemon to validate that its version is supported.

    Raises
    ------
    ~ipfsapi.exceptions.VersionMismatch
    ~ipfsapi.exceptions.ErrorResponse
    ~ipfsapi.exceptions.ConnectionError
    ~ipfsapi.exceptions.ProtocolError
    ~ipfsapi.exceptions.StatusError
    ~ipfsapi.exceptions.TimeoutError

    All parameters are identical to those passed to the constructor of the
    :class:`~ipfsapi.Client` class.

    Returns
    -------
        ~ipfsapi.Client
    """
    # Create client instance
    client = Client(host, port, base, chunk_size, **defaults)

    # Query version number from daemon and validate it
    assert_version(client.version()['Version'])

    return client
0.001115
def line(surf, start, end, color=BLACK, width=1, style=FLAT):
    """Draws an antialiased line on the surface."""
    width = round(width, 1)
    if width == 1:
        # return pygame.draw.aaline(surf, color, start, end)
        return gfxdraw.line(surf, *start, *end, color)

    start = V2(*start)
    end = V2(*end)

    line_vector = end - start
    half_side = line_vector.normnorm() * width / 2

    point1 = start + half_side
    point2 = start - half_side
    point3 = end - half_side
    point4 = end + half_side

    # noinspection PyUnresolvedReferences
    liste = [
        (point1.x, point1.y),
        (point2.x, point2.y),
        (point3.x, point3.y),
        (point4.x, point4.y)
    ]

    rect = polygon(surf, liste, color)

    if style == ROUNDED:
        _ = circle(surf, start, width / 2, color)
        rect = merge_rects(rect, _)

        _ = circle(surf, end, width / 2, color)
        rect = merge_rects(rect, _)

    return rect
0.001044
def bifurcation_partition(bif_point):
    '''Calculate the partition at a bifurcation point

    We first ensure that the input point has only two children.

    The number of nodes in each child tree is counted. The partition is
    defined as the ratio of the largest number to the smallest number.'''
    assert len(bif_point.children) == 2, 'A bifurcation point must have exactly 2 children'

    n = float(sum(1 for _ in bif_point.children[0].ipreorder()))
    m = float(sum(1 for _ in bif_point.children[1].ipreorder()))
    return max(n, m) / min(n, m)
0.003578
def set_with_conversion(self, variable, value_string):
    """Convert user supplied string to Python type.

    Lets user use values such as True, False and integers. All variables can be
    set to None, regardless of type. Handle the case where a string is typed by
    the user and is not quoted, as a string literal.
    """
    self._assert_valid_variable(variable)
    try:
        v = ast.literal_eval(value_string)
    except (ValueError, SyntaxError):
        v = value_string
    if v is None or v == "none":
        self._variables[variable] = None
    else:
        try:
            type_converter = variable_type_map[variable]
            value_string = self._validate_variable_type(
                value_string, type_converter
            )
            value = type_converter(value_string)
            self._variables[variable] = value
        except ValueError:
            raise d1_cli.impl.exceptions.InvalidArguments(
                "Invalid value for {}: {}".format(variable, value_string)
            )
0.003568
def main(command_line_arguments=None): """Reads score files, computes error measures and plots curves.""" args = command_line_options(command_line_arguments) # get some colors for plotting cmap = mpl.cm.get_cmap(name='hsv') count = len(args.files) + (len(args.baselines) if args.baselines else 0) colors = [cmap(i) for i in numpy.linspace(0, 1.0, count+1)] # First, read the score files logger.info("Loading %d score files" % len(args.files)) scores = [read_score_file(os.path.join(args.directory, f)) for f in args.files] false_alarms = [] detection_rate = [] logger.info("Computing FROC curves") for score in scores: # compute some thresholds tmin = min(score[2]) tmax = max(score[2]) count = 100 thresholds = [tmin + float(x)/count * (tmax - tmin) for x in range(count+2)] false_alarms.append([]) detection_rate.append([]) for threshold in thresholds: detection_rate[-1].append(numpy.count_nonzero(numpy.array(score[1]) >= threshold) / float(score[0])) false_alarms[-1].append(numpy.count_nonzero(numpy.array(score[2]) >= threshold)) # to display 0 in a semilogx plot, we have to add a little # false_alarms[-1][-1] += 1e-8 # also read baselines if args.baselines is not None: for baseline in args.baselines: dr = [] fa = [] with open(os.path.join(args.baseline_directory, baseline)) as f: for line in f: splits = line.rstrip().split() dr.append(float(splits[0])) fa.append(int(splits[1])) false_alarms.append(fa) detection_rate.append(dr) logger.info("Plotting FROC curves to file '%s'", args.output) # create a multi-page PDF for the ROC curve pdf = PdfPages(args.output) figure = _plot_froc(false_alarms, detection_rate, colors, args.legends, args.title, args.max) mpl.xlabel('False Alarm (of %d pruned)' % len(scores[0][2])) mpl.ylabel('Detection Rate in \%% (total %d faces)' % scores[0][0]) pdf.savefig(figure) pdf.close() if args.count_detections: for i, f in enumerate(args.files): det, all = count_detections(f) print("The number of detected faces for %s is %d out of %d" % (args.legends[i], det, all))
0.019919
def handle_endtag(self, tag):
    """Return representation of html end tag."""
    if tag in self.mathml_elements:
        self.fed.append("</{0}>".format(tag))
0.011628
def _aggr_mean(inList):
    """ Returns mean of non-None elements of the list
    """
    aggrSum = 0
    nonNone = 0
    for elem in inList:
        if elem != SENTINEL_VALUE_FOR_MISSING_DATA:
            aggrSum += elem
            nonNone += 1
    if nonNone != 0:
        return aggrSum / nonNone
    else:
        return None
0.030822
def set_input_divide_by_period(holder, period, array):
    """
    This function can be declared as a ``set_input`` attribute of a variable.

    In this case, the variable will accept inputs on larger periods than its definition period, and the value for the
    larger period will be divided between its subperiods.

    To read more about ``set_input`` attributes, check the `documentation
    <https://openfisca.org/doc/coding-the-legislation/35_periods.html#set-input-automatically-process-variable-inputs-defined-for-periods-not-matching-the-definition-period>`_.
    """
    if not isinstance(array, np.ndarray):
        array = np.array(array)
    period_size = period.size
    period_unit = period.unit

    if holder.variable.definition_period == MONTH:
        cached_period_unit = periods.MONTH
    elif holder.variable.definition_period == YEAR:
        cached_period_unit = periods.YEAR
    else:
        raise ValueError('set_input_divide_by_period can be used only for yearly or monthly variables.')

    after_instant = period.start.offset(period_size, period_unit)

    # Count the number of elementary periods to change, and the difference with what is already known.
    remaining_array = array.copy()
    sub_period = period.start.period(cached_period_unit)
    sub_periods_count = 0
    while sub_period.start < after_instant:
        existing_array = holder.get_array(sub_period)
        if existing_array is not None:
            remaining_array -= existing_array
        else:
            sub_periods_count += 1
        sub_period = sub_period.offset(1)

    # Cache the input data
    if sub_periods_count > 0:
        divided_array = remaining_array / sub_periods_count
        sub_period = period.start.period(cached_period_unit)
        while sub_period.start < after_instant:
            if holder.get_array(sub_period) is None:
                holder._set(sub_period, divided_array)
            sub_period = sub_period.offset(1)
    elif not (remaining_array == 0).all():
        raise ValueError("Inconsistent input: variable {0} has already been set for all months contained in period {1}, and value {2} provided for {1} doesn't match the total ({3}). This error may also be thrown if you try to call set_input twice for the same variable and period.".format(holder.variable.name, period, array, array - remaining_array))
0.002969
def target_range(self):
    """Get the range covered on the target/reference strand

    :returns: Genomic range of the target strand
    :rtype: GenomicRange
    """
    a = self.alignment_ranges
    return GenomicRange(a[0][0].chr, a[0][0].start, a[-1][0].end)
0.011494
def write(self, fileobj=sys.stdout, indent=u""):
    """
    Recursively write an element and its children to a file.
    """
    fileobj.write(self.start_tag(indent))
    fileobj.write(u"\n")
    for c in self.childNodes:
        if c.tagName not in self.validchildren:
            raise ElementError("invalid child %s for %s" % (c.tagName, self.tagName))
        c.write(fileobj, indent + Indent)
    if self.pcdata is not None:
        fileobj.write(xmlescape(self.pcdata))
        fileobj.write(u"\n")
    fileobj.write(self.end_tag(indent))
    fileobj.write(u"\n")
0.037807
def save(cls, network, phases=[], filename='', delim=' | ', fill_nans=None): r""" Save network and phase data to a single vtp file for visualizing in Paraview Parameters ---------- network : OpenPNM Network Object The Network containing the data to be written phases : list, optional A list containing OpenPNM Phase object(s) containing data to be written filename : string, optional Filename to write data. If no name is given the file is named after the network delim : string Specify which character is used to delimit the data names. The default is ' | ' which creates a nice clean output in the Paraview pipeline viewer (e.g. net | property | pore | diameter) fill_nans : scalar The value to use to replace NaNs with. The VTK file format does not work with NaNs, so they must be dealt with. The default is `None` which means property arrays with NaNs are not written to the file. Other useful options might be 0 or -1, but the user must be aware that these are not real values, only place holders. """ project, network, phases = cls._parse_args(network=network, phases=phases) am = Dict.to_dict(network=network, phases=phases, interleave=True, categorize_by=['object', 'data']) am = FlatDict(am, delimiter=delim) key_list = list(sorted(am.keys())) network = network[0] points = network['pore.coords'] pairs = network['throat.conns'] num_points = np.shape(points)[0] num_throats = np.shape(pairs)[0] root = ET.fromstring(VTK._TEMPLATE) piece_node = root.find('PolyData').find('Piece') piece_node.set("NumberOfPoints", str(num_points)) piece_node.set("NumberOfLines", str(num_throats)) points_node = piece_node.find('Points') coords = VTK._array_to_element("coords", points.T.ravel('F'), n=3) points_node.append(coords) lines_node = piece_node.find('Lines') connectivity = VTK._array_to_element("connectivity", pairs) lines_node.append(connectivity) offsets = VTK._array_to_element("offsets", 2*np.arange(len(pairs))+2) lines_node.append(offsets) point_data_node = piece_node.find('PointData') cell_data_node = piece_node.find('CellData') for key in key_list: array = am[key] if array.dtype == 'O': logger.warning(key + ' has dtype object,' + ' will not write to file') else: if array.dtype == np.bool: array = array.astype(int) if np.any(np.isnan(array)): if fill_nans is None: logger.warning(key + ' has nans,' + ' will not write to file') continue else: array[np.isnan(array)] = fill_nans element = VTK._array_to_element(key, array) if (array.size == num_points): point_data_node.append(element) elif (array.size == num_throats): cell_data_node.append(element) if filename == '': filename = project.name filename = cls._parse_filename(filename=filename, ext='vtp') tree = ET.ElementTree(root) tree.write(filename) with open(filename, 'r+') as f: string = f.read() string = string.replace('</DataArray>', '</DataArray>\n\t\t\t') f.seek(0) # consider adding header: '<?xml version="1.0"?>\n'+ f.write(string)
0.000509
def copy_script(self, filename, id_=-1):
    """Copy a script to the repo's Script subdirectory.

    Scripts are copied as files to a path, or, on a "migrated" JSS, are
    POSTed to the JSS (pass an id if you wish to associate the script
    with an existing Script object).

    Args:
        filename: Path for file to copy.
        id_: Int ID, used _only_ for migrated repos. Default is -1,
            which creates a new Script.
    """
    if ("jss" in self.connection.keys() and
            self.connection["jss"].jss_migrated):
        self._copy_script_migrated(filename, id_, SCRIPT_FILE_TYPE)
    else:
        basename = os.path.basename(filename)
        self._copy(filename, os.path.join(self.connection["mount_point"],
                                          "Scripts", basename))
0.002315
def _extract_symbols(self, symbols, default=None):
    """! @brief Fill 'symbols' field with required flash algo symbols"""
    to_ret = {}
    for symbol in symbols:
        symbolInfo = self.elf.symbol_decoder.get_symbol_for_name(symbol)
        if symbolInfo is None:
            if default is not None:
                to_ret[symbol] = default
                continue
            raise FlashAlgoException("Missing symbol %s" % symbol)
        to_ret[symbol] = symbolInfo.address
    return to_ret
0.00367
def update_col(self, column_name, series):
    """
    Add or replace a column in the underlying DataFrame.

    Parameters
    ----------
    column_name : str
        Column to add or replace.
    series : pandas.Series or sequence
        Column data.
    """
    logger.debug('updating column {!r} in table {!r}'.format(
        column_name, self.name))
    self.local[column_name] = series
0.004515
def create_processors_from_settings(self):
    """
    Expects the Django setting "EVENT_TRACKING_PROCESSORS" to be defined and
    point to a list of backend engine configurations.

    Example::

        EVENT_TRACKING_PROCESSORS = [
            {
                'ENGINE': 'some.arbitrary.Processor'
            },
            {
                'ENGINE': 'some.arbitrary.OtherProcessor',
                'OPTIONS': {
                    'user': 'foo'
                }
            },
        ]
    """
    config = getattr(settings, DJANGO_PROCESSOR_SETTING_NAME, [])

    processors = self.instantiate_objects(config)

    return processors
0.004167
def exclude(self, *fields):
    """
    Project the columns which are not included in the given fields

    :param fields: field names
    :return: new collection
    :rtype: :class:`odps.df.expr.expression.CollectionExpr`
    """
    if len(fields) == 1 and isinstance(fields[0], list):
        exclude_fields = fields[0]
    else:
        exclude_fields = list(fields)

    exclude_fields = [self._defunc(it) for it in exclude_fields]
    exclude_fields = [field.name if not isinstance(field, six.string_types) else field
                      for field in exclude_fields]

    fields = [name for name in self._schema.names
              if name not in exclude_fields]

    return self._project(fields)
0.003958
def _process(self, resource=None, data={}):
    """Processes the current transaction

    Sends an HTTP request to the PAYDUNYA API server
    """
    # use object's data if no data is passed
    _data = data or self._data
    rsc_url = self.get_rsc_endpoint(resource)
    if _data:
        req = requests.post(rsc_url, data=json.dumps(_data),
                            headers=self.headers)
    else:
        req = requests.get(rsc_url, params=_data,
                           headers=self.headers)
    if req.status_code == 200:
        self._response = json.loads(req.text)
        if int(self._response['response_code']) == 0:
            return (True, self._response)
        else:
            return (False, self._response['response_text'])
    else:
        return (500, "Request Failed")
0.002275
def get_normalized_elevation_array(world):
    ''' Convert raw elevation into normalized values between 0 and 255,
        and return a numpy array of these values '''
    e = world.layers['elevation'].data
    ocean = world.layers['ocean'].data

    mask = numpy.ma.array(e, mask=ocean)  # only land
    min_elev_land = mask.min()
    max_elev_land = mask.max()
    elev_delta_land = max_elev_land - min_elev_land

    mask = numpy.ma.array(e, mask=numpy.logical_not(ocean))  # only ocean
    min_elev_sea = mask.min()
    max_elev_sea = mask.max()
    elev_delta_sea = max_elev_sea - min_elev_sea

    c = numpy.empty(e.shape, dtype=numpy.float)
    c[numpy.invert(ocean)] = (e[numpy.invert(ocean)] - min_elev_land) * 127 / elev_delta_land + 128
    c[ocean] = (e[ocean] - min_elev_sea) * 127 / elev_delta_sea

    c = numpy.rint(c).astype(dtype=numpy.int32)  # proper rounding
    return c
0.00224
async def call(self, method, **params):
    """
    Call a Slack Web API method

    :param method: Slack Web API method to call
    :param params: {str: object} parameters to method
    :return: dict()
    """
    url = self.SLACK_RPC_PREFIX + method
    data = FormData()
    data.add_fields(MultiDict(token=self.bot_token, charset='utf-8', **params))

    response_body = await self.request(
        method='POST',
        url=url,
        data=data
    )

    if 'warning' in response_body:
        logger.warning(f'Warnings received from API call {method}: {response_body["warning"]}')

    if 'ok' not in response_body:
        logger.error(f'No ok marker in slack API call {method} {params} => {response_body}')
        raise SlackCallException('There is no ok marker, ... strange', method=method)

    if not response_body['ok']:
        logger.error(f'Slack API call failed {method} {params} => {response_body}')
        raise SlackCallException('No OK response returned', method=method)

    return response_body
0.006301
def walk(self, top, file_list={}):
    """Walks the walk. nah, seriously: reads the file and stores a hashkey
    corresponding to its content."""
    for root, dirs, files in os.walk(top, topdown=False):
        if os.path.basename(root) in self.ignore_dirs:
            # Do not dig in ignored dirs
            continue
        for name in files:
            full_path = os.path.join(root, name)
            if self.include(full_path):
                if os.path.isfile(full_path):  # preventing fail if the file vanishes
                    content = open(full_path).read()
                    hashcode = hashlib.sha224(content).hexdigest()
                    file_list[full_path] = hashcode
        for name in dirs:
            if name not in self.ignore_dirs:
                self.walk(os.path.join(root, name), file_list)
    return file_list
0.002123
def Register(self, name, constructor):
    """Registers a new constructor in the factory.

    Args:
        name: A name associated with given constructor.
        constructor: A constructor function that creates instances.

    Raises:
        ValueError: If there already is a constructor associated with given name.
    """
    precondition.AssertType(name, Text)

    if name in self._constructors:
        message = "Duplicated constructors %r and %r for name '%s'"
        message %= (constructor, self._constructors[name], name)
        raise ValueError(message)

    self._constructors[name] = constructor
0.006645
def image_recognition(image, cloud=None, batch=False, api_key=None, version=None, **kwargs):
    """
    Given an input image, returns a dictionary of image classifications with associated scores

    * Input can be either grayscale or rgb color and should either be a numpy array or nested list format.
    * Input data should be either uint8 0-255 range values or floating point between 0 and 1.
    * Large images (i.e. 1024x768+) are much bigger than needed, minaxis resizing will be done internally to 144 if needed.
    * For ideal performance, images should be square aspect ratio but non-square aspect ratios are supported as well.

    Example usage:

    .. code-block:: python

       >>> from indicoio import image_recognition
       >>> features = image_recognition(<filename>)

    :param image: The image to be analyzed.
    :type image: str
    :rtype: dict containing classifications
    """
    image = data_preprocess(image, batch=batch, size=144, min_axis=True)
    url_params = {"batch": batch, "api_key": api_key, "version": version}
    return api_handler(image, cloud=cloud, api="imagerecognition", url_params=url_params, **kwargs)
0.006926
def _add_query_parameter(url, name, value):
    """Adds a query parameter to a url.

    Replaces the current value if it already exists in the URL.

    Args:
        url: string, url to add the query parameter to.
        name: string, query parameter name.
        value: string, query parameter value.

    Returns:
        Updated query parameter. Does not update the url if value is None.
    """
    if value is None:
        return url
    else:
        return update_query_params(url, {name: value})
0.001969
def handle_tls_connected_event(self, event):
    """Verify the peer certificate on the `TLSConnectedEvent`.
    """
    if self.settings["tls_verify_peer"]:
        valid = self.settings["tls_verify_callback"](event.stream,
                                                     event.peer_certificate)
        if not valid:
            raise SSLError("Certificate verification failed")
    event.stream.tls_established = True
    with event.stream.lock:
        event.stream._restart_stream()
0.00566
def _inline_tcl_xfer(
    self, source_file=None, source_config=None, dest_file=None, file_system=None
):
    """
    Use Netmiko InlineFileTransfer (TCL) to transfer file or config to remote device.

    Return (status, msg)
    status = boolean
    msg = details on what happened
    """
    if source_file:
        return self._xfer_file(
            source_file=source_file,
            dest_file=dest_file,
            file_system=file_system,
            TransferClass=InLineTransfer,
        )
    if source_config:
        return self._xfer_file(
            source_config=source_config,
            dest_file=dest_file,
            file_system=file_system,
            TransferClass=InLineTransfer,
        )
    raise ValueError("File source not specified for transfer.")
0.005727
def delete(self, index, doc_type, id, bulk=False, **query_params):
    """
    Delete a typed JSON document from a specific index based on its id.
    If bulk is True, the delete operation is put in bulk mode.
    """
    if bulk:
        cmd = {"delete": {"_index": index, "_type": doc_type, "_id": id}}
        self.bulker.add(json.dumps(cmd, cls=self.encoder))
        return self.flush_bulk()

    path = make_path(index, doc_type, id)
    return self._send_request('DELETE', path, params=query_params)
0.003472
def join(self, other):
    r"""
    Args:
        other (?):

    CommandLine:
        python -m sortedcontainers.sortedlist join2

    Example:
        >>> from utool.experimental.dynamic_connectivity import *  # NOQA
        >>> self = EulerTourList([1, 2, 3, 2, 4, 2, 1], load=3)
        >>> other = EulerTourList([0, 5, 9, 5, 0], load=3)
        >>> result = self.join(other)
        >>> print(result)
    """
    assert self._load == other._load, 'loads must be the same'
    self._lists.extend(other._lists)
    self._cumlen.extend([c + self._len for c in other._cumlen])
    self._len += other._len
0.00299
def get_operation_root_type(
    schema: GraphQLSchema,
    operation: Union[OperationDefinitionNode, OperationTypeDefinitionNode],
) -> GraphQLObjectType:
    """Extract the root type of the operation from the schema."""
    operation_type = operation.operation
    if operation_type == OperationType.QUERY:
        query_type = schema.query_type
        if not query_type:
            raise GraphQLError(
                "Schema does not define the required query root type.", operation
            )
        return query_type
    elif operation_type == OperationType.MUTATION:
        mutation_type = schema.mutation_type
        if not mutation_type:
            raise GraphQLError("Schema is not configured for mutations.", operation)
        return mutation_type
    elif operation_type == OperationType.SUBSCRIPTION:
        subscription_type = schema.subscription_type
        if not subscription_type:
            raise GraphQLError("Schema is not configured for subscriptions.", operation)
        return subscription_type
    else:
        raise GraphQLError(
            "Can only have query, mutation and subscription operations.", operation
        )
0.004296
def check_recommended_global_attributes(self, dataset): ''' Check the global recommended attributes for 2.0 templates. These go an extra step besides just checking that they exist. :param netCDF4.Dataset dataset: An open netCDF dataset :id = "" ; //.................................................. RECOMMENDED - Should be a human readable unique identifier for data set. (ACDD) :naming_authority = "" ; //.................................... RECOMMENDED - Backward URL of institution (for example, gov.noaa.ncei). (ACDD) :history = "" ; //............................................. RECOMMENDED - Provides an audit trail for modifications to the original data. (ACDD) :source = "" ; //.............................................. RECOMMENDED - The method of production of the original data. (CF) :processing_level = "" ; //.................................... RECOMMENDED - Provide a description of the processing or quality control level of the data. (ACDD) :comment = "" ; //............................................. RECOMMENDED - Provide useful additional information here. (CF) :acknowledgment = "" ; //...................................... RECOMMENDED - A place to acknowledge various types of support for the project that produced this data. (ACDD) :license = "" ; //............................................. RECOMMENDED - Describe the restrictions to data access and distribution. (ACDD) :standard_name_vocabulary = "CF Standard Name Table vNN" ; //.. RECOMMENDED - If using CF standard name attribute for variables. Replace NN with the CF standard name table number (CF) :date_created = "" ; //........................................ RECOMMENDED - Creation date of this version of the data(netCDF). Use ISO 8601:2004 for date and time. (ACDD) :creator_name = "" ; //........................................ RECOMMENDED - The name of the person (or other creator type specified by the creator_type attribute) principally responsible for creating this data. (ACDD) :creator_email = "" ; //....................................... RECOMMENDED - The email address of the person (or other creator type specified by the creator_type attribute) principally responsible for creating this data. (ACDD) :creator_url = "" ; //......................................... RECOMMENDED - The URL of the person (or other creator type specified by the creator_type attribute) principally responsible for creating this data. (ACDD) :institution = "" ; //......................................... RECOMMENDED -The name of the institution principally responsible for originating this data.. An institution attribute can be used for each variable if variables come from more than one institution. (CF/ACDD) :project = "" ; //............................................. RECOMMENDED - The name of the project(s) principally responsible for originating this data. Multiple projects can be separated by commas. (ACDD) :publisher_name = "" ; //...................................... RECOMMENDED - The name of the person (or other entity specified by the publisher_type attribute) responsible for publishing the data file or product to users, with its current metadata and format. (ACDD) :publisher_email = "" ; //..................................... RECOMMENDED - The email address of the person (or other entity specified by the publisher_type attribute) responsible for publishing the data file or product to users, with its current metadata and format. (ACDD) :publisher_url = "" ; //....................................... 
RECOMMENDED - The URL of the person (or other entity specified by the publisher_type attribute) responsible for publishing the data file or product to users, with its current metadata and format. (ACDD) :geospatial_bounds = "" ; //................................... RECOMMENDED - Describes the data's 2D or 3D geospatial extent in OGC's Well-Known Text (WKT) Geometry format. (ACDD) :geospatial_bounds_crs = "" ; //............................... RECOMMENDED - The coordinate reference system (CRS) of the point coordinates in the geospatial_bounds attribute. (ACDD) :geospatial_bounds_vertical_crs = "" ; //...................... RECOMMENDED - The vertical coordinate reference system (CRS) for the Z axis of the point coordinates in the geospatial_bounds attribute. (ACDD) :geospatial_lat_min = 0.0d ; //................................ RECOMMENDED - Describes a simple lower latitude limit. (ACDD) :geospatial_lat_max = 0.0d ; //................................ RECOMMENDED - Describes a simple upper latitude limit. (ACDD) :geospatial_lon_min = 0.0d ; //................................ RECOMMENDED - Describes a simple lower longitude limit. (ACDD) :geospatial_lon_max = 0.0d ; //................................ RECOMMENDED - Describes a simple upper longitude limit. (ACDD) :geospatial_vertical_min = 0.0d ; //........................... RECOMMENDED - Describes the numerically smaller vertical limit. (ACDD) :geospatial_vertical_max = 0.0d ; //........................... RECOMMENDED - Describes the numerically larger vertical limit. (ACDD) :geospatial_vertical_positive = "" ; //........................ RECOMMENDED - Use "up" or "down". (ACDD) :time_coverage_start = "" ; //................................. RECOMMENDED - Describes the time of the first data point in the data set. Use ISO 8601:2004 for date and time. (ACDD) :time_coverage_end = "" ; //................................... RECOMMENDED - Describes the time of the last data point in the data set. Use ISO 8601:2004 for date and time.(ACDD) :time_coverage_duration = "" ; //.............................. RECOMMENDED - Describes the duration of the data set. Use ISO 8601:2004 for date and time. (ACDD) :time_coverage_resolution = "" ; //............................ RECOMMENDED - Describes the targeted time period between each value in the data set. Use ISO 8601:2004 for date and time. (ACDD) :uuid = "" ; //................................................ RECOMMENDED - Machine readable unique identifier for each file. A new uuid is created whenever the file is changed. (NCEI) :sea_name = "" ; //............................................ RECOMMENDED - The names of the sea in which the data were collected. Use NCEI sea names table. (NCEI) ''' recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes') sea_names = [sn.lower() for sn in util.get_sea_names()] sea_name = getattr(dataset, 'sea_name', '') sea_name = sea_name.replace(', ', ',') sea_name = sea_name.split(',') if sea_name else [] for sea in sea_name: recommended_ctx.assert_true( sea.lower() in sea_names, 'sea_name attribute should exist and should be from the NODC sea names list: {} is not a valid sea name'.format(sea) ) # Parse dates, check for ISO 8601 for attr in ['time_coverage_start', 'time_coverage_end', 'date_created', 'date_modified']: attr_value = getattr(dataset, attr, '') try: parse_datetime(attr_value) recommended_ctx.assert_true(True, '') # Score it True! 
except ISO8601Error: recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value)) value = getattr(dataset, 'geospatial_vertical_positive', '') recommended_ctx.assert_true(value.lower() in ['up', 'down'], 'geospatial_vertical_positive attribute should be up or down: {}'.format(value)) # I hate english. ack_exists = any((getattr(dataset, attr, '') != '' for attr in ['acknowledgment', 'acknowledgement'])) recommended_ctx.assert_true(ack_exists, 'acknowledgement attribute should exist and not be empty') standard_name_vocab = getattr(dataset, 'standard_name_vocabulary', '') regex = re.compile(r'[sS]tandard [nN]ame [tT]able') recommended_ctx.assert_true(regex.search(standard_name_vocab), "standard_name_vocabulary doesn't contain 'Standard Name Table': {}".format(standard_name_vocab)) if hasattr(dataset, 'comment'): recommended_ctx.assert_true(getattr(dataset, 'comment', '') != '', 'comment attribute should not be empty if specified') return recommended_ctx.to_result()
0.0053
def plot_vgp_basemap(mapname, vgp_lon=None, vgp_lat=None, di_block=None, label='', color='k', marker='o', markersize=20, legend='no'): """ This function plots a paleomagnetic pole on whatever current map projection has been set using the basemap plotting library. Before this function is called, a plot needs to be initialized with code that looks something like: >from mpl_toolkits.basemap import Basemap >mapname = Basemap(projection='ortho',lat_0=35,lon_0=200) >plt.figure(figsize=(6, 6)) >mapname.drawcoastlines(linewidth=0.25) >mapname.fillcontinents(color='bisque',lake_color='white',zorder=1) >mapname.drawmapboundary(fill_color='white') >mapname.drawmeridians(np.arange(0,360,30)) >mapname.drawparallels(np.arange(-90,90,30)) Required Parameters ----------- mapname : the name of the current map that has been developed using basemap plon : the longitude of the paleomagnetic pole being plotted (in degrees E) plat : the latitude of the paleomagnetic pole being plotted (in degrees) Optional Parameters (defaults are used if not specified) ----------- color : the color desired for the symbol and its A95 ellipse (default is 'k' aka black) marker : the marker shape desired for the pole mean symbol (default is 'o' aka a circle) label : the default is no label. Labels can be assigned. legend : the default is no legend ('no'). Putting 'yes' will plot a legend. """ if di_block != None: di_lists = unpack_di_block(di_block) if len(di_lists) == 3: vgp_lon, vgp_lat, intensity = di_lists if len(di_lists) == 2: vgp_lon, vgp_lat = di_lists centerlon, centerlat = mapname(vgp_lon, vgp_lat) mapname.scatter(centerlon, centerlat, marker=marker, s=markersize, color=color, label=label, zorder=100) if legend == 'yes': plt.legend(loc=2)
0.002593
def parse(self):
    """Parse show subcommand."""
    parser = self.subparser.add_parser(
        "show", help="Show workspace details",
        description="Show workspace details.")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--all', action='store_true', help="All workspaces")
    group.add_argument('name', type=str, help="Workspace name", nargs='?')
0.004566
def compute_texptime(imageObjectList):
    """
    Add up the exposure time for all the members in the pattern,
    since 'drizzle' doesn't have the necessary information to
    correctly set this itself.
    """
    expnames = []
    exptimes = []
    start = []
    end = []
    for img in imageObjectList:
        expnames += img.getKeywordList('_expname')
        exptimes += img.getKeywordList('_exptime')
        start += img.getKeywordList('_expstart')
        end += img.getKeywordList('_expend')

    exptime = 0.
    expstart = min(start)
    expend = max(end)
    exposure = None
    for n in range(len(expnames)):
        if expnames[n] != exposure:
            exposure = expnames[n]
            exptime += exptimes[n]

    return (exptime, expstart, expend)
0.003901
def get(self, key):
    """
    Returns the value for the specified key, or None if this map does not contain this key.

    **Warning: This method uses hashCode and equals of the binary form of the key, not the actual implementations of
    hashCode and equals defined in the key's class.**

    :param key: (object), the specified key.
    :return: (Sequence), the list of the values associated with the specified key.
    """
    check_not_none(key, "key can't be None")
    key_data = self._to_data(key)
    return self._encode_invoke_on_key(replicated_map_get_codec, key_data, key=key_data)
0.009331
def masses_of_galaxies_within_ellipses_in_units(self, major_axis: dim.Length, unit_mass='angular',
                                                critical_surface_density=None):
    """Compute the total mass of all galaxies in this plane within an ellipse of specified major-axis.

    See *galaxy.angular_mass_within_ellipse* and *mass_profiles.angular_mass_within_ellipse* for details
    of how this is performed.

    Parameters
    ----------
    major_axis : float
        The major-axis radius of the ellipse.
    units_luminosity : str
        The units the luminosity is returned in (eps | counts).
    exposure_time : float
        The exposure time of the observation, which converts luminosity from electrons per second units to counts.
    """
    return list(map(lambda galaxy: galaxy.mass_within_ellipse_in_units(
        major_axis=major_axis, unit_mass=unit_mass, kpc_per_arcsec=self.kpc_per_arcsec,
        critical_surface_density=critical_surface_density),
        self.galaxies))
0.009033
def b58decode(v, length=None):
    """ decode v into a string of len bytes."""
    if isinstance(v, bytes):
        v = v.decode()
    for c in v:
        if c not in __b58chars:
            raise ValueError("invalid Base58 string")
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += __b58chars.find(c) * (__b58base ** i)

    result = b""
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        result = struct.pack("B", mod) + result
        long_value = div
    result = struct.pack("B", long_value) + result

    nPad = 0
    for c in v:
        if c == __b58chars[0]:
            nPad += 1
        else:
            break

    result = b"\x00" * nPad + result
    if length is not None and len(result) != length:
        return None

    return result
0.001235
def create_state(self, value: dict = None, *, namespace: str = None):
    """
    Creates a new :py:class:`State` object, sharing the same zproc server as this Context.

    :param value:
        If provided, call ``state.update(value)``.
    :param namespace:
        Use this as the namespace for the :py:class:`State` object,
        instead of this :py:class:`Context`\ 's namespace.
    :return:
        A :py:class:`State` object.
    """
    if namespace is None:
        namespace = self.namespace
    state = State(self.server_address, namespace=namespace)
    if value is not None:
        state.update(value)
    return state
0.005714
def validate_v_rgbw(value):
    """Validate a V_RGBW value."""
    if len(value) != 8:
        raise vol.Invalid(
            '{} is not eight characters long'.format(value))
    return validate_hex(value)
0.004878
def __flags(self):
    """
    Internal method. Turns arguments into flags.
    """
    flags = []
    if self._capture:
        flags.append("-capture")
    if self._spy:
        flags.append("-spy")
    if self._dbpath:
        flags += ["-db-path", self._dbpath]
        flags += ["-db", "boltdb"]
    else:
        flags += ["-db", "memory"]
    if self._synthesize:
        assert(self._middleware)
        flags += ["-synthesize"]
    if self._simulation:
        flags += ["-import", self._simulation]
    if self._proxyPort:
        flags += ["-pp", str(self._proxyPort)]
    if self._adminPort:
        flags += ["-ap", str(self._adminPort)]
    if self._modify:
        flags += ["-modify"]
    if self._verbose:
        flags += ["-v"]
    if self._dev:
        flags += ["-dev"]
    if self._metrics:
        flags += ["-metrics"]
    if self._auth:
        flags += ["-auth"]
    if self._middleware:
        flags += ["-middleware", self._middleware]
    if self._cert:
        flags += ["-cert", self._cert]
    if self._certName:
        flags += ["-cert-name", self._certName]
    if self._certOrg:
        flags += ["-cert-org", self._certOrg]
    if self._destination:
        flags += ["-destination", self._destination]
    if self._key:
        flags += ["-key", self._key]
    if self._dest:
        for i in range(len(self._dest)):
            flags += ["-dest", self._dest[i]]
    if self._generateCACert:
        flags += ["-generate-ca-cert"]
    if not self._tlsVerification:
        flags += ["-tls-verification", "false"]
    logging.debug("flags:" + str(flags))
    return flags
0.001098
def toc(self): """ Return a html table of contents :return: """ fails = "" skips = "" if len(self.failed()): faillist = list() for failure in self.failed(): faillist.append( """ <li> <a href="#{anchor}">{name}</a> </li> """.format(anchor=failure.anchor(), name=tag.text( failure.testclass.name + '.' + failure.name))) fails = """ <li>Failures <ul>{faillist}</ul> </li> """.format(faillist="".join(faillist)) if len(self.skipped()): skiplist = list() for skipped in self.skipped(): skiplist.append( """ <li> <a href="#{anchor}">{name}</a> </li> """.format(anchor=skipped.anchor(), name=tag.text( skipped.testclass.name + skipped.name))) skips = """ <li>Skipped <ul>{skiplist}</ul> </li> """.format(skiplist="".join(skiplist)) classlist = list() for classname in self.classes: testclass = self.classes[classname] cases = list() for testcase in testclass.cases: if "pkcs11" in testcase.name: assert True cases.append( """ <li> <a href="#{anchor}">{name}</a> </li> """.format(anchor=testcase.anchor(), name=tag.text(testcase.name))) classlist.append(""" <li> <a href="#{anchor}">{name}</a> <ul> {cases} </ul> </li> """.format(anchor=testclass.anchor(), name=testclass.name, cases="".join(cases))) return """ <ul> {failed} {skips} <li>All Test Classes <ul>{classlist}</ul> </li> </ul> """.format(failed=fails, skips=skips, classlist="".join(classlist))
0.001206
def _peek_async_request(self, msg_id, msg_name):
    """Peek at the set of callbacks for a request.

    Return tuple of Nones if callbacks don't exist.
    """
    assert get_thread_ident() == self.ioloop_thread_id
    if msg_id is None:
        msg_id = self._msg_id_for_name(msg_name)
    if msg_id in self._async_queue:
        return self._async_queue[msg_id]
    else:
        return None, None, None, None, None
0.004357
def get_session(session_id):
    '''
    Get information about a session.

    .. versionadded:: 2016.11.0

    :param session_id: The numeric Id of the session.

    :return: A dictionary of session information.

    CLI Example:

    .. code-block:: bash

        salt '*' rdp.get_session session_id
        salt '*' rdp.get_session 99
    '''
    ret = dict()
    sessions = list_sessions()
    session = [item for item in sessions if item['session_id'] == session_id]

    if session:
        ret = session[0]

    if not ret:
        _LOG.warning('No session found for id: %s', session_id)

    return ret
0.001642
def _refine_merge(merge, aliases, min_goodness):
    """Remove entries from a merge to generate a valid merge which may be
    applied to the routing table.

    Parameters
    ----------
    merge : :py:class:`~.Merge`
        Initial merge to refine.
    aliases : {(key, mask): {(key, mask), ...}, ...}
        Map of key-mask pairs to the sets of key-mask pairs that they actually
        represent.
    min_goodness : int
        Reject merges which are worse than the minimum goodness.

    Returns
    -------
    :py:class:`~.Merge`
        Valid merge which may be applied to the routing table.
    """
    # Perform the down-check
    merge = _refine_downcheck(merge, aliases, min_goodness)

    # If the merge is still sufficiently good then continue to refine it.
    if merge.goodness > min_goodness:
        # Perform the up-check
        merge, changed = _refine_upcheck(merge, min_goodness)

        if changed and merge.goodness > min_goodness:
            # If the up-check removed any entries we need to re-perform the
            # down-check; but we do not need to re-perform the up-check as the
            # down check can only move the resultant merge nearer the top of
            # the routing table.
            merge = _refine_downcheck(merge, aliases, min_goodness)

    return merge
0.000762
def fromCSV(csvfile,out=None,fieldnames=None,fmtparams=None,conv_func={}, empty_to_None=[]): """Conversion from CSV to PyDbLite csvfile : name of the CSV file in the file system out : path for the new PyDbLite base in the file system fieldnames : list of field names. If set to None, the field names must be present in the first line of the CSV file fmtparams : the format parameters for the CSV file, as described in the csv module of the standard distribution conv_func is a dictionary mapping a field name to the function used to convert the string read in the CSV to the appropriate Python type. For instance if field "age" must be converted to an integer : conv_func["age"] = int empty_to_None is a list of the fields such that when the value read in the CSV file is the empty string, the field value is set to None """ import csv import time import datetime if out is None: out = os.path.splitext(csvfile)[0]+".pdl" if fieldnames is None: # read field names in the first line of CSV file reader = csv.reader(open(csvfile)) fieldnames = reader.next() reader = csv.DictReader(open(csvfile),fieldnames,fmtparams) reader.next() # skip first line db = PyDbLite.Base(out) conv_func.update({"__id__":int}) auto_id = not "__id__" in fieldnames fieldnames = [ f for f in fieldnames if not f in ("__id__") ] kw = {"mode":"override"} db.create(*fieldnames,**kw) print db.fields next_id = 0 records = {} while True: try: record = reader.next() except StopIteration: break if auto_id: record["__id__"] = next_id next_id += 1 # replace empty strings by None for field in empty_to_None: if not record[field]: record[field] = None # type conversion for field in conv_func: if not isinstance(conv_func[field],(tuple,list)): record[field] = conv_func[field](record[field]) else: # date or datetime date_class,date_fmt = conv_func[field] if not record[field]: record[field] = None else: time_tuple = time.strptime(record[field],date_fmt) if date_class is datetime.date: time_tuple = time_tuple[:3] record[field] = date_class(*time_tuple) records[record["__id__"]] = record db.records = records db.commit() print len(db) return db
0.010496
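A minimal usage sketch for the converter above. The file name, column names and output path are hypothetical and PyDbLite must be importable; the second element of a date entry in conv_func is the strptime format, as the code above expects.

# Hypothetical CSV: people.csv with a header line "name,age,joined,nickname"
import datetime

db = fromCSV("people.csv",
             out="people.pdl",
             conv_func={"age": int,
                        "joined": (datetime.date, "%Y-%m-%d")},
             empty_to_None=["nickname"])
print db.fields   # Python 2 print statement, matching the snippet above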
def parseFunctionSignatures(self):
        """Search file and namespace node XML contents for function signatures."""
        # Keys: string refid of either namespace or file nodes
        # Values: list of function objects that should be defined there
        parent_to_func = {}
        for func in self.functions:
            # Case 1: it is a function inside a namespace, the function information
            # is in the namespace's XML file.
            if func.parent:
                parent_refid = None
                if func.parent.kind == "namespace":
                    parent_refid = func.parent.refid
                else:
                    raise RuntimeError(textwrap.dedent('''
                        Function [{0}] with refid=[{1}] had a parent of kind '{2}':
                        Parent name=[{3}], refid=[{4}].

                        Functions may only have namespace parents.

                        Please report this issue online, Exhale has a parsing error.
                    '''.format(func.name, func.refid, func.parent.kind,
                               func.parent.name, func.parent.refid)))
            # Case 2: top-level function, its information is in the file node's XML.
            elif func.def_in_file:
                parent_refid = func.def_in_file.refid
            else:
                utils.verbose_log(utils.critical(
                    "Cannot parse function [{0}] signature, refid=[{1}], no parent/def_in_file found!".format(
                        func.name, func.refid
                    )
                ))
                # No place to look up this function; skip it below.
                parent_refid = None

            # If we found a suitable parent refid, gather in parent_to_func.
            if parent_refid:
                if parent_refid not in parent_to_func:
                    parent_to_func[parent_refid] = []
                parent_to_func[parent_refid].append(func)

        # Now we have a mapping of all defining elements to where the function
        # signatures _should_ live.
        # TODO: setwise comparison / report when children vs parent_to_func[refid] differ?
        for refid in parent_to_func:
            parent = self.node_by_refid[refid]
            parent_contents = utils.nodeCompoundXMLContents(parent)
            if not parent_contents:
                continue  # TODO: error, log?

            try:
                parent_soup = BeautifulSoup(parent_contents, "lxml-xml")
            except:
                continue

            cdef = parent_soup.doxygen.compounddef
            func_section = None
            for section in cdef.find_all("sectiondef", recursive=False):
                if "kind" in section.attrs and section.attrs["kind"] == "func":
                    func_section = section
                    break

            if not func_section:
                continue  # TODO: error, log?

            functions = parent_to_func[refid]
            for memberdef in func_section.find_all("memberdef", recursive=False):
                if "kind" not in memberdef.attrs or memberdef.attrs["kind"] != "function":
                    continue

                func_refid = memberdef.attrs["id"]
                func = None
                for candidate in functions:
                    if candidate.refid == func_refid:
                        func = candidate
                        break

                if not func:
                    continue  # TODO: error, log?
                functions.remove(func)

                # At last, we can actually parse the function signature
                # 1. The function return type.
                func.return_type = utils.sanitize(
                    memberdef.find("type", recursive=False).text
                )

                # 2. The function parameter list.
                parameters = []
                for param in memberdef.find_all("param", recursive=False):
                    parameters.append(param.type.text)
                func.parameters = utils.sanitize_all(parameters)

                # 3. The template parameter list.
                templateparamlist = memberdef.templateparamlist
                if templateparamlist:
                    template = []
                    for param in templateparamlist.find_all("param", recursive=False):
                        template.append(param.type.text)
                    func.template = utils.sanitize_all(template)
0.004342
def flash(self, partition, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK): """Flashes the last downloaded file to the given partition. Args: partition: Partition to flash. timeout_ms: Optional timeout in milliseconds to wait for it to finish. info_cb: See Download. Usually no messages. Returns: Response to a download request, normally nothing. """ return self._simple_command('flash', arg=partition, info_cb=info_cb, timeout_ms=timeout_ms)
0.001912
def download(url, filename):
    """Download a file from `url` and save it to `filename`."""
    logger.info("Downloading %s", url)
    request = urllib2.Request(url)
    request.add_header('User-Agent', 'caelum/0.1 +https://github.com/nrcharles/caelum')
    opener = urllib2.build_opener()
    # Write in binary mode so non-text payloads are not mangled.
    local_file = open(filename, 'wb')
    local_file.write(opener.open(request).read())
    local_file.close()
0.002525
def append_utc_timestamp(self, tag, timestamp=None, precision=3, header=False): """Append a field with a UTCTimestamp value. :param tag: Integer or string FIX tag number. :param timestamp: Time value, see below. :param precision: Number of decimal places: 0, 3 (ms) or 6 (us). :param header: Append to FIX header if True; default to body. The `timestamp` value should be a datetime, such as created by datetime.datetime.utcnow(); a float, being the number of seconds since midnight 1 Jan 1970 UTC, such as returned by time.time(); or, None, in which case datetime.datetime.utcnow() is used to get the current UTC time. Precision values other than zero (seconds), 3 (milliseconds), or 6 (microseconds) will raise an exception. Note that prior to FIX 5.0, only values of 0 or 3 comply with the standard.""" return self._append_utc_datetime(tag, "%Y%m%d-%H:%M:%S", timestamp, precision, header)
0.002498
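A sketch of how the method might be called; `msg` is assumed to be an instance of the message class that defines it, and tags 52 (SendingTime) and 60 (TransactTime) are standard FIX tags.

import datetime

# Stamp SendingTime (tag 52) in the header with millisecond precision.
msg.append_utc_timestamp(52, datetime.datetime.utcnow(), precision=3, header=True)

# Stamp TransactTime (tag 60) in the body, letting the method take "now" itself.
msg.append_utc_timestamp(60, None, precision=0)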
def get_quota(self): 'Return tuple of (bytes_available, bytes_quota).' du, ds = op.itemgetter('space_used', 'space_amount')\ ((yield super(txBox, self).info_user())) defer.returnValue((ds - du, ds))
0.033816
def mousePressEvent(self, event): """Folds/unfolds the pressed indicator if any.""" if self._mouse_over_line is not None: block = self.editor.document().findBlockByNumber( self._mouse_over_line) self.toggle_fold_trigger(block)
0.007092
def get_vulnerability_functions_04(fname): """ Parse the vulnerability model in NRML 0.4 format. :param fname: path of the vulnerability file :returns: a dictionary imt, taxonomy -> vulnerability function + vset """ categories = dict(assetCategory=set(), lossCategory=set(), vulnerabilitySetID=set()) imts = set() taxonomies = set() vf_dict = {} # imt, taxonomy -> vulnerability function for vset in nrml.read(fname).vulnerabilityModel: categories['assetCategory'].add(vset['assetCategory']) categories['lossCategory'].add(vset['lossCategory']) categories['vulnerabilitySetID'].add(vset['vulnerabilitySetID']) IML = vset.IML imt_str = IML['IMT'] imls = ~IML imts.add(imt_str) for vfun in vset.getnodes('discreteVulnerability'): taxonomy = vfun['vulnerabilityFunctionID'] if taxonomy in taxonomies: raise InvalidFile( 'Duplicated vulnerabilityFunctionID: %s: %s, line %d' % (taxonomy, fname, vfun.lineno)) taxonomies.add(taxonomy) with context(fname, vfun): loss_ratios = ~vfun.lossRatio coefficients = ~vfun.coefficientsVariation if len(loss_ratios) != len(imls): raise InvalidFile( 'There are %d loss ratios, but %d imls: %s, line %d' % (len(loss_ratios), len(imls), fname, vfun.lossRatio.lineno)) if len(coefficients) != len(imls): raise InvalidFile( 'There are %d coefficients, but %d imls: %s, line %d' % (len(coefficients), len(imls), fname, vfun.coefficientsVariation.lineno)) with context(fname, vfun): vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction( taxonomy, imt_str, imls, loss_ratios, coefficients, vfun['probabilisticDistribution']) categories['id'] = '_'.join(sorted(categories['vulnerabilitySetID'])) del categories['vulnerabilitySetID'] return vf_dict, categories
0.000448
def _compute_jsonclass(obj): """ Compute the content of the __jsonclass__ field for the given object :param obj: An object :return: The content of the __jsonclass__ field """ # It's not a standard type, so it needs __jsonclass__ module_name = inspect.getmodule(obj).__name__ json_class = obj.__class__.__name__ if module_name not in ("", "__main__"): json_class = "{0}.{1}".format(module_name, json_class) return [json_class, []]
0.002088
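For illustration, a hypothetical class shows what the helper returns:

class Point(object):
    pass

p = Point()
# In a module named "shapes" this returns ["shapes.Point", []];
# when run as a script (module "__main__") the prefix is dropped: ["Point", []].
print(_compute_jsonclass(p))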
def mix_columns(state): """ Transformation in the Cipher that takes all of the columns of the State and mixes their data (independently of one another) to produce new columns. """ state = state.reshape(4, 4, 8) return fcat( multiply(MA, state[0]), multiply(MA, state[1]), multiply(MA, state[2]), multiply(MA, state[3]), )
0.002625
def run(pipeline_id, verbose, use_cache, dirty, force, concurrency, slave): """Run a pipeline by pipeline-id. pipeline-id supports '%' wildcard for any-suffix matching, 'all' for running all pipelines and comma-delimited list of pipeline ids""" exitcode = 0 running = [] progress = {} def progress_cb(report): pid, count, success, *_, stats = report print('\x1b[%sA' % (1+len(running))) if pid not in progress: running.append(pid) progress[pid] = count, success for pid in running: count, success = progress[pid] if success is None: if count == 0: print('\x1b[2K%s: \x1b[31m%s\x1b[0m' % (pid, 'WAITING FOR OUTPUT')) else: print('\x1b[2K%s: \x1b[33mRUNNING, processed %s rows\x1b[0m' % (pid, count)) else: if success: print('\x1b[2K%s: \x1b[32mSUCCESS, processed %s rows\x1b[0m' % (pid, count)) else: print('\x1b[2K%s: \x1b[31mFAILURE, processed %s rows\x1b[0m' % (pid, count)) results = run_pipelines(pipeline_id, '.', use_cache, dirty, force, concurrency, verbose, progress_cb if not verbose else None, slave) if not slave: logging.info('RESULTS:') errd = False for result in results: stats = user_facing_stats(result.stats) errd = errd or result.errors or not result.success logging.info('%s: %s %s%s', 'SUCCESS' if result.success else 'FAILURE', result.pipeline_id, repr(stats) if stats is not None else '', ( '\nERROR log from processor %s:\n+--------\n| ' % result.errors[0] + '\n| '.join(result.errors[1:]) + '\n+--------' ) if result.errors else '') else: result_obj = [] errd = False for result in results: errd = errd or result.errors or not result.success stats = user_facing_stats(result.stats) result_obj.append(dict( success=result.success, pipeline_id=result.pipeline_id, stats=result.stats, errors=result.errors )) json.dump(result_obj, sys.stderr) if errd: exitcode = 1 exit(exitcode)
0.0023
def temp_url(self, duration=120): """Returns a temporary URL for the given key.""" return self.bucket._boto_s3.meta.client.generate_presigned_url( 'get_object', Params={'Bucket': self.bucket.name, 'Key': self.name}, ExpiresIn=duration )
0.006757
def cache_finite_samples(f): '''Decorator to cache audio samples produced by the wrapped generator.''' cache = {} def wrap(*args): key = FRAME_RATE, args if key not in cache: cache[key] = [sample for sample in f(*args)] return (sample for sample in cache[key]) return wrap
0.035088
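A sketch of the decorator in use, assuming it runs in the same module that defines the FRAME_RATE constant used as part of the cache key; the tone generator below is hypothetical.

import math

@cache_finite_samples
def beep(freq, n_samples):
    # Finite sine burst; computed once per (freq, n_samples) and replayed from cache.
    return (math.sin(2 * math.pi * freq * i / FRAME_RATE) for i in range(n_samples))

first = list(beep(440, 1000))    # fills the cache
second = list(beep(440, 1000))   # served from the cache, no recomputation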
def is_parameterized(val: Any) -> bool: """Returns whether the object is parameterized with any Symbols. A value is parameterized when it has an `_is_parameterized_` method and that method returns a truthy value, or if the value is an instance of sympy.Basic. Returns: True if the gate has any unresolved Symbols and False otherwise. If no implementation of the magic method above exists or if that method returns NotImplemented, this will default to False. """ if isinstance(val, sympy.Basic): return True getter = getattr(val, '_is_parameterized_', None) result = NotImplemented if getter is None else getter() if result is not NotImplemented: return result else: return False
0.001279
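A few illustrative calls; only sympy and the function above are needed.

import sympy

x = sympy.Symbol('x')
print(is_parameterized(x))       # True: sympy expressions are parameterized
print(is_parameterized(0.5))     # False: plain numbers define no _is_parameterized_

class HypotheticalGate(object):
    def _is_parameterized_(self):
        return True

print(is_parameterized(HypotheticalGate()))   # True: the magic method is honored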
def common_items_ratio(pronac, dt):
    """
    Computes the ratio of common items for a project in its cultural
    segment, collects the project's uncommon items, and verifies whether
    the project is an outlier compared to the other projects in its segment.
    """
    segment_id = get_segment_id(str(pronac))
    metrics = data.common_items_metrics.to_dict(orient='index')[segment_id]
    ratio = common_items_percentage(pronac, segment_common_items(segment_id))

    # constant that defines the threshold to verify if a project
    # is an outlier.
    k = 1.5
    threshold = metrics['mean'] - k * metrics['std']

    uncommon_items = get_uncommon_items(pronac)
    pronac_filter = data.all_items['PRONAC'] == pronac
    uncommon_items_filter = (
        data.all_items['idPlanilhaItens']
        .isin(uncommon_items)
    )
    items_filter = (pronac_filter & uncommon_items_filter)

    filtered_items = (
        data
        .all_items[items_filter]
        .drop_duplicates(subset='idPlanilhaItens')
    )
    uncommon_items = add_info_to_uncommon_items(filtered_items, uncommon_items)

    return {
        'is_outlier': ratio < threshold,
        'valor': ratio,
        'maximo_esperado': metrics['mean'],
        'desvio_padrao': metrics['std'],
        'items_incomuns': uncommon_items,
        'items_comuns_que_o_projeto_nao_possui': get_common_items_not_present(pronac),
    }
0.00142
def refresh_db(failhard=False, **kwargs): # pylint: disable=unused-argument ''' Updates the opkg database to latest packages based upon repositories Returns a dict, with the keys being package databases and the values being the result of the update attempt. Values can be one of the following: - ``True``: Database updated successfully - ``False``: Problem updating database failhard If False, return results of failed lines as ``False`` for the package database that encountered the error. If True, raise an error with a list of the package databases that encountered errors. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' pkg.refresh_db ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) ret = {} error_repos = [] cmd = ['opkg', 'update'] # opkg returns a non-zero retcode when there is a failure to refresh # from one or more repos. Due to this, ignore the retcode. call = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False, ignore_retcode=True, redirect_stderr=True) out = call['stdout'] prev_line = '' for line in salt.utils.itertools.split(out, '\n'): if 'Inflating' in line: key = line.strip().split()[1][:-1] ret[key] = True elif 'Updated source' in line: # Use the previous line. key = prev_line.strip().split()[1][:-1] ret[key] = True elif 'Failed to download' in line: key = line.strip().split()[5].split(',')[0] ret[key] = False error_repos.append(key) prev_line = line if failhard and error_repos: raise CommandExecutionError( 'Error getting repos: {0}'.format(', '.join(error_repos)) ) # On a non-zero exit code where no failed repos were found, raise an # exception because this appears to be a different kind of error. if call['retcode'] != 0 and not error_repos: raise CommandExecutionError(out) return ret
0.000435
def _get_options_group(group=None): """Get a specific group of options which are allowed.""" #: These expect a hexidecimal keyid as their argument, and can be parsed #: with :func:`_is_hex`. hex_options = frozenset(['--check-sigs', '--default-key', '--default-recipient', '--delete-keys', '--delete-secret-keys', '--delete-secret-and-public-keys', '--desig-revoke', '--export', '--export-secret-keys', '--export-secret-subkeys', '--fingerprint', '--gen-revoke', '--hidden-encrypt-to', '--hidden-recipient', '--list-key', '--list-keys', '--list-public-keys', '--list-secret-keys', '--list-sigs', '--recipient', '--recv-keys', '--send-keys', '--edit-key', '--sign-key', ]) #: These options expect value which are left unchecked, though still run #: through :func:`_fix_unsafe`. unchecked_options = frozenset(['--list-options', '--passphrase-fd', '--status-fd', '--verify-options', '--command-fd', ]) #: These have their own parsers and don't really fit into a group other_options = frozenset(['--debug-level', '--keyserver', ]) #: These should have a directory for an argument dir_options = frozenset(['--homedir', ]) #: These expect a keyring or keyfile as their argument keyring_options = frozenset(['--keyring', '--primary-keyring', '--secret-keyring', '--trustdb-name', ]) #: These expect a filename (or the contents of a file as a string) or None #: (meaning that they read from stdin) file_or_none_options = frozenset(['--decrypt', '--decrypt-files', '--encrypt', '--encrypt-files', '--import', '--verify', '--verify-files', '--output', ]) #: These options expect a string. see :func:`_check_preferences`. pref_options = frozenset(['--digest-algo', '--cipher-algo', '--compress-algo', '--compression-algo', '--cert-digest-algo', '--personal-digest-prefs', '--personal-digest-preferences', '--personal-cipher-prefs', '--personal-cipher-preferences', '--personal-compress-prefs', '--personal-compress-preferences', '--pinentry-mode', '--print-md', '--trust-model', ]) #: These options expect no arguments none_options = frozenset(['--allow-loopback-pinentry', '--always-trust', '--armor', '--armour', '--batch', '--check-sigs', '--check-trustdb', '--clearsign', '--debug-all', '--default-recipient-self', '--detach-sign', '--export', '--export-ownertrust', '--export-secret-keys', '--export-secret-subkeys', '--fingerprint', '--fixed-list-mode', '--gen-key', '--import-ownertrust', '--list-config', '--list-key', '--list-keys', '--list-packets', '--list-public-keys', '--list-secret-keys', '--list-sigs', '--lock-multiple', '--lock-never', '--lock-once', '--no-default-keyring', '--no-default-recipient', '--no-emit-version', '--no-options', '--no-tty', '--no-use-agent', '--no-verbose', '--print-mds', '--quiet', '--sign', '--symmetric', '--throw-keyids', '--use-agent', '--verbose', '--version', '--with-colons', '--yes', ]) #: These options expect either None or a hex string hex_or_none_options = hex_options.intersection(none_options) allowed = hex_options.union(unchecked_options, other_options, dir_options, keyring_options, file_or_none_options, pref_options, none_options) if group and group in locals().keys(): return locals()[group]
0.000461
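A short sketch of the group lookup, which resolves the group name against the function's local frozensets:

hex_opts = _get_options_group('hex_options')
print('--fingerprint' in hex_opts)            # True
print(_get_options_group('no_such_group'))    # None: unknown names fall through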
def generate_config(output_directory): """ Generate a dcm2nii configuration file that disable the interactive mode. """ if not op.isdir(output_directory): os.makedirs(output_directory) config_file = op.join(output_directory, "config.ini") open_file = open(config_file, "w") open_file.write("[BOOL]\nManualNIfTIConv=0\n") open_file.close() return config_file
0.002488
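Calling it is trivial; the output directory below is illustrative.

# Creates the directory if needed, writes a config.ini that disables the
# interactive mode, and returns the path to that file.
config_path = generate_config("/tmp/dcm2nii_out")
print(config_path)   # /tmp/dcm2nii_out/config.ini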
def transfer(self, user_id, amount, desc, client_ip=None, check_name='OPTION_CHECK',
                 real_name=None, out_trade_no=None, device_info=None):
        """
        Enterprise payment API (transfer funds to a user)

        :param user_id: openid, under the official account, of the user receiving the payment
        :param amount: payment amount, in fen (1/100 CNY)
        :param desc: payment description
        :param client_ip: optional, IP address of the machine calling the API
        :param check_name: optional, real-name check option.
            NO_CHECK: do not verify the real name.
            FORCE_CHECK: force a real-name check (users without real-name
            verification fail the check and cannot receive the transfer).
            OPTION_CHECK: verify the real name only for users who have completed
            real-name verification (unverified users are not checked and the
            transfer succeeds).
            Defaults to OPTION_CHECK
        :param real_name: optional, real name of the payee; required when
            check_name is FORCE_CHECK or OPTION_CHECK
        :param out_trade_no: optional, merchant order number, must be unique;
            generated automatically by default
        :param device_info: optional, terminal device number assigned by WeChat Pay
        :return: the result returned by the API
        """
        if not out_trade_no:
            now = datetime.now()
            out_trade_no = '{0}{1}{2}'.format(
                self.mch_id,
                now.strftime('%Y%m%d%H%M%S'),
                random.randint(1000, 10000)
            )
        data = {
            'mch_appid': self.appid,
            'mchid': self.mch_id,
            'device_info': device_info,
            'partner_trade_no': out_trade_no,
            'openid': user_id,
            'check_name': check_name,
            're_user_name': real_name,
            'amount': amount,
            'desc': desc,
            'spbill_create_ip': client_ip or get_external_ip(),
        }
        return self._post('mmpaymkttransfers/promotion/transfers', data=data)
0.002497
def DetectGce(): """Determine whether or not we're running on GCE. This is based on: https://cloud.google.com/compute/docs/metadata#runninggce Returns: True iff we're running on a GCE instance. """ metadata_url = 'http://{}'.format( os.environ.get('GCE_METADATA_ROOT', 'metadata.google.internal')) try: o = urllib_request.build_opener(urllib_request.ProxyHandler({})).open( urllib_request.Request( metadata_url, headers={'Metadata-Flavor': 'Google'})) except urllib_error.URLError: return False return (o.getcode() == http_client.OK and o.headers.get('metadata-flavor') == 'Google')
0.001445
def get_albums(self, *args, **kwargs): """Convenience method for `get_music_library_information` with ``search_type='albums'``. For details of other arguments, see `that method <#soco.music_library.MusicLibrary.get_music_library_information>`_. """ args = tuple(['albums'] + list(args)) return self.get_music_library_information(*args, **kwargs)
0.004975
def freeze(this): """ Snapshot current **hierarchy** and cache it into a new poco instance. This new poco instance is a copy from current poco instance (``self``). The hierarchy of the new poco instance is fixed and immutable. It will be super fast when calling ``dump`` function from frozen poco. See the example below. Examples: :: poco = Poco(...) frozen_poco = poco.freeze() hierarchy_dict = frozen_poco.agent.hierarchy.dump() # will return the already cached hierarchy data Returns: :py:class:`Poco <poco.pocofw.Poco>`: new poco instance copy from current poco instance (``self``) """ class FrozenPoco(Poco): def __init__(self, **kwargs): hierarchy_dict = this.agent.hierarchy.dump() hierarchy = create_immutable_hierarchy(hierarchy_dict) agent_ = PocoAgent(hierarchy, this.agent.input, this.agent.screen) kwargs['action_interval'] = 0.01 kwargs['pre_action_wait_for_appearance'] = 0 super(FrozenPoco, self).__init__(agent_, **kwargs) self.this = this def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass def __getattr__(self, item): return getattr(self.this, item) return FrozenPoco()
0.006631
def options_handler(self, sock, cmd, opt): "Negotiate options" if cmd == NOP: self.sendcommand(NOP) elif cmd == WILL or cmd == WONT: if self.WILLACK.has_key(opt): self.sendcommand(self.WILLACK[opt], opt) else: self.sendcommand(DONT, opt) if cmd == WILL and opt == TTYPE: self.writecooked(IAC + SB + TTYPE + SEND + IAC + SE) elif cmd == DO or cmd == DONT: if self.DOACK.has_key(opt): self.sendcommand(self.DOACK[opt], opt) else: self.sendcommand(WONT, opt) if opt == ECHO: self.DOECHO = (cmd == DO) elif cmd == SE: subreq = self.read_sb_data() if subreq[0] == TTYPE and subreq[1] == IS: try: self.setterm(subreq[2:]) except: log.debug("Terminal type not known") elif subreq[0] == NAWS: self.setnaws(subreq[1:]) elif cmd == SB: pass else: log.debug("Unhandled option: %s %s" % (cmdtxt, opttxt, ))
0.004244
def is_connected(C, directed=True): r"""Return true, if the input count matrix is completely connected. Effectively checking if the number of connected components equals one. Parameters ---------- C : scipy.sparse matrix or numpy ndarray Count matrix specifying edge weights. directed : bool, optional Whether to compute connected components for a directed or undirected graph. Default is True. Returns ------- connected : boolean, returning true only if C is connected. """ nc = csgraph.connected_components(C, directed=directed, connection='strong', \ return_labels=False) return nc == 1
0.004274
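A small sketch with a dense count matrix; scipy's csgraph (already required by the function) accepts ndarrays directly.

import numpy as np

# States 0 and 1 exchange counts; state 2 only receives, so it cannot be left.
C = np.array([[5, 2, 0],
              [1, 4, 1],
              [0, 0, 3]])
print(is_connected(C))                   # False: two strongly connected components
print(is_connected(C, directed=False))   # True once direction is ignored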
def update(self, validate=False): """ Update the image's state information by making a call to fetch the current image attributes from the service. :type validate: bool :param validate: By default, if EC2 returns no data about the image the update method returns quietly. If the validate param is True, however, it will raise a ValueError exception if no data is returned from EC2. """ rs = self.connection.get_all_images([self.id]) if len(rs) > 0: img = rs[0] if img.id == self.id: self._update(img) elif validate: raise ValueError('%s is not a valid Image ID' % self.id) return self.state
0.002427
def compare_sentences(self, str_a, str_b, language):
        """Tokenize two input strings on sentence boundary and return a
        matrix of Levenshtein distance ratios.

        :param language: str (language name)
        :param str_a: str
        :param str_b: str
        :return: list [[Comparison]]
        """

        # Make the latin tokenizer
        if language == "latin":
            sent_tokenizer = TokenizeSentence('latin')

        # Make the greek tokenizer
        elif language == "greek":
            sent_tokenizer = TokenizeSentence('greek')

        # Otherwise, if the language is unsupported, print a message stating the
        # accepted language values for sentence tokenization
        else:
            print("Language for sentence tokenization not recognized. "
                  "Accepted values are 'latin' and 'greek'.")
            return

        # If class instance is set to stem words, do so
        if self.stem_words:
            stemmer = Stemmer()
            str_a = stemmer.stem(str_a)
            str_b = stemmer.stem(str_b)

        # Tokenize input strings
        sents_a = sent_tokenizer.tokenize_sentences(str_a)
        sents_b = sent_tokenizer.tokenize_sentences(str_b)

        # Process sentences for comparison (taking into account sanitization settings)
        sents_a = self._process_sentences(sents_a)
        sents_b = self._process_sentences(sents_b)

        # Build matrix of edit distance ratios
        comparisons = self._calculate_ratios(sents_a, sents_b)

        return comparisons
0.002463
def add_callback(self, user_gpio, edge=RISING_EDGE, func=None): """ Calls a user supplied function (a callback) whenever the specified gpio edge is detected. user_gpio:= 0-31. edge:= EITHER_EDGE, RISING_EDGE (default), or FALLING_EDGE. func:= user supplied callback function. The user supplied callback receives three parameters, the gpio, the level, and the tick. If a user callback is not specified a default tally callback is provided which simply counts edges. The count may be retrieved by calling the tally function. The callback may be cancelled by calling the cancel function. A gpio may have multiple callbacks (although I can't think of a reason to do so). ... def cbf(gpio, level, tick): print(gpio, level, tick) cb1 = pi.callback(22, pigpio.EITHER_EDGE, cbf) cb2 = pi.callback(4, pigpio.EITHER_EDGE) cb3 = pi.callback(17) print(cb3.tally()) cb1.cancel() # To cancel callback cb1. ... """ cb = Callback(self._notify, user_gpio, edge, func) yield from self._notify.append(cb) return cb
0.001623
def get_resources(self): """Gets the resource list resulting from a search. return: (osid.resource.ResourceList) - the resource list raise: IllegalState - list already retrieved *compliance: mandatory -- This method must be implemented.* """ if self.retrieved: raise errors.IllegalState('List has already been retrieved.') self.retrieved = True return objects.ResourceList(self._results, runtime=self._runtime)
0.00409
def copy(self, outpoint=None, stack_script=None, redeem_script=None, sequence=None): ''' TxIn -> TxIn ''' return TxIn( outpoint=outpoint if outpoint is not None else self.outpoint, stack_script=(stack_script if stack_script is not None else self.stack_script), redeem_script=(redeem_script if redeem_script is not None else self.redeem_script), sequence=sequence if sequence is not None else self.sequence)
0.005435
def forms_invalid(self, form, inlines): """ If the form or formsets are invalid, re-render the context data with the data-filled form and formsets and errors. """ return self.render_to_response(self.get_context_data(form=form, inlines=inlines))
0.014085
def _append_commands(dct, # type: typing.Dict[str, typing.Set[str]] module_name, # type: str commands # type:typing.Iterable[_EntryPoint] ): # type: (...) -> None """Append entry point strings representing the given Command objects. Args: dct: The dictionary to append with entry point strings. Each key will be a primary command with a value containing a list of entry point strings representing a Command. module_name: The name of the module in which the command object resides. commands: A list of Command objects to convert to entry point strings. """ for command in commands: entry_point = '{command}{subcommand} = {module}{callable}'.format( command=command.command, subcommand=(':{}'.format(command.subcommand) if command.subcommand else ''), module=module_name, callable=(':{}'.format(command.callable) if command.callable else ''), ) dct.setdefault(command.command, set()).add(entry_point)
0.00086
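A sketch of the helper in action, using a namedtuple stand-in for the Command/_EntryPoint objects it expects (field names inferred from the attributes accessed above; the module path is hypothetical).

import collections

Command = collections.namedtuple('Command', ['command', 'subcommand', 'callable'])

dct = {}
_append_commands(dct, 'mypkg.cli', [
    Command('greet', None, None),
    Command('greet', 'loud', 'shout'),
])
# dct == {'greet': {'greet = mypkg.cli', 'greet:loud = mypkg.cli:shout'}}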