text: string (lengths 78 to 104k)
score: float64 (0 to 0.18)
def getIRData(self):
    '''
    Returns last LaserData.

    @return last JdeRobotTypes LaserData saved
    '''
    if self.hasproxy():
        self.lock.acquire()
        ir = self.ir
        self.lock.release()
        return ir
    return None
0.013559
def _strip_column_name(col_name, keep_paren_contents=True): """ Utility script applying several regexs to a string. Intended to be used by `strip_column_names`. This function will: 1. replace informative punctuation components with text 2. (optionally) remove text within parentheses 3. replace remaining punctuation/whitespace with _ 4. strip leading/trailing punctuation/whitespace Parameters ---------- col_name (str): input character string keep_paren_contents (logical): controls behavior of within-paren elements of text - if True, (the default) all text within parens retained - if False, text within parens will be removed from the field name Returns -------- modified string for new field name Examples -------- > print([_strip_column_name(col) for col in ['PD-L1','PD L1','PD L1_']]) """ # start with input new_col_name = col_name # replace meaningful punctuation with text equivalents # surround each with whitespace to enforce consistent use of _ punctuation_to_text = { '<=': 'le', '>=': 'ge', '=<': 'le', '=>': 'ge', '<': 'lt', '>': 'gt', '#': 'num' } for punctuation, punctuation_text in punctuation_to_text.items(): new_col_name = new_col_name.replace(punctuation, punctuation_text) # remove contents within () if not(keep_paren_contents): new_col_name = re.sub('\([^)]*\)', '', new_col_name) # replace remaining punctuation/whitespace with _ punct_pattern = '[\W_]+' punct_replacement = '_' new_col_name = re.sub(punct_pattern, punct_replacement, new_col_name) # remove leading/trailing _ if it exists (if last char was punctuation) new_col_name = new_col_name.strip("_") # TODO: check for empty string # return lower-case version of column name return new_col_name.lower()
0.00204
def parse_filename_meta(filename): """ taken from suvi code by vhsu Parse the metadata from a product filename, either L1b or l2. - file start - file end - platform - product :param filename: string filename of product :return: (start datetime, end datetime, platform) """ common_pattern = "_%s_%s" % ( "(?P<product>[a-zA-Z]{3}[a-zA-Z]?-[a-zA-Z0-9]{2}[a-zA-Z0-9]?-[a-zA-Z0-9]{4}[a-zA-Z0-9]?)", # product l1b, or l2 "(?P<platform>[gG][1-9]{2})" # patform, like g16 ) patterns = { # all patterns must have the common componennt "l2_pattern": re.compile("%s_s(?P<start>[0-9]{8}T[0-9]{6})Z_e(?P<end>[0-9]{8}T[0-9]{6})Z" % common_pattern), "l1b_pattern": re.compile('%s_s(?P<start>[0-9]{14})_e(?P<end>[0-9]{14})' % common_pattern), "dayfile_pattern": re.compile("%s_d(?P<start>[0-9]{8})" % common_pattern), "monthfile_pattern": re.compile("%s_m(?P<start>[0-9]{6})" % common_pattern), "yearfile_pattern": re.compile("%s_y(?P<start>[0-9]{4})" % common_pattern), } match, dt_start, dt_end = None, None, None for pat_type, pat in patterns.items(): match = pat.search(filename) if match is not None: if pat_type == "l2_pattern": # parse l2 dt_start = datetime.strptime(match.group("start"), '%Y%m%dT%H%M%S') dt_end = datetime.strptime(match.group("end"), '%Y%m%dT%H%M%S') elif pat_type == "l1b_pattern": # parse l1b dt_start = datetime.strptime(match.group("start"), '%Y%j%H%M%S%f') dt_end = datetime.strptime(match.group("end"), '%Y%j%H%M%S%f') elif pat_type == "dayfile_pattern": dt_start = datetime.strptime(match.group("start"), "%Y%m%d") dt_end = dt_start + timedelta(hours=24) elif pat_type == "monthfile_pattern": dt_start = datetime.strptime(match.group("start"), "%Y%m") dt_end = datetime(dt_start.year, dt_start.month + 1, 1) # will raise exception in December, fix when needed elif pat_type == "yearfile_pattern": dt_start = datetime.strptime(match.group("start"), "%Y") dt_end = datetime(dt_start.year + 1, 1, 1) break if match is None: if "NCEI" in filename and ".fits" in filename: dt_start = datetime.strptime("T".join(filename.split("_")[4:6]), "%Y%m%dT%H%M%S") dt_end = dt_start angstroms = int(filename.split("_")[2]) atom = "Fe" if angstroms != 304 else "He" product = "SUVI-L1b-{}{}".format(atom, angstroms) return filename, dt_start, dt_end, "g16", product else: # we didn't find any matching patterns... raise ValueError("Timestamps not detected in filename: %s" % filename) else: return filename, dt_start, dt_end, match.group("platform"), match.group("product")
0.005161
def devices(self):
    """Manages users enrolled u2f devices"""
    self.verify_integrity()
    if session.get('u2f_device_management_authorized', False):
        if request.method == 'GET':
            return jsonify(self.get_devices()), 200
        elif request.method == 'DELETE':
            response = self.remove_device(request.json)
            if response['status'] == 'ok':
                return jsonify(response), 200
            else:
                return jsonify(response), 404
    return jsonify({'status': 'failed', 'error': 'Unauthorized!'}), 401
0.003247
def email_queue():
    """Checks for emails, that fill up the queue without getting sent."""
    status = SERVER_STATUS['OK']
    count = Message.objects.exclude(priority=PRIORITY_DEFERRED).filter(
        when_added__lte=now() - timedelta(minutes=QUEUE_TIMEOUT)).count()
    if QUEUE_WARNING_THRESHOLD <= count < QUEUE_DANGER_THRESHOLD:
        status = SERVER_STATUS['WARNING']
    if count >= QUEUE_DANGER_THRESHOLD:
        status = SERVER_STATUS['DANGER']
    return {
        'label': 'Queued Email',
        'status': status,
        'info': 'There are currently {0} messages in the mail queue.'.format(
            count)
    }
0.00157
def pack(self, value=None): """Pack the value as a binary representation. :attr:`data` is packed before the calling :meth:`.GenericMessage.pack`. After that, :attr:`data`'s value is restored. Returns: bytes: The binary representation. Raises: :exc:`~.exceptions.PackException`: If pack fails. """ if value is None: data_backup = None if self.data is not None and not isinstance(self.data, bytes): data_backup = self.data self.data = self.data.pack() packed = super().pack() if data_backup is not None: self.data = data_backup return packed elif isinstance(value, type(self)): return value.pack() else: msg = "{} is not an instance of {}".format(value, type(self).__name__) raise PackException(msg)
0.002008
def render_docstring(self):
    """make a nice docstring for ipython"""
    res = '{{{self.method}}} {self.uri} {self.title}\n'.format(self=self)
    if self.params:
        for group, params in self.params.items():
            res += '\n' + group + ' params:\n'
            for param in params.values():
                res += param.render_docstring()
    return res
0.005013
def variational_expectations(self, Y, m, v, gh_points=None, Y_metadata=None): """ Use Gauss-Hermite Quadrature to compute E_p(f) [ log p(y|f) ] d/dm E_p(f) [ log p(y|f) ] d/dv E_p(f) [ log p(y|f) ] where p(f) is a Gaussian with mean m and variance v. The shapes of Y, m and v should match. if no gh_points are passed, we construct them using defualt options """ if gh_points is None: gh_x, gh_w = self._gh_points() else: gh_x, gh_w = gh_points shape = m.shape m,v,Y = m.flatten(), v.flatten(), Y.flatten() #make a grid of points X = gh_x[None,:]*np.sqrt(2.*v[:,None]) + m[:,None] #evaluate the likelhood for the grid. First ax indexes the data (and mu, var) and the second indexes the grid. # broadcast needs to be handled carefully. logp = self.logpdf(X,Y[:,None], Y_metadata=Y_metadata) dlogp_dx = self.dlogpdf_df(X, Y[:,None], Y_metadata=Y_metadata) d2logp_dx2 = self.d2logpdf_df2(X, Y[:,None], Y_metadata=Y_metadata) #clipping for numerical stability #logp = np.clip(logp,-1e9,1e9) #dlogp_dx = np.clip(dlogp_dx,-1e9,1e9) #d2logp_dx2 = np.clip(d2logp_dx2,-1e9,1e9) #average over the gird to get derivatives of the Gaussian's parameters #division by pi comes from fact that for each quadrature we need to scale by 1/sqrt(pi) F = np.dot(logp, gh_w)/np.sqrt(np.pi) dF_dm = np.dot(dlogp_dx, gh_w)/np.sqrt(np.pi) dF_dv = np.dot(d2logp_dx2, gh_w)/np.sqrt(np.pi) dF_dv /= 2. if np.any(np.isnan(dF_dv)) or np.any(np.isinf(dF_dv)): stop if np.any(np.isnan(dF_dm)) or np.any(np.isinf(dF_dm)): stop if self.size: dF_dtheta = self.dlogpdf_dtheta(X, Y[:,None], Y_metadata=Y_metadata) # Ntheta x (orig size) x N_{quad_points} dF_dtheta = np.dot(dF_dtheta, gh_w)/np.sqrt(np.pi) dF_dtheta = dF_dtheta.reshape(self.size, shape[0], shape[1]) else: dF_dtheta = None # Not yet implemented return F.reshape(*shape), dF_dm.reshape(*shape), dF_dv.reshape(*shape), dF_dtheta
0.012048
def change_ssh_port(): """ For security woven changes the default ssh port. """ host = normalize(env.host_string)[1] after = env.port before = str(env.DEFAULT_SSH_PORT) host_string=join_host_strings(env.user,host,before) with settings(host_string=host_string, user=env.user): if env.verbosity: print env.host, "CHANGING SSH PORT TO: "+str(after) sed('/etc/ssh/sshd_config','Port '+ str(before),'Port '+str(after),use_sudo=True) if env.verbosity: print env.host, "RESTARTING SSH on",after sudo('/etc/init.d/ssh restart') return True
0.020313
def comments(recid): """Display comments.""" from invenio_access.local_config import VIEWRESTRCOLL from invenio_access.mailcookie import \ mail_cookie_create_authorize_action from .api import check_user_can_view_comments auth_code, auth_msg = check_user_can_view_comments(current_user, recid) if auth_code and current_user.is_guest: cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, { 'collection': g.collection}) url_args = {'action': cookie, 'ln': g.ln, 'referer': request.referrer} flash(_("Authorization failure"), 'error') return redirect(url_for('webaccount.login', **url_args)) elif auth_code: flash(auth_msg, 'error') abort(401) # FIXME check restricted discussion comments = CmtRECORDCOMMENT.query.filter(db.and_( CmtRECORDCOMMENT.id_bibrec == recid, CmtRECORDCOMMENT.in_reply_to_id_cmtRECORDCOMMENT == 0, CmtRECORDCOMMENT.star_score == 0 )).order_by(CmtRECORDCOMMENT.date_creation).all() return render_template('comments/comments.html', comments=comments, option='comments')
0.000864
def has_rotational(self):
    """Return true if any of the drive is HDD"""
    for member in self._drives_list():
        if member.media_type == constants.MEDIA_TYPE_HDD:
            return True
    return False
0.008621
def attach_tasks(self, tasks: Tasks):
    """Attach a set of tasks.

    A task cannot be scheduled or executed before it is attached to an Engine.

    >>> tasks = Tasks()
    >>> spin.attach_tasks(tasks)
    """
    if tasks._spin is not None and tasks._spin is not self:
        logger.warning('Tasks already attached to a different Engine')
    self._tasks.update(tasks)
    tasks._spin = self
0.004535
def _prep_components(component_list: Sequence[str]) -> List[Tuple[str, Tuple[str]]]:
    """Transform component description strings into tuples of component paths and required arguments.

    Parameters
    ----------
    component_list :
        The component descriptions to transform.

    Returns
    -------
    List of component/argument tuples.
    """
    components = []
    for c in component_list:
        path, args_plus = c.split('(')
        cleaned_args = _clean_args(args_plus[:-1].split(','), path)
        components.append((path, cleaned_args))
    return components
0.005128
def insert(self, stim, position):
    """Inserts a new stimulus into the list at the given position

    :param stim: stimulus to insert into protocol
    :type stim: :class:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel>`
    :param position: index (row) of location to insert to
    :type position: int
    """
    if position == -1:
        position = self.rowCount()
    stim.setReferenceVoltage(self.caldb, self.calv)
    stim.setCalibration(self.calibrationVector, self.calibrationFrequencies, self.calibrationFrange)
    self._tests.insert(position, stim)
0.006494
def _verify(waiveredHdul): """ Verify that the input HDUList is for a waivered FITS file. Parameters: waiveredHdul HDUList object to be verified Returns: None Exceptions: ValueError Input HDUList is not for a waivered FITS file """ if len(waiveredHdul) == 2: # # There must be exactly 2 HDU's # if waiveredHdul[0].header['NAXIS'] > 0: # # The Primary HDU must have some data # if isinstance(waiveredHdul[1], fits.TableHDU): # # The Alternate HDU must be a TableHDU # if waiveredHdul[0].data.shape[0] == \ waiveredHdul[1].data.shape[0] or \ waiveredHdul[1].data.shape[0] == 1: # # The number of arrays in the Primary HDU must match # the number of rows in the TableHDU. This includes # the case where there is only a single array and row. # return # # Not a valid waivered Fits file # raise ValueError("Input object does not represent a valid waivered" + \ " FITS file")
0.002322
def read_bytes(fh, byteorder, dtype, count, offsetsize):
    """Read tag data from file and return as byte string."""
    dtype = 'B' if dtype[-1] == 's' else byteorder + dtype[-1]
    count *= numpy.dtype(dtype).itemsize
    data = fh.read(count)
    if len(data) != count:
        log.warning('read_bytes: failed to read all bytes (%i < %i)',
                    len(data), count)
    return data
0.002525
def process_host_check_result(self, host, status_code, plugin_output): """Process host check result Format of the line that triggers function call:: PROCESS_HOST_CHECK_RESULT;<host_name>;<status_code>;<plugin_output> :param host: host to process check to :type host: alignak.objects.host.Host :param status_code: exit code of plugin :type status_code: int :param plugin_output: plugin output :type plugin_output: str :return: None TODO: say that check is PASSIVE """ now = time.time() cls = host.__class__ # If globally disabled OR host disabled, do not launch.. if not cls.accept_passive_checks or not host.passive_checks_enabled: return try: plugin_output = plugin_output.decode('utf8', 'ignore') logger.debug('%s > Passive host check plugin output: %s', host.get_full_name(), plugin_output) except AttributeError: # Python 3 will raise an exception pass except UnicodeError: pass # Maybe the check is just too old, if so, bail out! if self.current_timestamp < host.last_chk: logger.debug('%s > Passive host check is too old (%.2f seconds). ' 'Ignoring, check output: %s', host.get_full_name(), self.current_timestamp < host.last_chk, plugin_output) return chk = host.launch_check(now, self.hosts, self.services, self.timeperiods, self.daemon.macromodulations, self.daemon.checkmodulations, self.daemon.checks, force=True) # We will not have a check if an host/service is checked but it has no defined check_command if not chk: return # Now we 'transform the check into a result' # So exit_status, output and status is eaten by the host chk.exit_status = status_code chk.get_outputs(plugin_output, host.max_plugins_output_length) chk.status = ACT_STATUS_WAIT_CONSUME chk.check_time = self.current_timestamp # we are using the external command timestamps # Set the corresponding host's check type to passive chk.set_type_passive() # self.daemon.nb_check_received += 1 self.send_an_element(chk) # Ok now this result will be read by the scheduler the next loop # raise a passive check log only if needed if self.my_conf.log_passive_checks: log_level = 'info' if status_code == 1: # DOWN log_level = 'error' if status_code == 2: # UNREACHABLE log_level = 'warning' self.send_an_element(make_monitoring_log( log_level, 'PASSIVE HOST CHECK: %s;%d;%s;%s;%s' % ( host.get_name(), status_code, chk.output, chk.long_output, chk.perf_data)))
0.002643
def log(message: str, *args: str, category: str='info', logger_name: str='pgevents'): """Log a message to the given logger. If debug has not been enabled, this method will not log a message. Parameters ---------- message: str Message, with or without formatters, to print. args: Any Arguments to use with the message. args must either be a series of arguments that match up with anonymous formatters (i.e. "%<FORMAT-CHARACTER>") in the format string, or a dictionary with key-value pairs that match up with named formatters (i.e. "%(key)s") in the format string. logger_name: str Name of logger to which the message should be logged. """ global _DEBUG_ENABLED if _DEBUG_ENABLED: level = logging.INFO else: level = logging.CRITICAL + 1 with _create_logger(logger_name, level) as logger: log_fn = getattr(logger, category, None) if log_fn is None: raise ValueError('Invalid log category "{}"'.format(category)) log_fn(message, *args)
0.005489
def create(self, section, article):
    """
    Create (POST) an Article - See: Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/help_center/articles#create-article>`__.

    :param section: Section ID or object
    :param article: Article to create
    """
    return CRUDRequest(self).post(article, create=True, id=section)
0.005319
def get_version():
    """Extracts the version number from the version.py file."""
    VERSION_FILE = os.path.join(module_name, 'version.py')
    txt = open(VERSION_FILE).read()
    mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', txt, re.M)
    if mo:
        version = mo.group(1)
        bs_version = os.environ.get('MODULEVER', '0.0')
        assert bs_version == "0.0" or bs_version == version, \
            "Version {} specified by the build system doesn't match {} in " \
            "version.py".format(bs_version, version)
        return version
    else:
        raise RuntimeError('Unable to find version string in {0}.'
                           .format(VERSION_FILE))
0.001445
def toxml(self):
    """
    Exports this object into a LEMS XML object
    """
    return '<Tunnel name="{0}"'.format(self.name) + \
        ' endA="{0}"'.format(self.end_a) + \
        ' endB="{0}"'.format(self.end_b) + \
        ' componentA="{0}"'.format(self.component_a) + \
        ' componentB="{0}"'.format(self.component_b) + '/>'
0.005195
def mock(self, tst_name, tst_type):
    """
    Mock appearance/disappearance to force changes in interface, via setting local cache.
    :param tst_name:
    :param tst_type:
    :return:
    """
    print(" -> Mock {tst_name} appear".format(**locals()))
    # Service appears
    self.available[tst_name] = tst_type
    yield
    # Service disappear
    self.available.pop(tst_name)
    print(" -> Mock {tst_name} disappear".format(**locals()))
0.006012
def get_controller(self, state):
    '''
    Retrieves the actual controller name from the application
    Specific to Pecan (not available in the request object)
    '''
    path = state.request.pecan['routing_path'].split('/')[1:]
    return state.controller.__str__().split()[2]
0.006557
def spectrum(self, ref=None, segmentLengthMultiplier=1, mode=None, **kwargs): """ analyses the source to generate the linear spectrum. :param ref: the reference value for dB purposes. :param segmentLengthMultiplier: allow for increased resolution. :param mode: cq or none. :return: f : ndarray Array of sample frequencies. Pxx : ndarray linear spectrum. """ def analysisFunc(x, nperseg, **kwargs): f, Pxx_spec = signal.welch(self.samples, self.fs, nperseg=nperseg, scaling='spectrum', detrend=False, **kwargs) Pxx_spec = np.sqrt(Pxx_spec) # it seems a 3dB adjustment is required to account for the change in nperseg if x > 0: Pxx_spec = Pxx_spec / (10 ** ((3 * x) / 20)) if ref is not None: Pxx_spec = librosa.amplitude_to_db(Pxx_spec, ref) return f, Pxx_spec if mode == 'cq': return self._cq(analysisFunc, segmentLengthMultiplier) else: return analysisFunc(0, self.getSegmentLength() * segmentLengthMultiplier, **kwargs)
0.004102
def _add(self, name, *args, **kw):
    """
    Add an argument to the underlying parser and grow the list
    .all_arguments and the set .names
    """
    argname = list(self.argdict)[self._argno]
    if argname != name:
        raise NameError(
            'Setting argument %s, but it should be %s' % (name, argname))
    self._group.add_argument(*args, **kw)
    self.all_arguments.append((args, kw))
    self.names.append(name)
    self._argno += 1
0.003992
def _sub(cmd, *sections):
    """Build Subcmd instance."""
    cmd_func = cmd if isfunction(cmd) else cmd.cmd
    return Subcmd(baredoc(cmd), *sections, func=cmd_func)
0.005988
def create(clients_num, clients_host, clients_port, people_num, throttle): """ Prepare clients to execute :return: Modules to execute, cmd line function :rtype: list[WrapperClient], (str, object) -> str | None """ res = [] for number in range(clients_num): sc = EchoClient({ 'id': number, 'listen_bind_ip': clients_host, #'multicast_bind_ip': "127.0.0.1", 'listen_port': clients_port + number }) people = [] for person_number in range(people_num): people.append(Person(id=person_number)) wrapper = WrapperEchoClient({ 'client': sc, 'people': people, 'throttle': throttle }) res.append(wrapper) return res, cmd_line
0.0025
def cap(self):
    """
    "Caps" the construction of the pipeline, signifying that no more inputs
    and outputs are expected to be added and therefore the input and output
    nodes can be created along with the provenance.
    """
    to_cap = (self._inputnodes, self._outputnodes, self._prov)
    if to_cap == (None, None, None):
        self._inputnodes = {
            f: self._make_inputnode(f) for f in self.input_frequencies}
        self._outputnodes = {
            f: self._make_outputnode(f) for f in self.output_frequencies}
        self._prov = self._gen_prov()
    elif None in to_cap:
        raise ArcanaError(
            "If one of _inputnodes, _outputnodes or _prov is not None then"
            " they all should be in {}".format(self))
0.00243
def dicom_diff(file1, file2):
    """ Shows the fields that differ between two DICOM images.

    Inspired by https://code.google.com/p/pydicom/source/browse/source/dicom/examples/DicomDiff.py
    """
    datasets = compressed_dicom.read_file(file1), compressed_dicom.read_file(file2)

    rep = []
    for dataset in datasets:
        lines = (str(dataset.file_meta) + "\n" + str(dataset)).split('\n')
        lines = [line + '\n' for line in lines]  # add the newline to the end
        rep.append(lines)

    diff = difflib.Differ()
    for line in diff.compare(rep[0], rep[1]):
        if (line[0] == '+') or (line[0] == '-'):
            sys.stdout.write(line)
0.004525
def reduce_lists(d):
    """Replace single item lists in a dictionary with the single item."""
    for field in d:
        old_data = d[field]
        if len(old_data) == 1:
            d[field] = old_data[0]
0.004808
def wsdl2py(args=None): """Utility for automatically generating client/service interface code from a wsdl definition, and a set of classes representing element declarations and type definitions. By default invoking this script produces three files, each named after the wsdl definition name, in the current working directory. Generated Modules Suffix: _client.py -- client locator, rpc proxy port, messages _types.py -- typecodes representing _server.py -- server-side bindings Parameters: args -- optional can provide arguments, rather than parsing command-line. return: Default behavior is to return None, if args are provided then return names of the generated files. """ op = optparse.OptionParser(usage="USAGE: %wsdl2py [options] WSDL", description=wsdl2py.__doc__) # Basic options op.add_option("-x", "--schema", action="store_true", dest="schema", default=False, help="process just the schema from an xsd file [no services]") op.add_option("-d", "--debug", action="callback", callback=SetDebugCallback, help="debug output") # WS Options op.add_option("-a", "--address", action="store_true", dest="address", default=False, help="ws-addressing support, must include WS-Addressing schema.") # pyclass Metaclass op.add_option("-b", "--complexType", action="callback", callback=SetPyclassMetaclass, callback_kwargs={'module':'ZSI.generate.pyclass', 'metaclass':'pyclass_type'}, help="add convenience functions for complexTypes, including Getters, Setters, factory methods, and properties (via metaclass). *** DONT USE WITH --simple-naming ***") # Lazy Evaluation of Typecodes (done at serialization/parsing when needed). op.add_option("-l", "--lazy", action="callback", callback=SetUpLazyEvaluation, callback_kwargs={}, help="EXPERIMENTAL: recursion error solution, lazy evalution of typecodes") # Use Twisted op.add_option("-w", "--twisted", action="store_true", dest='twisted', default=False, help="generate a twisted.web client/server, dependencies python>=2.4, Twisted>=2.0.0, TwistedWeb>=0.5.0") op.add_option("-o", "--output-dir", action="store", dest="output_dir", default=".", type="string", help="save files in directory") op.add_option("-s", "--simple-naming", action="store_true", dest="simple_naming", default=False, help="map element names directly to python attributes") op.add_option("-p", "--pydoc", action="store_true", dest="pydoc", default=False, help="top-level directory for pydoc documentation.") is_cmdline = args is None if is_cmdline: (options, args) = op.parse_args() else: (options, args) = op.parse_args(args) if len(args) != 1: print>>sys.stderr, 'Expecting a file/url as argument (WSDL).' sys.exit(os.EX_USAGE) location = args[0] if options.schema is True: reader = XMLSchema.SchemaReader(base_url=location) else: reader = WSDLTools.WSDLReader() load = reader.loadFromFile if not isfile(location): load = reader.loadFromURL try: wsdl = load(location) except Exception, e: print >> sys.stderr, "Error loading %s: \n\t%s" % (location, e) traceback.print_exc(sys.stderr) # exit code UNIX specific, Windows? if hasattr(os, 'EX_NOINPUT'): sys.exit(os.EX_NOINPUT) sys.exit("error loading %s" %location) if isinstance(wsdl, XMLSchema.XMLSchema): wsdl.location = location files = _wsdl2py(options, wsdl) else: files = _wsdl2py(options, wsdl) files.append(_wsdl2dispatch(options, wsdl)) if getattr(options, 'pydoc', False): _writepydoc(os.path.join('docs', 'API'), *files) if is_cmdline: return return files
0.009998
def open_issue(self):
    """Changes the state of issue to 'open'."""
    self.github_request.update(issue=self, state='open')
    self.state = 'open'
0.011696
def load_string(self, string_data, share_name, directory_name, file_name, **kwargs):
    """
    Upload a string to Azure File Share.

    :param string_data: String to load.
    :type string_data: str
    :param share_name: Name of the share.
    :type share_name: str
    :param directory_name: Name of the directory.
    :type directory_name: str
    :param file_name: Name of the file.
    :type file_name: str
    :param kwargs: Optional keyword arguments that
        `FileService.create_file_from_text()` takes.
    :type kwargs: object
    """
    self.connection.create_file_from_text(share_name, directory_name,
                                          file_name, string_data, **kwargs)
0.003942
def _modify_new_lines(code_to_modify, offset, code_to_insert): """ Update new lines: the bytecode inserted should be the last instruction of the previous line. :return: bytes sequence of code with updated lines offsets """ # There's a nice overview of co_lnotab in # https://github.com/python/cpython/blob/3.6/Objects/lnotab_notes.txt new_list = list(code_to_modify.co_lnotab) if not new_list: # Could happen on a lambda (in this case, a breakpoint in the lambda should fallback to # tracing). return None # As all numbers are relative, what we want is to hide the code we inserted in the previous line # (it should be the last thing right before we increment the line so that we have a line event # right after the inserted code). bytecode_delta = len(code_to_insert) byte_increments = code_to_modify.co_lnotab[0::2] line_increments = code_to_modify.co_lnotab[1::2] if offset == 0: new_list[0] += bytecode_delta else: addr = 0 it = zip(byte_increments, line_increments) for i, (byte_incr, _line_incr) in enumerate(it): addr += byte_incr if addr == offset: new_list[i * 2] += bytecode_delta break return bytes(new_list)
0.003834
def print_summary_stats(counter): """ Prints summary statistics about which writer strategies were used, and how much they were used. :param counter: A list of lists. The first entry on the inner list is a number count of how many times a WriterStrategy was used, and the second entry is a string label describing the WriterStrategy """ sum = 0 # Sum of labeled WriterStrategy modifications u_sum = 0 # Sum of unlabeled WriterStrategy modifications for c in counter: if c[1] is not None and c[0] > 0: print("Total " + c[1] + " modified:\t" + str(c[0])) sum += c[0] else: u_sum += c[0] if u_sum > 0: print("Unlabeled modifications: \t" + str(u_sum)) print("Total modifications: \t\t" + str(sum + u_sum))
0.001212
def _add_ttl_ns(self, line): """ takes one prefix line from the turtle file and binds the namespace to the class Args: line: the turtle prefix line string """ lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3])) lg.setLevel(self.log_level) lg.debug("line:\n%s", line) line = str(line).strip() # if the line is not a prefix line exit if line is None or line == 'none' or line == '' \ or not line.lower().startswith('@prefix'): return # parse the turtle line line = line.replace("@prefix","",1).strip() if line.endswith("."): line = line[:-1] prefix = line[:line.find(":")].strip() uri = self.clean_iri(line[line.find(":")+1:].strip()) # add the namespace to the class lg.debug("\nprefix: %s uri: %s", prefix, uri) self.bind(prefix, uri, override=False, calc=False)
0.004098
def parse(self, rrstr): # type: (bytes) -> None ''' Parse a Rock Ridge Time Stamp record out of a string. Parameters: rrstr - The string to parse the record out of. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('TF record already initialized!') # We assume that the caller has already checked the su_entry_version, # so we don't bother. (su_len, su_entry_version_unused, self.time_flags,) = struct.unpack_from('=BBB', rrstr[:5], 2) if su_len < 5: raise pycdlibexception.PyCdlibInvalidISO('Not enough bytes in the TF record') tflen = 7 if self.time_flags & (1 << 7): tflen = 17 offset = 5 for index, fieldname in enumerate(self.FIELDNAMES): if self.time_flags & (1 << index): if tflen == 7: setattr(self, fieldname, dates.DirectoryRecordDate()) elif tflen == 17: setattr(self, fieldname, dates.VolumeDescriptorDate()) getattr(self, fieldname).parse(rrstr[offset:offset + tflen]) offset += tflen self._initialized = True
0.004747
def scroll_cb(self, viewer, direction, amt, data_x, data_y): """Called when the user scrolls in the color bar viewer. Pan up or down to show additional bars. """ bd = viewer.get_bindings() direction = bd.get_direction(direction) pan_x, pan_y = viewer.get_pan()[:2] qty = self._cmsep * amt * self.settings.get('cbar_pan_accel', 1.0) if direction == 'up': pan_y -= qty else: pan_y += qty pan_y = min(max(pan_y, 0), self._max_y) viewer.set_pan(pan_x, pan_y)
0.003521
def list_attached_partitions(self, name=None, status=None): """ Return the partitions to which this storage group is currently attached, optionally filtered by partition name and status. Authorization requirements: * Object-access permission to this storage group. * Task permission to the "Configure Storage - System Programmer" task. Parameters: name (:term:`string`): Filter pattern (regular expression) to limit returned partitions to those that have a matching name. If `None`, no filtering for the partition name takes place. status (:term:`string`): Filter string to limit returned partitions to those that have a matching status. The value must be a valid partition status property value. If `None`, no filtering for the partition status takes place. Returns: List of :class:`~zhmcclient.Partition` objects representing the partitions to whivch this storage group is currently attached, with a minimal set of properties ('object-id', 'name', 'status'). Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ query_parms = [] if name is not None: self.manager._append_query_parms(query_parms, 'name', name) if status is not None: self.manager._append_query_parms(query_parms, 'status', status) query_parms_str = '&'.join(query_parms) if query_parms_str: query_parms_str = '?{}'.format(query_parms_str) uri = '{}/operations/get-partitions{}'.format( self.uri, query_parms_str) sg_cpc = self.cpc part_mgr = sg_cpc.partitions result = self.manager.session.get(uri) props_list = result['partitions'] part_list = [] for props in props_list: part = part_mgr.resource_object(props['object-uri'], props) part_list.append(part) return part_list
0.000934
def call(args): """ Call terminal command and return exit_code and stdout Parameters ---------- args : list A command and arguments list Returns ------- list : [exit_code, stdout] exit_code indicate the exit code of the command and stdout indicate the output of the command """ b = StringIO() p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) encoding = getattr(sys.stdout, 'encoding', None) or 'utf-8' # old python has bug in p.stdout, so the following little # hack is required. for stdout in iter(p.stdout.readline, ''): if len(stdout) == 0: break # translate non unicode to unicode stdout = force_unicode(stdout, encoding) # StringIO store unicode b.write(stdout) # stdout require non unicode sys.stdout.write(from_unicode(stdout, encoding)) sys.stdout.flush() buf = b.getvalue() p.stdout.close() return p.returncode or 0, buf
0.00278
def _extract_cookies(self, response: Response):
    '''Load the cookie headers from the Response.'''
    self._cookie_jar.extract_cookies(
        response, response.request, self._get_cookie_referrer_host()
    )
0.008734
def add_download(self, info, future): """ Hand off a download to the Downloads plugin, if it is present. Parameters ---------- info : `~ginga.misc.Bunch.Bunch` A bunch of information about the URI as returned by `ginga.util.iohelper.get_fileinfo()` future : `~ginga.misc.Future.Future` A future that represents the future computation to be performed after downloading the file. Resolving the future will trigger the computation. """ if self.gpmon.has_plugin('Downloads'): obj = self.gpmon.get_plugin('Downloads') self.gui_do(obj.add_download, info, future) else: self.show_error("Please activate the 'Downloads' plugin to" " enable download functionality")
0.002339
def wrap_with_try(self, node): """Wrap an ast node in a 'try' node to enter debug on exception.""" handlers = [] if self.ignore_exceptions is None: handlers.append(ast.ExceptHandler(type=None, name=None, body=[ast.Raise()])) else: ignores_nodes = self.ignore_exceptions handlers.append(ast.ExceptHandler(type=ast.Tuple(ignores_nodes, ast.Load()), name=None, body=[ast.Raise()])) if self.catch_exception is None or \ get_node_value(self.catch_exception) not in \ (get_node_value(ast_node) for ast_node in self.ignore_exceptions): call_extra_parameters = [] if IS_PYTHON_3 else [None, None] start_debug_cmd = ast.Expr( value=ast.Call(ast.Name("start_debugging", ast.Load()), [], [], *call_extra_parameters)) catch_exception_type = None if self.catch_exception is not None: catch_exception_type = self.catch_exception handlers.append(ast.ExceptHandler(type=catch_exception_type, name=None, body=[start_debug_cmd])) try_except_extra_params = {"finalbody": []} if IS_PYTHON_3 else {} new_node = self.ast_try_except(orelse=[], body=[node], handlers=handlers, **try_except_extra_params) return ast.copy_location(new_node, node)
0.001068
def open_window(self, widget, data=None):
    """
    Function opens Main Window and, in case of a previously created project,
    it switches to the /home directory.
    This is a fix in case that da creates a project, the project was deleted,
    and the GUI was not closed yet.
    """
    if data is not None:
        self.data = data
    os.chdir(os.path.expanduser('~'))
    self.kwargs = dict()
    self.main_win.show_all()
0.004396
def read_requirements_file(path): """ reads requirements.txt file and handles PyPI index URLs :param path: (str) path to requirements.txt file :return: (tuple of lists) """ last_pypi_url = None with open(path) as f: requires = [] pypi_urls = [] for line in f.readlines(): if not line: continue if '--' in line: match = re.match(r'--index-url\s+([\w\d:/.-]+)\s', line) if match: last_pypi_url = match.group(1) if not last_pypi_url.endswith("/"): last_pypi_url += "/" else: if last_pypi_url: pypi_urls.append(last_pypi_url + line.strip().lower()) requires.append(line) return requires, pypi_urls
0.001178
def stage_http_request(self, conn_id, version, url, target, method, headers, payload): """Log request HTTP information including url, headers, etc.""" if self.enabled and self.http_detail_level is not None and \ self.httplogger.isEnabledFor(logging.DEBUG): # pylint: disable=attribute-defined-outside-init # if Auth header, mask data if 'Authorization' in headers: authtype, cred = headers['Authorization'].split(' ') headers['Authorization'] = _format( "{0} {1}", authtype, 'X' * len(cred)) header_str = ' '.join('{0}:{1!r}'.format(k, v) for k, v in headers.items()) if self.http_detail_level == 'summary': upayload = "" elif isinstance(payload, six.binary_type): upayload = payload.decode('utf-8') else: upayload = payload if self.http_maxlen and (len(payload) > self.http_maxlen): upayload = upayload[:self.http_maxlen] + '...' self.httplogger.debug('Request:%s %s %s %s %s %s\n %s', conn_id, method, target, version, url, header_str, upayload)
0.002244
def write(self, vendor_id=None, log_type=None, json=None, **kwargs): """Write log records to the Logging Service. This API requires a JSON array in its request body, each element of which represents a single log record. Log records are provided as JSON objects. Every log record must include the primary timestamp field that you identified when you registered your app. Every log record must also identify the log type. Args: vendor_id (str): Vendor ID. log_type (str): Log type. json (list): Payload/request body. **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters. Returns: requests.Response: Requests Response() object. Examples: Refer to ``logging_write.py`` example. """ path = "/logging-service/v1/logs/{}/{}".format( vendor_id, log_type ) r = self._httpclient.request( method="POST", url=self.url, json=json, path=path, **kwargs ) return r
0.002625
def reconnect(self): """ Reconnect session with device. Args: None Returns: bool: True if reconnect succeeds, False if not. Raises: None """ if self._auth_method is "userpass": self._mgr = manager.connect(host=self._conn[0], port=self._conn[1], username=self._auth[0], password=self._auth[1], hostkey_verify=self._hostkey_verify) elif self._auth_method is "key": self._mgr = manager.connect(host=self._conn[0], port=self._conn[1], username=self._auth[0], key_filename=self._auth_key, hostkey_verify=self._hostkey_verify) else: raise ValueError("auth_method incorrect value.") self._mgr.timeout = 600 return True
0.001826
def creation_dates(self, sort=True):
    """
    Return a list of (file_path, creation_date) tuples created from list of walked paths.

    :param sort: Bool, sorts file_paths on created_date from newest to oldest.
    :return: List of (file_path, created_date) tuples.
    """
    if not sort:
        return pool_creation_date(self.filepaths)
    else:
        pcd = pool_creation_date(self.filepaths)
        pcd.sort(key=itemgetter(1), reverse=True)
        return pcd
0.007752
def instantiate(self, **extra_args):
    """ Instantiate the model """
    input_block = self.input_block.instantiate()
    backbone = self.backbone.instantiate(**extra_args)
    return StochasticPolicyModel(input_block, backbone, extra_args['action_space'])
0.010909
def regression():
    """
    Run regression testing - lint and then run all tests.
    """
    # HACK: Start using hitchbuildpy to get around this.
    Command("touch", DIR.project.joinpath("pathquery", "__init__.py").abspath()).run()
    storybook = _storybook({}).only_uninherited()
    #storybook.with_params(**{"python version": "2.7.10"})\
        #.ordered_by_name().play()
    Command("touch", DIR.project.joinpath("pathquery", "__init__.py").abspath()).run()
    storybook.with_params(**{"python version": "3.5.0"}).ordered_by_name().play()
    lint()
0.014159
def compile(self, X, verbose=False): """method to validate and prepare data-dependent parameters Parameters --------- X : array-like Input dataset verbose : bool whether to show warnings Returns ------- None """ for term in self._terms: term.compile(X, verbose=False) if self.by is not None and self.by >= X.shape[1]: raise ValueError('by variable requires feature {}, '\ 'but X has only {} dimensions'\ .format(self.by, X.shape[1])) return self
0.006144
def __vCmdConnectCameras(self, args):
    '''ToDo: Validate the argument as a valid port'''
    if len(args) >= 1:
        self.WirelessPort = args[0]
    print ("Connecting to Cameras on %s" % self.WirelessPort)
    self.__vRegisterCameras()
0.01145
def new(self):
    # type: () -> None
    '''
    A method to create a new UDF Terminating Descriptor.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Terminating Descriptor already initialized')

    self.desc_tag = UDFTag()
    self.desc_tag.new(8)  # FIXME: we should let the user set serial_number

    self._initialized = True
0.008316
def inverted(self):
    """Return the inverse of the transform."""
    # This is a bit of hackery so that we can put a single "inverse"
    # function here. If we just made "self._inverse_type" point to the class
    # in question, it wouldn't be defined yet. This way, it's done at
    # runtime and we avoid the definition problem. Hackish, but better
    # than repeating code everywhere or making a relatively complex
    # metaclass.
    inverse_type = globals()[self._inverse_type]
    return inverse_type(self._center_longitude, self._center_latitude,
                        self._resolution)
0.004666
def _get_script_nflows(self): """ Write the submission script. Return (script, num_flows_in_batch) """ flows_torun = [f for f in self.flows if not f.all_ok] if not flows_torun: return "", 0 executable = [ 'export _LOG=%s' % self.log_file.path, 'date1=$(date +"%s")', 'echo Running abirun.py in batch mode > ${_LOG}', " ", ] app = executable.append # Build list of abirun commands and save the name of the log files. self.sched_logs, num_flows = [], len(flows_torun) for i, flow in enumerate(flows_torun): logfile = os.path.join(self.workdir, "log_" + os.path.basename(flow.workdir)) app("echo Starting flow %d/%d on: `date` >> ${LOG}" % (i+1, num_flows)) app("\nabirun.py %s scheduler > %s" % (flow.workdir, logfile)) app("echo Returning from abirun on `date` with retcode $? >> ${_LOG}") assert logfile not in self.sched_logs self.sched_logs.append(logfile) # Remove the batch pid_file and compute elapsed time. executable.extend([ " ", "# Remove batch pid file", 'rm %s' % self.batch_pidfile.path, " ", "# Compute elapsed time", 'date2=$(date +"%s")', 'diff=$(($date2-$date1))', 'echo $(($diff / 60)) minutes and $(($diff % 60)) seconds elapsed. >> ${_LOG}' ]) return self.qadapter.get_script_str( job_name=self.name, launch_dir=self.workdir, executable=executable, qout_path=self.qout_file.path, qerr_path=self.qerr_file.path, ), num_flows
0.003399
def update_file_status(self): """Update the status of all the files in the archive""" nfiles = len(self.cache.keys()) status_vect = np.zeros((6), int) sys.stdout.write("Updating status of %i files: " % nfiles) sys.stdout.flush() for i, key in enumerate(self.cache.keys()): if i % 200 == 0: sys.stdout.write('.') sys.stdout.flush() fhandle = self.cache[key] fhandle.check_status(self._base_path) fhandle.update_table_row(self._table, fhandle.key - 1) status_vect[fhandle.status] += 1 sys.stdout.write("!\n") sys.stdout.flush() sys.stdout.write("Summary:\n") sys.stdout.write(" no_file: %i\n" % status_vect[0]) sys.stdout.write(" expected: %i\n" % status_vect[1]) sys.stdout.write(" exists: %i\n" % status_vect[2]) sys.stdout.write(" missing: %i\n" % status_vect[3]) sys.stdout.write(" superseded: %i\n" % status_vect[4]) sys.stdout.write(" temp_removed: %i\n" % status_vect[5])
0.001791
def off(self):
    """Send an OFF message to device group."""
    off_command = ExtendedSend(self._address,
                               COMMAND_LIGHT_OFF_0X13_0X00,
                               self._udata)
    off_command.set_checksum()
    self._send_method(off_command, self._off_message_received)
0.006079
def _right_join(left, right, left_key_fn, right_key_fn, join_fn=union_join):
    """
    :param left: left iterable to be joined
    :param right: right iterable to be joined
    :param function left_key_fn: function that produces hashable value from left objects
    :param function right_key_fn: function that produces hashable value from right objects
    :param join_fn: function called on joined left and right iterable items to complete join
    :rtype: list
    """
    def reversed_join_fn(left_ele, right_ele):
        return join_fn(right_ele, left_ele)

    return _left_join(right, left, right_key_fn, left_key_fn, reversed_join_fn)
0.006221
def _convert(self, pos):
    """ For QPainter coordinate system, reflect over X axis and
    translate from center to top-left
    """
    px = pos[0] + self.logical_size.width() / 2
    py = self.logical_size.height() / 2 - pos[1]
    return px, py
0.007326
def preferred_width(self, cli, max_available_width):
    """
    Report the width of the longest meta text as the preferred width of this control.

    It could be that we use less width, but this way, we're sure that the
    layout doesn't change when we select another completion (E.g. that
    completions are suddenly shown in more or fewer columns.)
    """
    if cli.current_buffer.complete_state:
        state = cli.current_buffer.complete_state
        return 2 + max(get_cwidth(c.display_meta) for c in state.current_completions)
    else:
        return 0
0.006547
def refresh(self, *args, **kwargs):
    """
    Updates/upgrades all system packages.
    """
    r = self.local_renderer
    packager = self.packager
    if packager == APT:
        r.sudo('DEBIAN_FRONTEND=noninteractive apt-get -yq update --fix-missing')
    elif packager == YUM:
        raise NotImplementedError
        #return upgrade_yum(*args, **kwargs)
    else:
        raise Exception('Unknown packager: %s' % (packager,))
0.008316
def import_submodules(package_name):
    """ Import all submodules of a module. """
    import importlib, pkgutil
    package = importlib.import_module(package_name)
    return {
        name: importlib.import_module(package_name + '.' + name)
        for _, name, _ in pkgutil.iter_modules(package.__path__)
    }
0.006211
def open_default_resource_manager(library):
    """This function returns a session to the Default Resource Manager resource.

    Corresponds to viOpenDefaultRM function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :return: Unique logical identifier to a Default Resource Manager session, return value of the library call.
    :rtype: session, :class:`pyvisa.constants.StatusCode`
    """
    session = ViSession()
    ret = library.viOpenDefaultRM(byref(session))
    return session.value, ret
0.00565
def format_epilog(self, ctx, formatter):
    """Writes the epilog into the formatter if it exists."""
    if self.epilog:
        formatter.write_paragraph()
        with formatter.indentation():
            formatter.write_text(self.epilog)
0.007663
def main(args=None): """Script entry point.""" if args is None: parser = get_argument_parser() args = parser.parse_args() #series_matrix_file = newstr(args.series_matrix_file, 'utf-8') #output_file = newstr(args.output_file, 'utf-8') #encoding = newstr(args.encoding, 'utf-8') series_matrix_file = args.series_matrix_file output_file = args.output_file encoding = args.encoding # log_file = args.log_file # quiet = args.quiet # verbose = args.verbose # logger = misc.get_logger(log_file = log_file, quiet = quiet, # verbose = verbose) accessions, titles, celfile_urls = read_series_matrix( series_matrix_file, encoding=encoding) write_sample_sheet(output_file, accessions, titles, celfile_urls) return 0
0.004975
def plot_connectivity_topos(self, fig=None): """ Plot scalp projections of the sources. This function only plots the topos. Use in combination with connectivity plotting. Parameters ---------- fig : {None, Figure object}, optional Where to plot the topos. f set to **None**, a new figure is created. Otherwise plot into the provided figure object. Returns ------- fig : Figure object Instance of the figure in which was plotted. """ self._prepare_plots(True, False) if self.plot_outside_topo: fig = self.plotting.plot_connectivity_topos('outside', self.topo_, self.mixmaps_, fig) elif self.plot_diagonal == 'topo': fig = self.plotting.plot_connectivity_topos('diagonal', self.topo_, self.mixmaps_, fig) return fig
0.006818
def bounding_square_polygon(self, inscribed_circle_radius_km=10.0): """ Returns a square polygon (bounding box) that circumscribes the circle having this geopoint as centre and having the specified radius in kilometers. The polygon's points calculation is based on theory exposed by: http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates by Jan Philip Matuschek, owner of the intellectual property of such material. In short: - locally to the geopoint, the Earth's surface is approximated to a sphere with radius = Earth's radius - the calculation works fine also when the bounding box contains the Earth's poles and the 180 deg meridian :param inscribed_circle_radius_km: the radius of the inscribed circle, defaults to 10 kms :type inscribed_circle_radius_km: int or float :return: a `pyowm.utils.geo.Polygon` instance """ assert isinstance(inscribed_circle_radius_km, int) or isinstance(inscribed_circle_radius_km, float) assert inscribed_circle_radius_km > 0., 'Radius must be greater than zero' # turn metric distance to radians on the approximated local sphere rad_distance = float(inscribed_circle_radius_km) / EARTH_RADIUS_KM # calculating min/max lat for bounding box bb_min_lat_deg = self.lat * math.pi/180. - rad_distance bb_max_lat_deg = self.lat * math.pi/180. + rad_distance # now checking for poles... if bb_min_lat_deg > math.radians(-90) and bb_max_lat_deg < math.radians(90): # no poles in the bounding box delta_lon = math.asin(math.sin(rad_distance) / math.cos(math.radians(self.lat))) bb_min_lon_deg = math.radians(self.lon) - delta_lon if bb_min_lon_deg < math.radians(-180): bb_min_lon_deg += 2 * math.pi bb_max_lon_deg = math.radians(self.lon) + delta_lon if bb_max_lon_deg > math.radians(180): bb_max_lon_deg -= 2 * math.pi else: # a pole is contained in the bounding box bb_min_lat_deg = max(bb_min_lat_deg, math.radians(-90)) bb_max_lat_deg = min(bb_max_lat_deg, math.radians(90)) bb_min_lon_deg = math.radians(-180) bb_max_lon_deg = math.radians(180) # turn back from radians to decimal bb_min_lat = bb_min_lat_deg * 180./math.pi bb_max_lat = bb_max_lat_deg * 180./math.pi bb_min_lon = bb_min_lon_deg * 180./math.pi bb_max_lon = bb_max_lon_deg * 180./math.pi return Polygon([[ [bb_min_lon, bb_max_lat], [bb_max_lon, bb_max_lat], [bb_max_lon, bb_min_lat], [bb_min_lon, bb_min_lat], [bb_min_lon, bb_max_lat] ]])
0.004646
def get_ticket(self, ticket_id):
    """Fetches the ticket for the given ticket ID"""
    url = 'tickets/%d' % ticket_id
    ticket = self._api._get(url)
    return Ticket(**ticket)
0.010152
def assign_asset_to_repository(self, asset_id, repository_id): """Adds an existing ``Asset`` to a ``Repository``. arg: asset_id (osid.id.Id): the ``Id`` of the ``Asset`` arg: repository_id (osid.id.Id): the ``Id`` of the ``Repository`` raise: AlreadyExists - ``asset_id`` already assigned to ``repository_id`` raise: NotFound - ``asset_id`` or ``repository_id`` not found raise: NullArgument - ``asset_id`` or ``repository_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin mgr = self._get_provider_manager('REPOSITORY', local=True) lookup_session = mgr.get_repository_lookup_session(proxy=self._proxy) lookup_session.get_repository(repository_id) # to raise NotFound self._assign_object_to_catalog(asset_id, repository_id)
0.00175
def get_single_file_info(f_path, int_path):
    """ Gets the creation and last change times for a single file,
    f_path is the path to the file on disk, int_path is an internal
    path relative to a root directory. """
    return {
        'path'     : force_unicode(int_path),
        'created'  : os.path.getctime(f_path),
        'last_mod' : os.path.getmtime(f_path)}
0.013298
def get_device(name, device_dict, loader, resource_dict): """Get a device from a device dictionary. :param name: name of the device :param device_dict: device dictionary :rtype: Device """ device = Device(name, device_dict.get('delimiter', ';').encode('utf-8')) device_dict = get_bases(device_dict, loader) err = device_dict.get('error', {}) device.add_error_handler(err) for itype, eom_dict in device_dict.get('eom', {}).items(): device.add_eom(itype, *_get_pair(eom_dict)) update_component(name, device, device_dict) for ch_name, ch_dict in device_dict.get('channels', {}).items(): device.add_channels(ch_name, get_channel(device, ch_name, ch_dict, loader, resource_dict)) return device
0.001232
def create_textinput(self, name, field, value, **extra_attrs): '''Generate and render a :class:`django.forms.widgets.TextInput` for a single year, month, or day input. If size is specified in the extra attributes, it will also be used to set the maximum length of the field. :param name: base name of the input field :param field: pattern for this field (used with name to generate input name) :param value: initial value for the field :param extra_attrs: any extra widget attributes :returns: rendered HTML output for the text input ''' # TODO: move id-generation logic out for re-use if 'id' in self.attrs: id_ = self.attrs['id'] else: id_ = 'id_%s' % name # use size to set maximum length if 'size' in extra_attrs: extra_attrs['maxlength'] = extra_attrs['size'] local_attrs = self.build_attrs(id=field % id_, **extra_attrs) txtinput = TextInput() return txtinput.render(field % name, value, local_attrs)
0.002755
def delete(self, *, if_unused=True):
    """
    Delete the exchange.

    This method is a :ref:`coroutine <coroutine>`.

    :keyword bool if_unused: If true, the exchange will only be deleted if
        it has no queues bound to it.
    """
    self.sender.send_ExchangeDelete(self.name, if_unused)
    yield from self.synchroniser.wait(spec.ExchangeDeleteOK)
    self.reader.ready()
0.004739
def refresh(self, document):
    """ Load a new copy of a document from the database.  does not
        replace the old one """
    try:
        old_cache_size = self.cache_size
        self.cache_size = 0
        obj = self.query(type(document)).filter_by(mongo_id=document.mongo_id).one()
    finally:
        self.cache_size = old_cache_size
    self.cache_write(obj)
    return obj
0.034286
def copy(self):
    """Create a copy of this pen."""
    pen = Pen()
    pen.__dict__ = self.__dict__.copy()
    return pen
0.014388
def log_trial(args): ''''get trial log path''' trial_id_path_dict = {} nni_config = Config(get_config_filename(args)) rest_port = nni_config.get_config('restServerPort') rest_pid = nni_config.get_config('restServerPid') if not detect_process(rest_pid): print_error('Experiment is not running...') return running, response = check_rest_server_quick(rest_port) if running: response = rest_get(trial_jobs_url(rest_port), REST_TIME_OUT) if response and check_response(response): content = json.loads(response.text) for trial in content: trial_id_path_dict[trial['id']] = trial['logPath'] else: print_error('Restful server is not running...') exit(1) if args.id: if args.trial_id: if trial_id_path_dict.get(args.trial_id): print_normal('id:' + args.trial_id + ' path:' + trial_id_path_dict[args.trial_id]) else: print_error('trial id is not valid!') exit(1) else: print_error('please specific the trial id!') exit(1) else: for key in trial_id_path_dict: print('id:' + key + ' path:' + trial_id_path_dict[key])
0.001577
def random_filtered_sources(sources, srcfilter, seed):
    """
    :param sources: a list of sources
    :param srcfilter: a SourceFilter instance
    :param seed: a random seed
    :returns: an empty list or a list with a single filtered source
    """
    random.seed(seed)
    while sources:
        src = random.choice(sources)
        if srcfilter.get_close_sites(src) is not None:
            return [src]
        sources.remove(src)
    return []
0.002212
def resultcallback(self, replace=False): """Adds a result callback to the chain command. By default if a result callback is already registered this will chain them but this can be disabled with the `replace` parameter. The result callback is invoked with the return value of the subcommand (or the list of return values from all subcommands if chaining is enabled) as well as the parameters as they would be passed to the main callback. Example:: @click.group() @click.option('-i', '--input', default=23) def cli(input): return 42 @cli.resultcallback() def process_result(result, input): return result + input .. versionadded:: 3.0 :param replace: if set to `True` an already existing result callback will be removed. """ def decorator(f): old_callback = self.result_callback if old_callback is None or replace: self.result_callback = f return f def function(__value, *args, **kwargs): return f(old_callback(__value, *args, **kwargs), *args, **kwargs) self.result_callback = rv = update_wrapper(function, f) return rv return decorator
0.002152
def randstring(length=1):
    """
    Generate a random string consisting of letters, digits and punctuation

    :type length: integer
    :param length: The length of the generated string.
    """
    charstouse = string.ascii_letters + string.digits + string.punctuation
    newpass = ''
    for _ in range(length):
        newpass += str(charstouse[random.randint(0, len(charstouse) - 1)])
    return newpass
0.002427
def _get_assignment_target_end(self, ast_module):
    """Returns position of 1st char after assignment target.

    If there is no assignment, -1 is returned
    If there are more than one of any (expressions or assignments) then a
    ValueError is raised.
    """
    if len(ast_module.body) > 1:
        raise ValueError("More than one expression or assignment.")
    elif len(ast_module.body) > 0 and \
            type(ast_module.body[0]) is ast.Assign:
        if len(ast_module.body[0].targets) != 1:
            raise ValueError("More than one assignment target.")
        else:
            return len(ast_module.body[0].targets[0].id)
    return -1
0.002786
def parse_tle(fileobj): """Parse a file of TLE satellite element sets. Builds an Earth satellite from each pair of adjacent lines in the file that start with "1 " and "2 " and have 69 or more characters each. If the preceding line is exactly 24 characters long, then it is parsed as the satellite's name. For each satellite found, yields a tuple `(names, sat)` giving the name(s) on the preceding line (or `None` if no name was found) and the satellite object itself. An exception is raised if the attempt to parse a pair of candidate lines as TLE elements fails. """ b0 = b1 = b'' for b2 in fileobj: if (b1.startswith(b'1 ') and len(b1) >= 69 and b2.startswith(b'2 ') and len(b2) >= 69): b0 = b0.rstrip(b'\n\r') if len(b0) == 24: # Celestrak name = b0.decode('ascii').rstrip() names = [name] elif b0.startswith(b'0 '): # Spacetrack 3-line format name = b0[2:].decode('ascii').rstrip() names = [name] else: name = None names = () line1 = b1.decode('ascii') line2 = b2.decode('ascii') sat = EarthSatellite(line1, line2, name) if name and ' (' in name: # Given a name like `ISS (ZARYA)` or `HTV-6 (KOUNOTORI # 6)`, also support lookup by the name inside or outside # the parentheses. short_name, secondary_name = name.split(' (') secondary_name = secondary_name.rstrip(')') names.append(short_name) names.append(secondary_name) yield names, sat b0 = b1 b1 = b2
0.001695
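A short usage sketch for parse_tle above; the file name is illustrative and EarthSatellite is assumed to come from the same package.

# TLE files are read as bytes: two element lines per satellite, with an
# optional name line above them.
with open('stations.txt', 'rb') as fileobj:   # hypothetical file
    for names, sat in parse_tle(fileobj):
        print(names, sat)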
def c32encode(input_hex, min_length=None):
    """
    >>> c32encode('a46ff88886c2ef9762d970b4d2c63678835bd39d')
    'MHQZH246RBQSERPSE2TD5HHPF21NQMWX'
    >>> c32encode('')
    ''
    >>> c32encode('0000000000000000000000000000000000000000', 20)
    '00000000000000000000'
    >>> c32encode('0000000000000000000000000000000000000001', 20)
    '00000000000000000001'
    >>> c32encode('1000000000000000000000000000000000000001', 32)
    '20000000000000000000000000000001'
    >>> c32encode('1000000000000000000000000000000000000000', 32)
    '20000000000000000000000000000000'
    >>> c32encode('1')
    '1'
    >>> c32encode('22')
    '12'
    >>> c32encode('001')
    '01'
    >>> c32encode('0001')
    '01'
    >>> c32encode('00001')
    '001'
    >>> c32encode('000001')
    '001'
    >>> c32encode('10')
    'G'
    >>> c32encode('100')
    '80'
    >>> c32encode('1000')
    '400'
    >>> c32encode('10000')
    '2000'
    >>> c32encode('100000')
    '10000'
    >>> c32encode('1000000')
    'G0000'
    """
    if len(input_hex) == 0:
        return ''

    if not re.match(r'^[0-9a-fA-F]+$', input_hex):
        raise ValueError('Requires a hex string')

    if len(input_hex) % 2 != 0:
        input_hex = '0{}'.format(input_hex)

    input_hex = input_hex.lower()

    res = []
    carry = 0
    for i in range(len(input_hex) - 1, -1, -1):
        if (carry < 4):
            current_code = HEX.index(input_hex[i]) >> carry
            next_code = 0
            if i != 0:
                next_code = HEX.index(input_hex[i-1])

            # carry = 0, next_bits is 1, carry = 1, next_bits = 2
            next_bits = 1 + carry
            next_low_bits = (next_code % (1 << next_bits)) << (5 - next_bits)
            cur_c32_digit = C32[current_code + next_low_bits]
            carry = next_bits
            res = [cur_c32_digit] + res
        else:
            carry = 0

    # fix padding
    # -- strip leading c32 zeros
    # -- add leading hex zeros
    c32_leading_zeros = 0
    for i in range(0, len(res)):
        if res[i] != '0':
            break

        c32_leading_zeros += 1

    res = res[c32_leading_zeros:]

    num_leading_hex_zeros = 0
    num_leading_byte_zeros = 0
    for i in range(0, len(input_hex)):
        if input_hex[i] != '0':
            break

        num_leading_hex_zeros += 1

    # integer division so the list multiplication below receives an int
    # (plain / would produce a float under Python 3)
    num_leading_byte_zeros = num_leading_hex_zeros // 2
    res = ['0'] * num_leading_byte_zeros + res

    # guard against the default min_length=None, which cannot be compared to 0
    if min_length is not None and min_length > 0:
        count = min_length - len(res)
        if count > 0:
            res = ['0'] * count + res

    return ''.join(res)
0.000776
def stratSRS2(f, G, y0, tspan, Jmethod=Jkpw, dW=None, J=None):
    """Use the Roessler2010 order 1.0 strong Stochastic Runge-Kutta algorithm
    SRS2 to integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)
    where y is a d-dimensional vector variable, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) is a vector of m independent Wiener increments.

    This algorithm is suitable for Stratonovich systems with an arbitrary noise
    coefficient matrix G (i.e. the noise does not need to be scalar, diagonal,
    or commutative). The algorithm has order 2.0 convergence for the
    deterministic part alone and order 1.0 strong convergence for the complete
    stochastic system.

    Args:
      f: A function f(y, t) returning an array of shape (d,)
         Vector-valued function to define the deterministic part of the system

      G: The d x m coefficient function G can be given in two different ways:

         You can provide a single function G(y, t) that returns an array of
         shape (d, m). In this case the entire matrix G() will be evaluated
         2m+1 times at each time step so complexity grows quadratically with m.

         Alternatively you can provide a list of m functions g(y, t), each
         defining one column of G (each returning an array of shape (d,)).
         In this case each g will be evaluated 3 times at each time step so
         complexity grows linearly with m. If your system has large m and
         G involves complicated functions, consider using this way.

      y0: array of shape (d,) giving the initial state vector y(t==0)

      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.

      Jmethod (callable, optional): which function to use to simulate repeated
        Stratonovich integrals. Here you can choose either sdeint.Jkpw (the
        default) or sdeint.Jwik (which is more accurate but uses a lot of
        memory in the current implementation).

      dW: optional array of shape (len(tspan)-1, m).
      J: optional array of shape (len(tspan)-1, m, m).

        These optional arguments dW and J are for advanced use, if you want to
        use a specific realization of the m independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))
         With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
        Solutions of Stochastic Differential Equations
    """
    return _Roessler2010_SRK2(f, G, y0, tspan, Jmethod, dW, J)
0.001032
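A minimal usage sketch for stratSRS2 above; the drift and noise functions are invented for illustration, and numpy plus the surrounding sdeint-style module are assumed to be importable.

import numpy as np

# A toy 2-dimensional Stratonovich system with two noise channels (d = m = 2).
def f(y, t):
    return np.array([-y[0], -2.0 * y[1]])      # drift

def G(y, t):
    return np.array([[0.1, 0.0],
                     [0.0, 0.2]])              # constant noise coefficients

y0 = np.array([1.0, 1.0])
tspan = np.arange(0.0, 10.0, 0.005)
# y = stratSRS2(f, G, y0, tspan)   # y.shape == (len(tspan), 2)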
def recv(self, stream, crc_mode=1, retry=16, timeout=60, delay=1, quiet=0): ''' Receive a stream via the XMODEM protocol. >>> stream = file('/etc/issue', 'wb') >>> print modem.recv(stream) 2342 Returns the number of bytes received on success or ``None`` in case of failure. ''' # initiate protocol error_count = 0 char = 0 cancel = 0 while True: # first try CRC mode, if this fails, # fall back to checksum mode if error_count >= retry: self.abort(timeout=timeout) return None elif crc_mode and error_count < (retry / 2): if not self.putc(CRC): time.sleep(delay) error_count += 1 else: crc_mode = 0 if not self.putc(NAK): time.sleep(delay) error_count += 1 char = self.getc(1, timeout) if not char: error_count += 1 continue elif char == SOH: #crc_mode = 0 break elif char == STX: break elif char == CAN: if cancel: return None else: cancel = 1 else: error_count += 1 # read data error_count = 0 income_size = 0 packet_size = 128 sequence = 1 cancel = 0 while True: while True: if char == SOH: packet_size = 128 break elif char == STX: packet_size = 1024 break elif char == EOT: # We received an EOT, so send an ACK and return the received # data length self.putc(ACK) return income_size elif char == CAN: # cancel at two consecutive cancels if cancel: return None else: cancel = 1 else: if not quiet: print >> sys.stderr, \ 'recv ERROR expected SOH/EOT, got', ord(char) error_count += 1 if error_count >= retry: self.abort() return None # read sequence error_count = 0 cancel = 0 seq1 = ord(self.getc(1)) seq2 = 0xff - ord(self.getc(1)) if seq1 == sequence and seq2 == sequence: # sequence is ok, read packet # packet_size + checksum data = self.getc(packet_size + 1 + crc_mode, timeout) if crc_mode: csum = (ord(data[-2]) << 8) + ord(data[-1]) data = data[:-2] log.debug('CRC (%04x <> %04x)' % \ (csum, self.calc_crc(data))) valid = csum == self.calc_crc(data) else: csum = data[-1] data = data[:-1] log.debug('checksum (checksum(%02x <> %02x)' % \ (ord(csum), self.calc_checksum(data))) valid = ord(csum) == self.calc_checksum(data) # valid data, append chunk if valid: income_size += len(data) stream.write(data) self.putc(ACK) sequence = (sequence + 1) % 0x100 char = self.getc(1, timeout) continue else: # consume data self.getc(packet_size + 1 + crc_mode) self.debug('expecting sequence %d, got %d/%d' % \ (sequence, seq1, seq2)) # something went wrong, request retransmission self.putc(NAK)
0.002396
def add_suffix(fullname, suffix): """ Add suffix to a full file name""" name, ext = os.path.splitext(fullname) return name + '_' + suffix + ext
0.006452
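Two illustrative calls for add_suffix above; the inputs are made up, and the function plus its os import are assumed to be in scope.

assert add_suffix('report.pdf', 'v2') == 'report_v2.pdf'
# splitext only strips the final extension, so the suffix lands before '.gz':
assert add_suffix('/tmp/data.tar.gz', 'bak') == '/tmp/data.tar_bak.gz'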
def worker(job): """Run a single download job.""" ret = False try: if job.full_url is not None: req = requests.get(job.full_url, stream=True) ret = save_and_check(req, job.local_file, job.expected_checksum) if not ret: return ret ret = create_symlink(job.local_file, job.symlink_path) except KeyboardInterrupt: # pragma: no cover # TODO: Actually test this once I figure out how to do this in py.test logging.debug("Ignoring keyboard interrupt.") return ret
0.001776
def _get_resources(rtype='resources'):
    """ Resource types from the state summary include: resources,
        used_resources, offered_resources, reserved_resources and
        unreserved_resources. The default is resources.

        :param rtype: the type of resources to return
        :type rtype: str

        :return: resources (cpus, mem)
        :rtype: Resources
    """

    cpus = 0
    mem = 0
    summary = DCOSClient().get_state_summary()

    if 'slaves' in summary:
        agents = summary.get('slaves')
        for agent in agents:
            if agent[rtype].get('cpus') is not None:
                cpus += agent[rtype].get('cpus')
            if agent[rtype].get('mem') is not None:
                mem += agent[rtype].get('mem')

    return Resources(cpus, mem)
0.001179
def enable_runtime_notifications(self, resourceids): """Enable notification for specified resource ids""" idsarr = "" for ihcid in resourceids: idsarr += "<a:arrayItem>{id}</a:arrayItem>".format(id=ihcid) payload = """<enableRuntimeValueNotifications1 xmlns=\"utcs\" xmlns:a=\"http://www.w3.org/2001/XMLSchema\" xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\"> {arr} </enableRuntimeValueNotifications1> """.format(arr=idsarr) xdoc = self.connection.soap_action('/ws/ResourceInteractionService', 'enableRuntimeValueNotifications', payload) if not xdoc: return False return True
0.002342
def find_last(fileobj, serial, finishing=False):
    """Find the last page of the stream 'serial'.

    If the file is not multiplexed this function is fast. If it is,
    it must read the whole stream.

    This finds the last page in the actual file object, or the last
    page in the stream (with eos set), whichever comes first.

    If finishing is True it returns the last page which contains a packet
    finishing on it. If there exist pages but none with finishing packets
    returns None.

    Returns None in case no page with the serial exists.
    Raises error in case this isn't a valid ogg stream.
    Raises IOError.
    """

    # For non-muxed streams, look at the last page.
    seek_end(fileobj, 256 * 256)

    data = fileobj.read()
    try:
        index = data.rindex(b"OggS")
    except ValueError:
        raise error("unable to find final Ogg header")
    bytesobj = cBytesIO(data[index:])

    def is_valid(page):
        return not finishing or page.position != -1

    best_page = None
    try:
        page = OggPage(bytesobj)
    except error:
        pass
    else:
        if page.serial == serial and is_valid(page):
            if page.last:
                return page
            else:
                best_page = page
        else:
            best_page = None

    # The stream is muxed, so use the slow way.
    fileobj.seek(0)
    try:
        page = OggPage(fileobj)
        while True:
            if page.serial == serial:
                if is_valid(page):
                    best_page = page
                if page.last:
                    break
            page = OggPage(fileobj)
        return best_page
    except error:
        return best_page
    except EOFError:
        return best_page
0.001024
def limit_pos(p, se_pos, nw_pos): """ Limits position p to stay inside containing state :param p: Position to limit :param se_pos: Bottom/Right boundary :param nw_pos: Top/Left boundary :return: """ if p > se_pos: _update(p, se_pos) elif p < nw_pos: _update(p, nw_pos)
0.005495
def set_inifile(self, current, previous): """Set the configobj to the current index of the files_lv This is a slot for the currentChanged signal :param current: the modelindex of a inifilesmodel that should be set for the configobj_treev :type current: QModelIndex :param previous: the previous selected index :type previous: QModelIndex :returns: None :raises: None """ c = self.inimodel.data(current, self.inimodel.confobjRole) self.confobjmodel = ConfigObjModel(c) self.configobj_treev.setModel(self.confobjmodel) self.configobj_treev.expandAll() self.confobjmodel.dataChanged.connect(self.iniedited)
0.004184
def fit_creatine(self, reject_outliers=3.0, fit_lb=2.7, fit_ub=3.5): """ Fit a model to the portion of the summed spectra containing the creatine and choline signals. Parameters ---------- reject_outliers : float or bool If set to a float, this is the z score threshold for rejection (on any of the parameters). If set to False, no outlier rejection fit_lb, fit_ub : float What part of the spectrum (in ppm) contains the creatine peak. Default (2.7, 3.5) Note ---- We use upper and lower bounds that are a variation on the bounds mentioned on the GANNET ISMRM2013 poster [1]_. [1] RAE Edden et al (2013). Gannet GABA analysis toolkit. ISMRM conference poster. """ # We fit a two-lorentz function to this entire chunk of the spectrum, # to catch both choline and creatine model, signal, params = ana.fit_two_lorentzian(self.sum_spectra, self.f_ppm, lb=fit_lb, ub=fit_ub) # Use an array of ones to index everything but the outliers and nans: ii = np.ones(signal.shape[0], dtype=bool) # Reject outliers: if reject_outliers: model, signal, params, ii = self._outlier_rejection(params, model, signal, ii) # We'll keep around a private attribute to tell us which transients # were good (this is for both creatine and choline): self._cr_transients = np.where(ii) # Now we separate choline and creatine params from each other (remember # that they both share offset and drift!): self.choline_params = params[:, (0,2,4,6,8,9)] self.creatine_params = params[:, (1,3,5,7,8,9)] self.cr_idx = ut.make_idx(self.f_ppm, fit_lb, fit_ub) # We'll need to generate the model predictions from these parameters, # because what we're holding in 'model' is for both together: self.choline_model = np.zeros((self.creatine_params.shape[0], np.abs(self.cr_idx.stop-self.cr_idx.start))) self.creatine_model = np.zeros((self.choline_params.shape[0], np.abs(self.cr_idx.stop-self.cr_idx.start))) for idx in range(self.creatine_params.shape[0]): self.creatine_model[idx] = ut.lorentzian(self.f_ppm[self.cr_idx], *self.creatine_params[idx]) self.choline_model[idx] = ut.lorentzian(self.f_ppm[self.cr_idx], *self.choline_params[idx]) self.creatine_signal = signal self.creatine_auc = self._calc_auc(ut.lorentzian, self.creatine_params, self.cr_idx) self.choline_auc = self._calc_auc(ut.lorentzian, self.choline_params, self.cr_idx)
0.00613
def task_property_present_predicate(service, task, prop):
    """ True if the json_element passed is present for the
        task specified.
    """
    try:
        response = get_service_task(service, task)
    except Exception:
        # Treat any lookup failure as "property not present".
        response = None

    return (response is not None) and (prop in response)
0.003356
def replace_cluster_role(self, name, body, **kwargs):
    """
    replace the specified ClusterRole
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_cluster_role(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the ClusterRole (required)
    :param V1ClusterRole body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or fewer, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :return: V1ClusterRole
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.replace_cluster_role_with_http_info(name, body, **kwargs)
    else:
        (data) = self.replace_cluster_role_with_http_info(name, body, **kwargs)
        return data
0.003891
def train_set_producer(socket, train_archive, patch_archive, wnid_map): """Load/send images from the training set TAR file or patch images. Parameters ---------- socket : :class:`zmq.Socket` PUSH socket on which to send loaded images. train_archive : str or file-like object Filename or file handle for the TAR archive of training images. patch_archive : str or file-like object Filename or file handle for the TAR archive of patch images. wnid_map : dict A dictionary that maps WordNet IDs to 0-based class indices. Used to decode the filenames of the inner TAR files. """ patch_images = extract_patch_images(patch_archive, 'train') num_patched = 0 with tar_open(train_archive) as tar: for inner_tar_info in tar: with tar_open(tar.extractfile(inner_tar_info.name)) as inner: wnid = inner_tar_info.name.split('.')[0] class_index = wnid_map[wnid] filenames = sorted(info.name for info in inner if info.isfile()) images_gen = (load_from_tar_or_patch(inner, filename, patch_images) for filename in filenames) pathless_filenames = (os.path.split(fn)[-1] for fn in filenames) stream = equizip(pathless_filenames, images_gen) for image_fn, (image_data, patched) in stream: if patched: num_patched += 1 socket.send_pyobj((image_fn, class_index), zmq.SNDMORE) socket.send(image_data) if num_patched != len(patch_images): raise ValueError('not all patch images were used')
0.000545
async def unixlisten(path, onlink):
    '''
    Start a PF_UNIX server listening on the given path.
    '''
    info = {'path': path, 'unix': True}
    async def onconn(reader, writer):
        link = await Link.anit(reader, writer, info=info)
        link.schedCoro(onlink(link))
    return await asyncio.start_unix_server(onconn, path=path)
0.005831
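A rough usage sketch for unixlisten above; the socket path and handler are hypothetical, and Link is assumed to be defined in the same module.

import asyncio

async def onlink(link):
    # Called once per accepted connection with a ready Link object.
    print('new link', link)

async def main():
    server = await unixlisten('/tmp/example.sock', onlink)
    async with server:
        await server.serve_forever()

# asyncio.run(main())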
def pretty_tree(x, kids, show): """(a, (a -> list(a)), (a -> str)) -> str Returns a pseudographic tree representation of x similar to the tree command in Unix. """ (MID, END, CONT, LAST, ROOT) = (u'|-- ', u'`-- ', u'| ', u' ', u'') def rec(x, indent, sym): line = indent + sym + show(x) xs = kids(x) if len(xs) == 0: return line else: if sym == MID: next_indent = indent + CONT elif sym == ROOT: next_indent = indent + ROOT else: next_indent = indent + LAST syms = [MID] * (len(xs) - 1) + [END] lines = [rec(x, next_indent, sym) for x, sym in zip(xs, syms)] return u'\n'.join([line] + lines) return rec(x, u'', ROOT)
0.002451
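A self-contained usage sketch for pretty_tree above, using (label, children) tuples invented for illustration.

tree = ('root', [('a', [('a1', []), ('a2', [])]),
                 ('b', [])])

print(pretty_tree(tree,
                  kids=lambda node: node[1],
                  show=lambda node: node[0]))
# Expected shape (exact indentation depends on the CONT/LAST constants above):
# root
# |-- a
# |   |-- a1
# |   `-- a2
# `-- b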
def create(self, data):
    """
    Send a create request for the dataset to the server and return the
    parsed JSON response, or None if the request fails.
    """
    try:
        floyd_logger.info("Making create request to server...")
        post_body = data.to_dict()
        post_body["resumable"] = True
        response = self.request("POST", self.url, json=post_body)
        return response.json()
    except BadRequestException as e:
        if 'Dataset not found, ID' in e.message:
            floyd_logger.error(
                'Data create: ERROR! Please run "floyd data init DATASET_NAME" before upload.')
        else:
            floyd_logger.error('Data create: ERROR! %s', e.message)
        return None
    except FloydException as e:
        floyd_logger.error("Data create: ERROR! %s", e.message)
        return None
0.003341
def compile_dictionary(self, lang, wordlists, encoding, output): """Compile user dictionary.""" cmd = [ self.binary, '--lang', lang, '--encoding', codecs.lookup(filters.PYTHON_ENCODING_NAMES.get(encoding, encoding).lower()).name, 'create', 'master', output ] wordlist = '' try: output_location = os.path.dirname(output) if not os.path.exists(output_location): os.makedirs(output_location) if os.path.exists(output): os.remove(output) self.log("Compiling Dictionary...", 1) # Read word lists and create a unique set of words words = set() for wordlist in wordlists: with open(wordlist, 'rb') as src: for word in src.read().split(b'\n'): words.add(word.replace(b'\r', b'')) # Compile wordlist against language util.call( [ self.binary, '--lang', lang, '--encoding=utf-8', 'create', 'master', output ], input_text=b'\n'.join(sorted(words)) + b'\n' ) except Exception: self.log(cmd, 0) self.log("Current wordlist: '%s'" % wordlist, 0) self.log("Problem compiling dictionary. Check the binary path and options.", 0) raise
0.002602