text: string (lengths 78 to 104k characters)
score: float64 (range 0 to 0.18)
def get_region():
    """Gets the AWS Region ID for this system

    :return: (str) AWS Region ID where this system lives
    """
    log = logging.getLogger(mod_logger + '.get_region')

    # First get the availability zone
    availability_zone = get_availability_zone()

    if availability_zone is None:
        msg = 'Unable to determine the Availability Zone for this system, cannot determine the AWS Region'
        log.error(msg)
        return

    # Strip off the last character to get the region
    region = availability_zone[:-1]
    return region
0.003578
def query( self, query, job_config=None, job_id=None, job_id_prefix=None, location=None, project=None, retry=DEFAULT_RETRY, ): """Run a SQL query. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query Arguments: query (str): SQL query to be executed. Defaults to the standard SQL dialect. Use the ``job_config`` parameter to change dialects. Keyword Arguments: job_config (google.cloud.bigquery.job.QueryJobConfig): (Optional) Extra configuration options for the job. To override any options that were previously set in the ``default_query_job_config`` given to the ``Client`` constructor, manually set those options to ``None``, or whatever value is preferred. job_id (str): (Optional) ID to use for the query job. job_id_prefix (str): (Optional) The prefix to use for a randomly generated job ID. This parameter will be ignored if a ``job_id`` is also given. location (str): Location where to run the job. Must match the location of the any table used in the query as well as the destination table. project (str): Project ID of the project of where to run the job. Defaults to the client's project. retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: google.cloud.bigquery.job.QueryJob: A new query job instance. """ job_id = _make_job_id(job_id, job_id_prefix) if project is None: project = self.project if location is None: location = self.location if self._default_query_job_config: if job_config: # anything that's not defined on the incoming # that is in the default, # should be filled in with the default # the incoming therefore has precedence job_config = job_config._fill_from_default( self._default_query_job_config ) else: job_config = self._default_query_job_config job_ref = job._JobReference(job_id, project=project, location=location) query_job = job.QueryJob(job_ref, query, client=self, job_config=job_config) query_job._begin(retry=retry) return query_job
0.00152
def _extract_from_html(msg_body): """ Extract not quoted message from provided html message body using tags and plain text algorithm. Cut out first some encoding html tags such as xml and doctype for avoiding conflict with unicode decoding Cut out the 'blockquote', 'gmail_quote' tags. Cut Microsoft quotations. Then use plain text algorithm to cut out splitter or leftover quotation. This works by adding checkpoint text to all html tags, then converting html to text, then extracting quotations from text, then checking deleted checkpoints, then deleting necessary tags. """ if msg_body.strip() == b'': return msg_body msg_body = msg_body.replace(b'\r\n', b'\n') msg_body = re.sub(r"\<\?xml.+\?\>|\<\!DOCTYPE.+]\>", "", msg_body) html_tree = html_document_fromstring(msg_body) if html_tree is None: return msg_body cut_quotations = (html_quotations.cut_gmail_quote(html_tree) or html_quotations.cut_zimbra_quote(html_tree) or html_quotations.cut_blockquote(html_tree) or html_quotations.cut_microsoft_quote(html_tree) or html_quotations.cut_by_id(html_tree) or html_quotations.cut_from_block(html_tree) ) html_tree_copy = deepcopy(html_tree) number_of_checkpoints = html_quotations.add_checkpoint(html_tree, 0) quotation_checkpoints = [False] * number_of_checkpoints plain_text = html_tree_to_text(html_tree) plain_text = preprocess(plain_text, '\n', content_type='text/html') lines = plain_text.splitlines() # Don't process too long messages if len(lines) > MAX_LINES_COUNT: return msg_body # Collect checkpoints on each line line_checkpoints = [ [int(i[4:-4]) # Only checkpoint number for i in re.findall(html_quotations.CHECKPOINT_PATTERN, line)] for line in lines] # Remove checkpoints lines = [re.sub(html_quotations.CHECKPOINT_PATTERN, '', line) for line in lines] # Use plain text quotation extracting algorithm markers = mark_message_lines(lines) return_flags = [] process_marked_lines(lines, markers, return_flags) lines_were_deleted, first_deleted, last_deleted = return_flags if not lines_were_deleted and not cut_quotations: return msg_body if lines_were_deleted: #collect checkpoints from deleted lines for i in range(first_deleted, last_deleted): for checkpoint in line_checkpoints[i]: quotation_checkpoints[checkpoint] = True # Remove tags with quotation checkpoints html_quotations.delete_quotation_tags( html_tree_copy, 0, quotation_checkpoints ) if _readable_text_empty(html_tree_copy): return msg_body return html.tostring(html_tree_copy)
0.000681
def permalink(func):
    """
    Decorator that calls app_reverse()

    Use this instead of standard django.db.models.permalink if you want to
    integrate the model through ApplicationContent. The wrapped function
    must return 4 instead of 3 arguments::

        class MyModel(models.Model):
            @appmodels.permalink
            def get_absolute_url(self):
                return ('myapp.urls', 'model_detail', (), {'slug': self.slug})
    """
    def inner(*args, **kwargs):
        return app_reverse(*func(*args, **kwargs))
    return wraps(func)(inner)
0.001761
def create_listening_socket(host, port, handler):
    """
    Create socket and set listening options
    :param host:
    :param port:
    :param handler:
    :return:
    """
    sock = socket.socket()
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((host, port))
    sock.listen(1)

    GObject.io_add_watch(sock, GObject.IO_IN, handler)
    return sock
0.002591
def sendRobust( signal=Any, sender=Anonymous, *arguments, **named ): """Send signal from sender to all connected receivers catching errors signal -- (hashable) signal value, see connect for details sender -- the sender of the signal if Any, only receivers registered for Any will receive the message. if Anonymous, only receivers registered to receive messages from Anonymous or Any will receive the message Otherwise can be any python object (normally one registered with a connect if you actually want something to occur). arguments -- positional arguments which will be passed to *all* receivers. Note that this may raise TypeErrors if the receivers do not allow the particular arguments. Note also that arguments are applied before named arguments, so they should be used with care. named -- named arguments which will be filtered according to the parameters of the receivers to only provide those acceptable to the receiver. Return a list of tuple pairs [(receiver, response), ... ] if any receiver raises an error (specifically any subclass of Exception), the error instance is returned as the result for that receiver. """ # Call each receiver with whatever arguments it can accept. # Return a list of tuple pairs [(receiver, response), ... ]. responses = [] for receiver in liveReceivers(getAllReceivers(sender, signal)): try: response = robustApply( receiver, signal=signal, sender=sender, *arguments, **named ) except Exception as err: responses.append((receiver, err)) else: responses.append((receiver, response)) return responses
0.002654
def setGameScore(self, user_id, score, game_message_identifier,
                 force=None,
                 disable_edit_message=None):
    """
    See: https://core.telegram.org/bots/api#setgamescore

    :param game_message_identifier:
        Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
    """
    p = _strip(locals(), more=['game_message_identifier'])
    p.update(_dismantle_message_identifier(game_message_identifier))
    return self._api_request('setGameScore', _rectify(p))
0.009346
def p_queue(p):
    """
    queue : QUEUE COLON LIFO
          | QUEUE COLON FIFO
    """
    if p[3] == "LIFO":
        p[0] = {"queue": LIFO()}
    elif p[3] == "FIFO":
        p[0] = {"queue": FIFO()}
    else:
        # p[3] holds the unsupported queue discipline token
        raise RuntimeError("Queue discipline '%s' is not supported!" % p[3])
0.003425
def create_install_template_skin(self):
    """
    Create an example ckan extension for this environment and install it
    """
    ckan_extension_template(self.name, self.target)
    self.install_package_develop('ckanext-' + self.name + 'theme')
0.007491
def _delete_state(self, activity, agent, state_id=None, registration=None, etag=None): """Private method to delete a specified state from the LRS :param activity: Activity object of state to be deleted :type activity: :class:`tincan.activity.Activity` :param agent: Agent object of state to be deleted :type agent: :class:`tincan.agent.Agent` :param state_id: UUID of state to be deleted :type state_id: str | unicode :param registration: registration UUID of state to be deleted :type registration: str | unicode :param etag: etag of state to be deleted :type etag: str | unicode :return: LRS Response object with deleted state as content :rtype: :class:`tincan.lrs_response.LRSResponse` """ if not isinstance(activity, Activity): activity = Activity(activity) if not isinstance(agent, Agent): agent = Agent(agent) request = HTTPRequest( method="DELETE", resource="activities/state" ) if etag is not None: request.headers["If-Match"] = etag request.query_params = { "activityId": activity.id, "agent": agent.to_json(self.version) } if state_id is not None: request.query_params["stateId"] = state_id if registration is not None: request.query_params["registration"] = registration lrs_response = self._send_request(request) return lrs_response
0.001923
def start_watcher_thread(self):
    """
    Start watcher thread.

    :return: Watcher thread object.
    """
    # Create watcher thread
    watcher_thread = threading.Thread(target=self.run_watcher)

    # If the reload mode is `spawn_wait`
    if self._reload_mode == self.RELOAD_MODE_V_SPAWN_WAIT:
        # Use non-daemon thread
        daemon = False
    # If the reload mode is not `spawn_wait`
    else:
        # Use daemon thread
        daemon = True

    # Set whether the thread is daemon
    watcher_thread.setDaemon(daemon)

    # Start watcher thread
    watcher_thread.start()

    # Return watcher thread
    return watcher_thread
0.002695
def transmit(self, payload, **kwargs):
    """
    Transmit content metadata items to the integrated channel.
    """
    items_to_create, items_to_update, items_to_delete, transmission_map = self._partition_items(payload)
    self._transmit_delete(items_to_delete)
    self._transmit_create(items_to_create)
    self._transmit_update(items_to_update, transmission_map)
0.007557
def execute(self, kv, **kwargs): """ Execute the operation scheduling items as needed :param kv: An iterable of keys (or key-values, or Items) :param kwargs: Settings for the operation :return: A MultiResult object """ self._verify_iter(kv) if not len(kv): raise ArgumentError.pyexc(obj=kv, message="No items in container") if isinstance(kv, dict): is_dict = True try: kviter = kv.iteritems() except AttributeError: kviter = iter(kv.items()) else: is_dict = False kviter = iter(kv) is_itmcoll = isinstance(kv, ItemCollection) mres = kwargs.get('_MRES') if mres is None: mres = self.parent._make_mres() self.set_mres_flags(mres, kwargs) C.lcb_sched_enter(self.instance) num_items = 0 while True: # Clear the previous command object C.memset(self.c_command, 0, ffi.sizeof(self.c_command[0])) try: self._invoke_submit(kviter, is_dict, is_itmcoll, mres, kwargs) num_items += 1 except StopIteration: break except: C.lcb_sched_fail(self.instance) raise C.lcb_sched_leave(self.instance) mres._remaining += num_items # print "Execute(): mres:", mres return mres
0.002022
def letter2num(letters, zbase=False): """A = 1, C = 3 and so on. Convert spreadsheet style column enumeration to a number. Answers: A = 1, Z = 26, AA = 27, AZ = 52, ZZ = 702, AMJ = 1024 >>> from channelpack.pullxl import letter2num >>> letter2num('A') == 1 True >>> letter2num('Z') == 26 True >>> letter2num('AZ') == 52 True >>> letter2num('ZZ') == 702 True >>> letter2num('AMJ') == 1024 True >>> letter2num('AMJ', zbase=True) == 1023 True >>> letter2num('A', zbase=True) == 0 True """ letters = letters.upper() res = 0 weight = len(letters) - 1 assert weight >= 0, letters for i, c in enumerate(letters): assert 65 <= ord(c) <= 90, c # A-Z res += (ord(c) - 64) * 26**(weight - i) if not zbase: return res return res - 1
0.001166
def get(cls, attachment_public_uuid, custom_headers=None): """ Get a specific attachment's metadata through its UUID. The Content-Type header of the response will describe the MIME type of the attachment file. :type api_context: context.ApiContext :type attachment_public_uuid: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseAttachmentPublic """ if custom_headers is None: custom_headers = {} api_client = client.ApiClient(cls._get_api_context()) endpoint_url = cls._ENDPOINT_URL_READ.format(attachment_public_uuid) response_raw = api_client.get(endpoint_url, {}, custom_headers) return BunqResponseAttachmentPublic.cast_from_bunq_response( cls._from_json(response_raw, cls._OBJECT_TYPE_GET) )
0.002323
def to_python(self, value):
    """Convert value if needed."""
    if isinstance(value, GroupDescriptor):
        value = value._value  # pylint: disable=protected-access
    result = {}
    for name, field in self.fields.items():
        result[name] = field.to_python(value.get(name, None))
    return GroupDescriptor(result)
0.005602
def extractor(url): """Extract details from the response body.""" response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed) if clone: mirror(url, response) matches = rhref.findall(response) for link in matches: # Remove everything after a "#" to deal with in-page anchors link = link[1].replace('\'', '').replace('"', '').split('#')[0] # Checks if the URLs should be crawled if is_link(link, processed, files): if link[:4] == 'http': if link.startswith(main_url): verb('Internal page', link) internal.add(link) else: verb('External page', link) external.add(link) elif link[:2] == '//': if link.split('/')[2].startswith(host): verb('Internal page', link) internal.add(schema + '://' + link) else: verb('External page', link) external.add(link) elif link[:1] == '/': verb('Internal page', link) internal.add(remove_file(url) + link) else: verb('Internal page', link) usable_url = remove_file(url) if usable_url.endswith('/'): internal.add(usable_url + link) elif link.startswith('/'): internal.add(usable_url + link) else: internal.add(usable_url + '/' + link) if not only_urls: intel_extractor(url, response) js_extractor(response) if args.regex and not supress_regex: regxy(args.regex, response, supress_regex, custom) if api: matches = rentropy.findall(response) for match in matches: if entropy(match) >= 4: verb('Key', match) keys.add(url + ': ' + match)
0.000969
def create_log_entry(self, log_entry_form): """Creates a new ``LogEntry``. arg: log_entry_form (osid.logging.LogEntryForm): the form for this ``LogEntry`` return: (osid.logging.LogEntry) - the new ``LogEntry`` raise: IllegalState - ``log_entry_form`` already used in a create transaction raise: InvalidArgument - one or more of the form elements is invalid raise: NullArgument - ``log_entry_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``log_entry_form`` did not originate from ``get_log_entry_form_for_create()`` *compliance: mandatory -- This method must be implemented.* """ collection = JSONClientValidated('logging', collection='LogEntry', runtime=self._runtime) if not isinstance(log_entry_form, ABCLogEntryForm): raise errors.InvalidArgument('argument type is not an LogEntryForm') if log_entry_form.is_for_update(): raise errors.InvalidArgument('the LogEntryForm is for update only, not create') try: if self._forms[log_entry_form.get_id().get_identifier()] == CREATED: raise errors.IllegalState('log_entry_form already used in a create transaction') except KeyError: raise errors.Unsupported('log_entry_form did not originate from this session') if not log_entry_form.is_valid(): raise errors.InvalidArgument('one or more of the form elements is invalid') if 'timestamp' not in log_entry_form._my_map or log_entry_form._my_map['timestamp'] is None: log_entry_form._my_map['timestamp'] = DateTime.utcnow() log_entry_form._my_map['agentId'] = str(self.get_effective_agent_id()) insert_result = collection.insert_one(log_entry_form._my_map) self._forms[log_entry_form.get_id().get_identifier()] = CREATED result = objects.LogEntry( osid_object_map=collection.find_one({'_id': insert_result.inserted_id}), runtime=self._runtime, proxy=self._proxy) return result
0.004292
def org_set_member_access(object_id, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /org-xxxx/setMemberAccess API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2FsetMemberAccess
    """
    return DXHTTPRequest('/%s/setMemberAccess' % object_id, input_params, always_retry=always_retry, **kwargs)
0.01005
def register_db(cls, dbname):
    """Register method to keep list of dbs."""
    def decorator(subclass):
        """Register as decorator function."""
        cls._dbs[dbname] = subclass
        subclass.name = dbname
        return subclass
    return decorator
0.006873
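A minimal, self-contained sketch of how a registry decorator like register_db above could be used, assuming it is defined as a classmethod on a base class that owns the _dbs dict; BaseDB and SQLiteDB are illustrative names, not part of the original source.

class BaseDB:
    _dbs = {}  # hypothetical registry corresponding to cls._dbs above

    @classmethod
    def register_db(cls, dbname):
        def decorator(subclass):
            cls._dbs[dbname] = subclass
            subclass.name = dbname
            return subclass
        return decorator


@BaseDB.register_db('sqlite')
class SQLiteDB(BaseDB):
    pass


assert BaseDB._dbs['sqlite'] is SQLiteDB
assert SQLiteDB.name == 'sqlite'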
def use_categories_as_metadata(self): ''' Returns a TermDocMatrix which is identical to self except the metadata values are now identical to the categories present. :return: TermDocMatrix ''' new_metadata_factory = CSRMatrixFactory() for i, category_idx in enumerate(self.get_category_ids()): new_metadata_factory[i, category_idx] = 1 new_metadata = new_metadata_factory.get_csr_matrix() new_tdm = self._make_new_term_doc_matrix(self._X, new_metadata, self._y, self._term_idx_store, self._category_idx_store, copy(self._category_idx_store), self._y == self._y) return new_tdm
0.004154
def pip_version_check(session): """Check for an update for pip. Limit the frequency of checks to once per week. State is stored either in the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix of the pip script path. """ import pip # imported here to prevent circular imports pypi_version = None try: state = load_selfcheck_statefile() current_time = datetime.datetime.utcnow() # Determine if we need to refresh the state if "last_check" in state.state and "pypi_version" in state.state: last_check = datetime.datetime.strptime( state.state["last_check"], SELFCHECK_DATE_FMT ) if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60: pypi_version = state.state["pypi_version"] # Refresh the version if we need to or just see if we need to warn if pypi_version is None: resp = session.get( PyPI.pip_json_url, headers={"Accept": "application/json"}, ) resp.raise_for_status() pypi_version = [ v for v in sorted( list(resp.json()["releases"]), key=packaging_version.parse, ) if not packaging_version.parse(v).is_prerelease ][-1] # save that we've performed a check state.save(pypi_version, current_time) pip_version = packaging_version.parse(pip.__version__) remote_version = packaging_version.parse(pypi_version) # Determine if our pypi_version is older if (pip_version < remote_version and pip_version.base_version != remote_version.base_version): # Advise "python -m pip" on Windows to avoid issues # with overwriting pip.exe. if WINDOWS: pip_cmd = "python -m pip" else: pip_cmd = "pip" logger.warning( "You are using pip version %s, however version %s is " "available.\nYou should consider upgrading via the " "'%s install --upgrade pip' command." % (pip.__version__, pypi_version, pip_cmd) ) except Exception: logger.debug( "There was an error checking the latest version of pip", exc_info=True, )
0.00039
def set_reviewing(self, hit_id, revert=None):
    """
    Update a HIT with a status of Reviewable to have a status of Reviewing,
    or reverts a Reviewing HIT back to the Reviewable status.

    Only HITs with a status of Reviewable can be updated with a status of
    Reviewing.  Similarly, only Reviewing HITs can be reverted back to a
    status of Reviewable.
    """
    params = {'HITId': hit_id}
    if revert:
        params['Revert'] = revert
    return self._process_request('SetHITAsReviewing', params)
0.008897
def list_udas(self, database=None, like=None):
    """
    Lists all UDAFs associated with a given database

    Parameters
    ----------
    database : string
    like : string for searching (optional)
    """
    if not database:
        database = self.current_database
    statement = ddl.ListFunction(database, like=like, aggregate=True)
    with self._execute(statement, results=True) as cur:
        result = self._get_udfs(cur, udf.ImpalaUDA)
    return result
0.003831
def cast(self, **kwargs):
    """Get the cast for a movie specified by id from the API.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    path = self._get_id_path('cast')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
0.005848
def step(self, action): """ Run one frame of the NES and return the relevant observation data. Args: action (byte): the bitmap determining which buttons to press Returns: a tuple of: - state (np.ndarray): next frame as a result of the given action - reward (float) : amount of reward returned after given action - done (boolean): whether the episode has ended - info (dict): contains auxiliary diagnostic information """ # if the environment is done, raise an error if self.done: raise ValueError('cannot step in a done environment! call `reset`') # set the action on the controller self.controllers[0][:] = action # pass the action to the emulator as an unsigned byte _LIB.Step(self._env) # get the reward for this step reward = self._get_reward() # get the done flag for this step self.done = self._get_done() # get the info for this step info = self._get_info() # call the after step callback self._did_step(self.done) # bound the reward in [min, max] if reward < self.reward_range[0]: reward = self.reward_range[0] elif reward > self.reward_range[1]: reward = self.reward_range[1] # return the screen from the emulator and other relevant data return self.screen, reward, self.done, info
0.00134
def fit_quadrature(orth, nodes, weights, solves, retall=False, norms=None, **kws): """ Using spectral projection to create a polynomial approximation over distribution space. Args: orth (chaospy.poly.base.Poly): Orthogonal polynomial expansion. Must be orthogonal for the approximation to be accurate. nodes (numpy.ndarray): Where to evaluate the polynomial expansion and model to approximate. ``nodes.shape==(D,K)`` where ``D`` is the number of dimensions and ``K`` is the number of nodes. weights (numpy.ndarray): Weights when doing numerical integration. ``weights.shape == (K,)`` must hold. solves (numpy.ndarray): The model evaluation to approximate. If `numpy.ndarray` is provided, it must have ``len(solves) == K``. If callable, it must take a single argument X with ``len(X) == D``, and return a consistent numpy compatible shape. norms (numpy.ndarray): In the of TTR using coefficients to estimate the polynomial norm is more stable than manual calculation. Calculated using quadrature if no provided. ``norms.shape == (len(orth),)`` must hold. Returns: (chaospy.poly.base.Poly): Fitted model approximation in the form of an polynomial. """ orth = chaospy.poly.Poly(orth) nodes = numpy.asfarray(nodes) weights = numpy.asfarray(weights) if callable(solves): solves = [solves(node) for node in nodes.T] solves = numpy.asfarray(solves) shape = solves.shape solves = solves.reshape(weights.size, int(solves.size/weights.size)) ovals = orth(*nodes) vals1 = [(val*solves.T*weights).T for val in ovals] if norms is None: norms = numpy.sum(ovals**2*weights, -1) else: norms = numpy.array(norms).flatten() assert len(norms) == len(orth) coefs = (numpy.sum(vals1, 1).T/norms).T coefs = coefs.reshape(len(coefs), *shape[1:]) approx_model = chaospy.poly.transpose(chaospy.poly.sum(orth*coefs.T, -1)) if retall: return approx_model, coefs return approx_model
0.000902
def opp_stats(self, year):
    """Returns a Series (dict-like) of the team's opponent's stats from the
    team-season page.

    :year: Int representing the season.
    :returns: A Series of team stats.
    """
    doc = self.get_year_doc(year)
    table = doc('table#team_stats')
    df = sportsref.utils.parse_table(table)
    return df.loc[df.player_id == 'Opp. Stats'].iloc[0]
0.004796
def apply(self, compound, orientation='', compound_port=''): """Arrange copies of a Compound as specified by the Pattern. Parameters ---------- compound orientation Returns ------- """ compounds = list() if self.orientations.get(orientation): for port in self.orientations[orientation]: new_compound = clone(compound) new_port = new_compound.labels[compound_port] (new_compound, new_port['up'], port['up']) compounds.append(new_compound) else: for point in self.points: new_compound = clone(compound) new_compound.translate(point) compounds.append(new_compound) return compounds
0.002454
def find(self, name=None, **attrs): r"""First descendant node matching criteria. Returns None if no descendant node found. :return: descendant node matching criteria :rtype: Union[None,TexExpr] >>> from TexSoup import TexSoup >>> soup = TexSoup(r''' ... \section{Ooo} ... \textit{eee} ... \textit{ooo}''') >>> soup.find('textit') \textit{eee} >>> soup.find('textbf') """ try: return next(self.find_all(name, **attrs)) except StopIteration: return None
0.003344
def _Moran_BV_Matrix_array(variables, w, permutations=0, varnames=None):
    """
    Base calculation for MORAN_BV_Matrix
    """
    k = len(variables)
    # k must be known before building the default variable names
    if varnames is None:
        varnames = ['x{}'.format(i) for i in range(k)]
    rk = list(range(0, k - 1))
    results = {}
    for i in rk:
        for j in range(i + 1, k):
            y1 = variables[i]
            y2 = variables[j]
            results[i, j] = Moran_BV(y1, y2, w, permutations=permutations)
            results[j, i] = Moran_BV(y2, y1, w, permutations=permutations)
            results[i, j].varnames = {'x': varnames[i], 'y': varnames[j]}
            results[j, i].varnames = {'x': varnames[j], 'y': varnames[i]}
    return results
0.00141
def read_array(fname, sep=','):
    r"""
    Convert a CSV file without header into a numpy array of floats.

    >>> from openquake.baselib.general import gettemp
    >>> print(read_array(gettemp('.1 .2, .3 .4, .5 .6\n')))
    [[[0.1 0.2]
      [0.3 0.4]
      [0.5 0.6]]]
    """
    with open(fname) as f:
        records = []
        for line in f:
            row = line.split(sep)
            record = [list(map(float, col.split())) for col in row]
            records.append(record)
        return numpy.array(records)
0.001905
def _findSwiplWin(): import re """ This function uses several heuristics to gues where SWI-Prolog is installed in Windows. It always returns None as the path of the resource file because, in Windows, the way to find it is more robust so the SWI-Prolog DLL is always able to find it. :returns: A tuple of (path to the swipl DLL, path to the resource file) :returns type: ({str, None}, {str, None}) """ dllNames = ('swipl.dll', 'libswipl.dll') # First try: check the usual installation path (this is faster but # hardcoded) programFiles = os.getenv('ProgramFiles') paths = [os.path.join(programFiles, r'pl\bin', dllName) for dllName in dllNames] for path in paths: if os.path.exists(path): return (path, None) # Second try: use the find_library path = _findSwiplPathFromFindLib() if path is not None and os.path.exists(path): return (path, None) # Third try: use reg.exe to find the installation path in the registry # (reg should be installed in all Windows XPs) try: cmd = Popen(['reg', 'query', r'HKEY_LOCAL_MACHINE\Software\SWI\Prolog', '/v', 'home'], stdout=PIPE) ret = cmd.communicate() # Result is like: # ! REG.EXE VERSION 3.0 # # HKEY_LOCAL_MACHINE\Software\SWI\Prolog # home REG_SZ C:\Program Files\pl # (Note: spaces may be \t or spaces in the output) ret = ret[0].splitlines() ret = [line.decode("utf-8") for line in ret if len(line) > 0] pattern = re.compile('[^h]*home[^R]*REG_SZ( |\t)*(.*)$') match = pattern.match(ret[-1]) if match is not None: path = match.group(2) paths = [os.path.join(path, 'bin', dllName) for dllName in dllNames] for path in paths: if os.path.exists(path): return (path, None) except OSError: # reg.exe not found? Weird... pass # May the exec is on path? (path, swiHome) = _findSwiplFromExec() if path is not None: return (path, swiHome) # Last try: maybe it is in the current dir for dllName in dllNames: if os.path.exists(dllName): return (dllName, None) return (None, None)
0.001687
def generateLatticeFile(self, beamline, filename=None, format='elegant'): """ generate simulation files for lattice analysis, e.g. ".lte" for elegant, ".madx" for madx input parameters: :param beamline: keyword for beamline :param filename: name of lte/mad file, if None, output to stdout; if 'sio', output to a string as return value; other cases, output to filename; :param format: madx, elegant, 'elegant' by default, generated lattice is for elegant tracking """ """ if not self.isBeamline(beamline): print("%s is a valid defined beamline, do not process." % (beamline)) return False """ if filename is None: f = sys.stdout elif filename == 'sio': f = StringIO() else: f = open(os.path.expanduser(filename), 'w') # write filehead, mainly resolving prefix string lines cl1 = "This file is automatically generated by 'generateLatticeFile()' method," cl2 = 'could be used as ' + format + ' lattice file.' cl3 = 'Author: Tong Zhang ([email protected])' cl4 = 'Generated Date: ' + time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime()) f.write('!{str1:<73s}!\n'.format(str1='-' * 73)) f.write('!{str1:^73s}!\n'.format(str1=cl1)) f.write('!{str1:^73s}!\n'.format(str1=cl2)) f.write('!{str1:^73s}!\n'.format(str1='-' * 24)) f.write('!{str1:^73s}!\n'.format(str1=cl3)) f.write('!{str1:^73s}!\n'.format(str1=cl4)) f.write('!{str1:<73s}!\n'.format(str1='-' * 73)) f.write('\n') """ do not need to dump stoed variables now, 2016-03-21 # write global variables f.write('! {str1:<73s}\n'.format(str1= 'Global variable definitions:')) f.write('\n'.join(self.all_elements['_prefixstr'])) f.write('\n') f.write('\n') """ # write EPICS control configuration part if contains '_epics' key if '_epics' in self.all_elements: f.write('! {str1:<73s}\n'.format(str1='EPICS control definitions:')) for k, v in self.all_elements['_epics'].items(): f.write('!!epics {k:<10s}:{v:>50s}\n'.format(k=k, v=json.dumps(v))) f.write('\n') # write element definitions and lattice f.write('! {str1:<72s}\n'.format(str1='Element definitions:')) elelist = self.getFullBeamline(beamline, extend=True) if self.getElementType(elelist[0]) != 'CHARGE': elelist.insert(0, self.getChargeElement()) for ele in sorted(set(elelist)): elestring = self.rinseElement(ele)['name'] f.write(self.formatElement(elestring, format='elegant') + '\n') # write beamline lattice definition f.write('\n') f.write('! {str1:<72s}\n'.format(str1='Beamline definitions:')) f.write('{bl:<10s}: line = ({lattice})'.format(bl=beamline.upper(), lattice=', '.join(elelist))) if filename == 'sio': retval = f.getvalue() else: retval = True f.close() # if everything's OK, return True or string ('sio') mode return retval
0.002374
def is_active(self, key, *instances, **kwargs): """ Returns ``True`` if any of ``instances`` match an active switch. Otherwise returns ``False``. >>> operator.is_active('my_feature', request) #doctest: +SKIP """ try: default = kwargs.pop('default', False) # Check all parents for a disabled state parts = key.split(':') if len(parts) > 1: child_kwargs = kwargs.copy() child_kwargs['default'] = None result = self.is_active(':'.join(parts[:-1]), *instances, **child_kwargs) if result is False: return result elif result is True: default = result try: switch = self[key] except KeyError: # switch is not defined, defer to parent return default if switch.status == GLOBAL: return True elif switch.status == DISABLED: return False elif switch.status == INHERIT: return default conditions = switch.value # If no conditions are set, we inherit from parents if not conditions: return default instances = list(instances) if instances else [] instances.extend(self.context.values()) # check each switch to see if it can execute return_value = False for namespace, condition in conditions.iteritems(): condition_set = registry_by_namespace.get(namespace) if not condition_set: continue result = condition_set.has_active_condition(condition, instances) if result is False: return False elif result is True: return_value = True except: log.exception('Error checking if switch "%s" is active', key) return_value = False # there were no matching conditions, so it must not be enabled return return_value
0.001321
def max_width(self):
    """Get maximum width of progress bar

    :rtype: int
    :returns: Maximum column width of progress bar
    """
    value, unit = float(self._width_str[:-1]), self._width_str[-1]

    ensure(unit in ["c", "%"], ValueError,
           "Width unit must be either 'c' or '%'")

    if unit == "c":
        ensure(value <= self.columns, ValueError,
               "Terminal only has {} columns, cannot draw "
               "bar of size {}.".format(self.columns, value))
        retval = value
    else:  # unit == "%"
        ensure(0 < value <= 100, ValueError,
               "value=={} does not satisfy 0 < value <= 100".format(value))
        dec = value / 100
        retval = dec * self.columns

    return floor(retval)
0.002436
def itemsize(obj, **opts):
    '''Return the item size of an object (in bytes).

       See function **basicsize** for a description of the options.
    '''
    v = _typedefof(obj, **opts)
    if v:
        v = v.item
    return v
0.004219
def convert_response(check_response, project_id): """Computes a http status code and message `CheckResponse` The return value a tuple (code, message, api_key_is_bad) where code: is the http status code message: is the message to return api_key_is_bad: indicates that a given api_key is bad Args: check_response (:class:`endpoints_management.gen.servicecontrol_v1_messages.CheckResponse`): the response from calling an api Returns: tuple(code, message, bool) """ if not check_response or not check_response.checkErrors: return _IS_OK # only check the first error for now, as per ESP theError = check_response.checkErrors[0] error_tuple = _CHECK_ERROR_CONVERSION.get(theError.code, _IS_UNKNOWN) if error_tuple[1].find(u'{') == -1: # no replacements needed: return error_tuple updated_msg = error_tuple[1].format(project_id=project_id, detail=theError.detail or u'') return error_tuple[0], updated_msg, error_tuple[2]
0.002941
def _handle_api(self, handler, handler_args, handler_kwargs):
    """ Handle call to subclasses and convert the output to an appropriate value """
    try:
        status_code, return_value = handler(*handler_args, **handler_kwargs)
    except APIError as error:
        return error.send()

    web.ctx.status = _convert_http_status(status_code)
    return _api_convert_output(return_value)
0.009547
def compute_score(self): """Calculate the overall test score using the configuration.""" # LOGGER.info("Begin scoring") cases = self.get_configured_tests() | set(self.result.cases) scores = DataFrame({"score": 0.0, "max": 1.0}, index=sorted(cases)) self.result.setdefault("score", dict()) self.result["score"]["sections"] = list() # Calculate the scores for each test individually. for test, result in iteritems(self.result.cases): # LOGGER.info("Calculate score for test: '%s'.", test) # Test metric may be a dictionary for a parametrized test. metric = result["metric"] if hasattr(metric, "items"): result["score"] = test_score = dict() total = 0.0 for key, value in iteritems(metric): value = 1.0 - value total += value test_score[key] = value # For some reason there are parametrized tests without cases. if len(metric) == 0: metric = 0.0 else: metric = total / len(metric) else: metric = 1.0 - metric scores.at[test, "score"] = metric scores.loc[test, :] *= self.config["weights"].get(test, 1.0) score = 0.0 maximum = 0.0 # Calculate the scores for each section considering the individual test # case scores. for section_id, card in iteritems( self.config['cards']['scored']['sections'] ): # LOGGER.info("Calculate score for section: '%s'.", section_id) cases = card.get("cases", None) if cases is None: continue card_score = scores.loc[cases, "score"].sum() card_total = scores.loc[cases, "max"].sum() # Format results nicely to work immediately with Vega Bar Chart. section_score = {"section": section_id, "score": card_score / card_total} self.result["score"]["sections"].append(section_score) # Calculate the final score for the entire model. weight = card.get("weight", 1.0) score += card_score * weight maximum += card_total * weight self.result["score"]["total_score"] = score / maximum
0.000819
def list_keywords(self):
    '''
    Return the list of keywords
    '''
    names = []
    try:
        for n in self.cur.execute("SELECT keyword FROM keyword;").fetchall():
            # Strip out leading and trailing whitespaces (can be artifacts of old data)
            k = n[0].strip()
            if len(k):
                names.extend([k])
    except:
        self.error("ERROR: cannot find database table 'keyword'")
    names = list(set(names))  # remove duplicates
    names = sorted(names, key=lambda s: s.lower())
    return(names)
0.010204
def items(self):
    """
    Return a copied list of the property names and values of this CIM instance.

    Each item in the returned list is a tuple of property name (in the
    original lexical case) and property value.

    The order of properties is preserved.
    """
    return [(key, v.value) for key, v in self.properties.items()]
0.005319
def schedule_next_requests(self):
    """Schedules a request if available"""
    # TODO: While there is capacity, schedule a batch of redis requests.
    for req in self.next_requests():
        self.crawler.engine.crawl(req, spider=self)
0.007874
def lint(exclude, skip_untracked, commit_only):
    # type: (List[str], bool, bool) -> None
    """ Lint python files.

    Args:
        exclude (list[str]):
            A list of glob string patterns to test against. If the file/path
            matches any of those patterns, it will be filtered out.
        skip_untracked (bool):
            If set to **True** it will skip all files not tracked by git.
        commit_only (bool):
            Only lint files that are staged for commit.
    """
    exclude = list(exclude) + conf.get('lint.exclude', [])
    runner = LintRunner(exclude, skip_untracked, commit_only)

    if not runner.run():
        exit(1)
0.001511
def _get_split_tasks(args, split_fn, file_key, outfile_i=-1): """Split up input files and arguments, returning arguments for parallel processing. outfile_i specifies the location of the output file in the arguments to the processing function. Defaults to the last item in the list. """ split_args = [] combine_map = {} finished_map = collections.OrderedDict() extras = [] for data in args: out_final, out_parts = split_fn(data) for parts in out_parts: split_args.append([utils.deepish_copy(data)] + list(parts)) for part_file in [x[outfile_i] for x in out_parts]: combine_map[part_file] = out_final if len(out_parts) == 0: if out_final is not None: if out_final not in finished_map: data[file_key] = out_final finished_map[out_final] = [data] else: extras.append([data]) else: extras.append([data]) return split_args, combine_map, list(finished_map.values()), extras
0.001826
def _get_numeric_status(self, key):
    """Extract the numeric value from the statuses object."""
    value = self._get_status(key)
    if value and any(i.isdigit() for i in value):
        return float(re.sub("[^0-9.]", "", value))
    return None
0.007435
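A hedged illustration of the extraction logic in _get_numeric_status above, run outside the class; the raw status string is invented for the example.

import re

raw = "72.5 F"  # hypothetical value returned by self._get_status(key)
if raw and any(c.isdigit() for c in raw):
    numeric = float(re.sub("[^0-9.]", "", raw))
else:
    numeric = None
print(numeric)  # 72.5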
def cluster_del_slots(self, slot, *slots):
    """Set hash slots as unbound in receiving node."""
    slots = (slot,) + slots
    if not all(isinstance(s, int) for s in slots):
        raise TypeError("All parameters must be of type int")
    fut = self.execute(b'CLUSTER', b'DELSLOTS', *slots)
    return wait_ok(fut)
0.005848
def percentage(columns, maximum=100, name=None): """ Creates the grammar for a Numeric (N) field storing a percentage and accepting only the specified number of characters. It is possible to set the maximum allowed value. By default this is 100 (for 100%), and if modified it is expected to be reduced, not increased. The three first digits will be for the integer value. The columns can't be lower than 3. :param columns: number of columns for this field :param maximum: maximum allowed value :param name: name for the field :return: grammar for the float numeric field """ if name is None: name = 'Percentage Field' if columns < 3: message = 'The values can not be lower than 3' raise pp.ParseException(message) field = basic.numeric_float(columns, 3) field.addParseAction(lambda v: _assert_is_percentage(v[0], maximum)) field.setName(name) return field
0.001041
def combined_credits(self, **kwargs): """ Get the combined (movie and TV) credits for a specific person id. To get the expanded details for each TV record, call the /credit method with the provided credit_id. This will provide details about which episode and/or season the credit is for. Args: language: (optional) ISO 639-1 code. append_to_response: (optional) Comma separated, any person method. Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_id_path('combined_credits') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
0.00542
def read(self):
    '''Execute the expression and capture its output, similar to backticks or
    $() in the shell. This is a wrapper around run() which captures stdout,
    decodes it, trims it, and returns it directly.'''
    result = self.stdout_capture().run()
    stdout_str = decode_with_universal_newlines(result.stdout)
    return stdout_str.rstrip('\n')
0.005181
def kmeans_segmentation(image, k, kmask=None, mrf=0.1): """ K-means image segmentation that is a wrapper around `ants.atropos` ANTsR function: `kmeansSegmentation` Arguments --------- image : ANTsImage input image k : integer integer number of classes kmask : ANTsImage (optional) segment inside this mask mrf : scalar smoothness, higher is smoother Returns ------- ANTsImage Example ------- >>> import ants >>> fi = ants.image_read(ants.get_ants_data('r16'), 'float') >>> fi = ants.n3_bias_field_correction(fi, 2) >>> seg = ants.kmeans_segmentation(fi, 3) """ dim = image.dimension kmimage = utils.iMath(image, 'Normalize') if kmask is None: kmask = utils.get_mask(kmimage, 0.01, 1, cleanup=2) kmask = utils.iMath(kmask, 'FillHoles').threshold_image(1,2) nhood = 'x'.join(['1']*dim) mrf = '[%s,%s]' % (str(mrf), nhood) kmimage = atropos(a = kmimage, m = mrf, c = '[5,0]', i = 'kmeans[%s]'%(str(k)), x = kmask) kmimage['segmentation'] = kmimage['segmentation'].clone(image.pixeltype) return kmimage
0.0121
def _check_model_types(self, models):
    """ Check types of passed models for correctness and in case raise exception

    :rtype: set
    :returns: set of models that are valid for the class
    """
    if not hasattr(models, "__iter__"):
        models = {models}
    if not all([isinstance(model, (AbstractStateModel, StateElementModel)) for model in models]):
        raise TypeError("The selection supports only models with base class AbstractStateModel or "
                        "StateElementModel, see handed elements {0}".format(models))
    return models if isinstance(models, set) else set(models)
0.009346
def _unparse_entry_record(self, entry):
    """
    :type entry: Dict[string, List[string]]
    :param entry: Dictionary holding an entry
    """
    for attr_type in sorted(entry.keys()):
        for attr_value in entry[attr_type]:
            self._unparse_attr(attr_type, attr_value)
0.006369
def current_ioloop(io_loop):
    '''
    A context manager that will set the current ioloop to io_loop for the context
    '''
    orig_loop = tornado.ioloop.IOLoop.current()
    io_loop.make_current()
    try:
        yield
    finally:
        orig_loop.make_current()
0.007407
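A usage sketch for current_ioloop above, assuming the generator is meant to be wrapped with contextlib.contextmanager (the decorator is not visible in the row above) and that tornado is installed.

import contextlib
import tornado.ioloop

current_ioloop_cm = contextlib.contextmanager(current_ioloop)  # assumed wrapping

new_loop = tornado.ioloop.IOLoop()
with current_ioloop_cm(new_loop):
    pass  # code here sees new_loop as tornado.ioloop.IOLoop.current()
# on exit, the previously current loop is made current again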
def media_post(self, media_file, mime_type=None, description=None, focus=None): """ Post an image. `media_file` can either be image data or a file name. If image data is passed directly, the mime type has to be specified manually, otherwise, it is determined from the file name. `focus` should be a tuple of floats between -1 and 1, giving the x and y coordinates of the images focus point for cropping (with the origin being the images center). Throws a `MastodonIllegalArgumentError` if the mime type of the passed data or file can not be determined properly. Returns a `media dict`_. This contains the id that can be used in status_post to attach the media file to a toot. """ if mime_type is None and (isinstance(media_file, str) and os.path.isfile(media_file)): mime_type = guess_type(media_file) media_file = open(media_file, 'rb') elif isinstance(media_file, str) and os.path.isfile(media_file): media_file = open(media_file, 'rb') if mime_type is None: raise MastodonIllegalArgumentError('Could not determine mime type' ' or data passed directly ' 'without mime type.') random_suffix = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)) file_name = "mastodonpyupload_" + str(time.time()) + "_" + str(random_suffix) + mimetypes.guess_extension( mime_type) if focus != None: focus = str(focus[0]) + "," + str(focus[1]) media_file_description = (file_name, media_file, mime_type) return self.__api_request('POST', '/api/v1/media', files={'file': media_file_description}, params={'description': description, 'focus': focus})
0.004548
def add_function(self, func):
    """ Record line profiling information for the given Python function.
    """
    try:
        # func_code does not exist in Python3
        code = func.__code__
    except AttributeError:
        import warnings
        warnings.warn("Could not extract a code object for the object %r" % (func,))
        return
    if code not in self.code_map:
        self.code_map[code] = {}
        self.functions.append(func)
0.003831
def delete_user(self, user_id, **kwargs): # noqa: E501 """Delete a user. # noqa: E501 An endpoint for deleting a user. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/users/{user-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.delete_user(user_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str user_id: The ID of the user to be deleted. (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.delete_user_with_http_info(user_id, **kwargs) # noqa: E501 else: (data) = self.delete_user_with_http_info(user_id, **kwargs) # noqa: E501 return data
0.001892
def filter_redundant(self, ids):
    """
    Return all non-redundant ids from a list
    """
    sids = set(ids)
    for id in ids:
        sids = sids.difference(self.ancestors(id, reflexive=False))
    return sids
0.008197
def _create(self, **kwargs): '''Allow creation of draft policy and ability to publish a draft Draft policies only exist in 12.1.0 and greater versions of TMOS. But there must be a method to create a draft, then publish it. :raises: MissingRequiredCreationParameter ''' tmos_ver = self._meta_data['bigip']._meta_data['tmos_version'] legacy = kwargs.pop('legacy', False) publish = kwargs.pop('publish', False) self._filter_version_specific_options(tmos_ver, **kwargs) if LooseVersion(tmos_ver) < LooseVersion('12.1.0'): return super(Policy, self)._create(**kwargs) else: if legacy: return super(Policy, self)._create(legacy=True, **kwargs) else: if 'subPath' not in kwargs: msg = "The keyword 'subPath' must be specified when " \ "creating draft policy in TMOS versions >= 12.1.0. " \ "Try and specify subPath as 'Drafts'." raise MissingRequiredCreationParameter(msg) self = super(Policy, self)._create(**kwargs) if publish: self.publish() return self
0.001582
def main():
    """Script that finds and runs flatc built from source."""
    if len(sys.argv) < 2:
        sys.stderr.write('Usage: run_flatc.py flatbuffers_dir [flatc_args]\n')
        return 1
    cwd = os.getcwd()
    flatc = ''
    flatbuffers_dir = sys.argv[1]
    for path in FLATC_SEARCH_PATHS:
        current = os.path.join(flatbuffers_dir, path,
                               'flatc' + EXECUTABLE_EXTENSION)
        if os.path.exists(current):
            flatc = current
            break
    if not flatc:
        sys.stderr.write('flatc not found\n')
        return 1
    command = [flatc] + sys.argv[2:]
    return subprocess.call(command)
0.020101
def _makepretty(printout, stack):
    '''
    Pretty print the stack trace and environment information for debugging those
    hard to reproduce user problems. :)
    '''
    printout.write('======== Salt Debug Stack Trace =========\n')
    traceback.print_stack(stack, file=printout)
    printout.write('=========================================\n')
0.002849
def dataframe(self): """ Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the string abbreviation of the team, such as 'DET'. """ fields_to_include = { 'abbreviation': self.abbreviation, 'assists': self.assists, 'blocks': self.blocks, 'defensive_rebounds': self.defensive_rebounds, 'field_goal_attempts': self.field_goal_attempts, 'field_goal_percentage': self.field_goal_percentage, 'field_goals': self.field_goals, 'free_throw_attempts': self.free_throw_attempts, 'free_throw_percentage': self.free_throw_percentage, 'free_throws': self.free_throws, 'games_played': self.games_played, 'minutes_played': self.minutes_played, 'name': self.name, 'offensive_rebounds': self.offensive_rebounds, 'opp_assists': self.opp_assists, 'opp_blocks': self.opp_blocks, 'opp_defensive_rebounds': self.opp_defensive_rebounds, 'opp_field_goal_attempts': self.opp_field_goal_attempts, 'opp_field_goal_percentage': self.opp_field_goal_percentage, 'opp_field_goals': self.opp_field_goals, 'opp_free_throw_attempts': self.opp_free_throw_attempts, 'opp_free_throw_percentage': self.opp_free_throw_percentage, 'opp_free_throws': self.opp_free_throws, 'opp_offensive_rebounds': self.opp_offensive_rebounds, 'opp_personal_fouls': self.opp_personal_fouls, 'opp_points': self.opp_points, 'opp_steals': self.opp_steals, 'opp_three_point_field_goal_attempts': self.opp_three_point_field_goal_attempts, 'opp_three_point_field_goal_percentage': self.opp_three_point_field_goal_percentage, 'opp_three_point_field_goals': self.opp_three_point_field_goals, 'opp_total_rebounds': self.opp_total_rebounds, 'opp_turnovers': self.opp_turnovers, 'opp_two_point_field_goal_attempts': self.opp_two_point_field_goal_attempts, 'opp_two_point_field_goal_percentage': self.opp_two_point_field_goal_percentage, 'opp_two_point_field_goals': self.opp_two_point_field_goals, 'personal_fouls': self.personal_fouls, 'points': self.points, 'rank': self.rank, 'steals': self.steals, 'three_point_field_goal_attempts': self.three_point_field_goal_attempts, 'three_point_field_goal_percentage': self.three_point_field_goal_percentage, 'three_point_field_goals': self.three_point_field_goals, 'total_rebounds': self.total_rebounds, 'turnovers': self.turnovers, 'two_point_field_goal_attempts': self.two_point_field_goal_attempts, 'two_point_field_goal_percentage': self.two_point_field_goal_percentage, 'two_point_field_goals': self.two_point_field_goals } return pd.DataFrame([fields_to_include], index=[self._abbreviation])
0.000615
def _FixedSizer(value_size): """Like _SimpleSizer except for a fixed-size field. The input is the size of one value.""" def SpecificSizer(field_number, is_repeated, is_packed): tag_size = _TagSize(field_number) if is_packed: local_VarintSize = _VarintSize def PackedFieldSize(value): result = len(value) * value_size return result + local_VarintSize(result) + tag_size return PackedFieldSize elif is_repeated: element_size = value_size + tag_size def RepeatedFieldSize(value): return len(value) * element_size return RepeatedFieldSize else: field_size = value_size + tag_size def FieldSize(value): return field_size return FieldSize return SpecificSizer
0.020888
def report(self):
    """
    Print network statistics.
    """
    logging.info("network inputs: %s", " ".join(map(str, self.input_variables)))
    logging.info("network targets: %s", " ".join(map(str, self.target_variables)))
    logging.info("network parameters: %s", " ".join(map(str, self.all_parameters)))
    logging.info("parameter count: %d", self.parameter_count)
0.012469
def predict_mhcii_binding(job, peptfile, allele, univ_options, mhcii_options): """ Predict binding for each peptide in `peptfile` to `allele` using the IEDB mhcii binding prediction tool. :param toil.fileStore.FileID peptfile: The input peptide fasta :param str allele: Allele to predict binding against :param dict univ_options: Dict of universal options used by almost all tools :param dict mhcii_options: Options specific to mhcii binding prediction :return: tuple of fsID for file containing the predictions and the predictor used :rtype: tuple(toil.fileStore.FileID, str|None) """ work_dir = os.getcwd() input_files = { 'peptfile.faa': peptfile} input_files = get_files_from_filestore(job, input_files, work_dir, docker=True) peptides = read_peptide_file(os.path.join(os.getcwd(), 'peptfile.faa')) parameters = [mhcii_options['pred'], allele, input_files['peptfile.faa']] if not peptides: return job.fileStore.writeGlobalFile(job.fileStore.getLocalTempFile()), None with open('/'.join([work_dir, 'predictions.tsv']), 'w') as predfile: docker_call(tool='mhcii', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], outfile=predfile, interactive=True, tool_version=mhcii_options['version']) run_netmhciipan = True predictor = None with open(predfile.name, 'r') as predfile: for line in predfile: if not line.startswith('HLA'): continue if line.strip().split('\t')[5] == 'NetMHCIIpan': break # If the predictor type is sturniolo then it needs to be processed differently elif line.strip().split('\t')[5] == 'Sturniolo': predictor = 'Sturniolo' else: predictor = 'Consensus' run_netmhciipan = False break if run_netmhciipan: netmhciipan = job.addChildJobFn(predict_netmhcii_binding, peptfile, allele, univ_options, mhcii_options['netmhciipan'], disk='100M', memory='100M', cores=1) job.fileStore.logToMaster('Ran mhcii on %s:%s successfully' % (univ_options['patient'], allele)) return netmhciipan.rv() else: output_file = job.fileStore.writeGlobalFile(predfile.name) job.fileStore.logToMaster('Ran mhcii on %s:%s successfully' % (univ_options['patient'], allele)) return output_file, predictor
0.004124
def create_marker_index(self):
    """
    Create the index that will keep track of the tasks if necessary.
    """
    if not self.es.indices.exists(index=self.marker_index):
        self.es.indices.create(index=self.marker_index)
0.007968
def _merge(self, old, new, use_equals=False):
    """Helper to merge which handles merging one value."""
    if old is None:
        return new
    if new is None:
        return old
    if (old == new) if use_equals else (old is new):
        return old
    raise ValueError("Incompatible values: %s != %s" % (old, new))
0.012698
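A small stand-in class to exercise the merge helper above; Holder is a hypothetical name used only for illustration, carrying the same _merge body.

class Holder:
    def _merge(self, old, new, use_equals=False):
        if old is None:
            return new
        if new is None:
            return old
        if (old == new) if use_equals else (old is new):
            return old
        raise ValueError("Incompatible values: %s != %s" % (old, new))


h = Holder()
print(h._merge(None, "x"))                          # 'x'  (new value wins when old is unset)
print(h._merge("x", None))                          # 'x'  (old value kept when new is unset)
print(h._merge("ab" * 2, "abab", use_equals=True))  # 'abab' (equal by value, not identity)
# h._merge("x", "y") would raise ValueError: Incompatible values: x != y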
def is_os(name, version_id=None):
    '''Return True if OS name in /etc/lsb-release of host given by fabric param
    `-H` is the same as given by argument, False else.

    If arg version_id is not None only return True if it is the same as in
    /etc/lsb-release, too.

    Args:
        name: 'Debian GNU/Linux', 'Ubuntu'
        version_id(None or str): None, '14.04', (Ubuntu)
                                 '16.04', (Ubuntu)
                                 '8', (Debian)
    '''
    result = False
    os_release_infos = _fetch_os_release_infos()

    if name == os_release_infos.get('name', None):
        if version_id is None:
            result = True
        elif version_id == os_release_infos.get('version_id', None):
            result = True

    return result
0.001235
def get_region_products(self, region):
    """Get the product information for the specified region.

    Args:
        - region: the region, e.g. "nq"

    Returns:
        The product information for that region, or None on failure.
    """
    regions, retInfo = self.list_regions()
    if regions is None:
        return None
    for r in regions:
        if r.get('name') == region:
            return r.get('products')
0.005263
def format_field(self, value, format_spec):
    """Override :meth:`string.Formatter.format_field` to have our default
    format_spec for :class:`datetime.Datetime` objects, and to let None
    yield an empty string rather than ``None``."""
    if isinstance(value, datetime) and not format_spec:
        return super().format_field(value, '%Y-%m-%d_%H-%M-%S')
    if value is None:
        return ''
    return super().format_field(value, format_spec)
0.004115
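A runnable sketch that drops the format_field override above into a string.Formatter subclass; MyFormatter and the sample fields are illustrative names, not from the original source.

from datetime import datetime
from string import Formatter


class MyFormatter(Formatter):
    def format_field(self, value, format_spec):
        # default timestamp format for datetimes, empty string for None
        if isinstance(value, datetime) and not format_spec:
            return super().format_field(value, '%Y-%m-%d_%H-%M-%S')
        if value is None:
            return ''
        return super().format_field(value, format_spec)


fmt = MyFormatter()
print(fmt.format('{when}{note}', when=datetime(2020, 1, 2, 3, 4, 5), note=None))
# -> 2020-01-02_03-04-05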
def filtered_notebook_metadata(notebook):
    """Notebook metadata, filtered for metadata added by Jupytext itself"""
    metadata = copy(notebook.metadata)
    metadata = filter_metadata(metadata,
                               notebook.metadata.get('jupytext', {}).get('notebook_metadata_filter'),
                               _DEFAULT_NOTEBOOK_METADATA)
    if 'jupytext' in metadata:
        del metadata['jupytext']
    return metadata
0.004525
def writeXMLFile(filename, content):
    """ Used only for debugging to write out intermediate files"""
    # etree.tostring() returns bytes, so write the file in binary mode
    xmlfile = open(filename, 'wb')
    # pretty print
    content = etree.tostring(content, pretty_print=True)
    xmlfile.write(content)
    xmlfile.close()
0.003846
def expose(self, key=None): """ Expose the decorated method for this L{Exposer} with the given key. A method which is exposed will be able to be retrieved by this L{Exposer}'s C{get} method with that key. If no key is provided, the key is the method name of the exposed method. Use like so:: class MyClass: @someExposer.expose() def foo(): ... or:: class MyClass: @someExposer.expose('foo') def unrelatedMethodName(): ... @param key: a hashable object, used by L{Exposer.get} to look up the decorated method later. If None, the key is the exposed method's name. @return: a 1-argument callable which records its input as exposed, then returns it. """ def decorator(function): rkey = key if rkey is None: if isinstance(function, FunctionType): rkey = function.__name__ else: raise NameRequired() if rkey not in self._exposed: self._exposed[rkey] = [] self._exposed[rkey].append(function) return function return decorator
0.001569
def validateArchiveList(archiveList):
  """ Validates an archiveList.
  An ArchiveList must:
  1. Have at least one archive config. Example: (60, 86400)
  2. No archive may be a duplicate of another.
  3. Higher precision archives' precision must evenly divide all lower
     precision archives' precision.
  4. Lower precision archives must cover larger time intervals than higher
     precision archives.
  5. Each archive must have at least enough points to consolidate to the next
     archive

  Returns True or False
  """

  if not archiveList:
    raise InvalidConfiguration("You must specify at least one archive configuration!")

  archiveList = sorted(archiveList, key=lambda a: a[0])  # sort by precision (secondsPerPoint)

  for i, archive in enumerate(archiveList):
    if i == len(archiveList) - 1:
      break

    nextArchive = archiveList[i + 1]
    if not archive[0] < nextArchive[0]:
      raise InvalidConfiguration("A Whisper database may not be configured having "
                                 "two archives with the same precision (archive%d: %s, archive%d: %s)" %
                                 (i, archive, i + 1, nextArchive))

    if nextArchive[0] % archive[0] != 0:
      raise InvalidConfiguration("Higher precision archives' precision "
                                 "must evenly divide all lower precision archives' precision "
                                 "(archive%d: %s, archive%d: %s)" %
                                 (i, archive[0], i + 1, nextArchive[0]))

    retention = archive[0] * archive[1]
    nextRetention = nextArchive[0] * nextArchive[1]

    if not nextRetention > retention:
      raise InvalidConfiguration("Lower precision archives must cover "
                                 "larger time intervals than higher precision archives "
                                 "(archive%d: %s seconds, archive%d: %s seconds)" %
                                 (i, retention, i + 1, nextRetention))

    archivePoints = archive[1]
    pointsPerConsolidation = nextArchive[0] // archive[0]
    if not archivePoints >= pointsPerConsolidation:
      raise InvalidConfiguration("Each archive must have at least enough points "
                                 "to consolidate to the next archive (archive%d consolidates %d of "
                                 "archive%d's points but it has only %d total points)" %
                                 (i + 1, pointsPerConsolidation, i, archivePoints))
0.013908
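For illustration, a retention scheme that satisfies the rules above, with each tuple given as (secondsPerPoint, points); the assertions restate rules 3-5 numerically.

archive_list = [(60, 1440), (300, 2016), (3600, 8760)]  # 1 day, 7 days, 1 year
for (sec, pts), (nsec, npts) in zip(archive_list, archive_list[1:]):
    assert nsec % sec == 0          # rule 3: precisions divide evenly
    assert nsec * npts > sec * pts  # rule 4: retention grows
    assert pts >= nsec // sec       # rule 5: enough points to consolidate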
def ticket_skips(self, ticket_id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/ticket_skips#list-skips-for-the-current-account" api_path = "/api/v2/tickets/{ticket_id}/skips.json" api_path = api_path.format(ticket_id=ticket_id) return self.call(api_path, **kwargs)
0.009615
def add_answer_at_time(self, record, now):
        """Adds an answer if it does not expire by a certain time"""
        if record is not None:
            if now == 0 or not record.is_expired(now):
                self.answers.append((record, now))
                if record.rrsig is not None:
                    self.answers.append((record.rrsig, now))
0.00565
def update_environment(self, environment_name, description=None, option_settings=[],
                       tier_type=None, tier_name=None, tier_version='1.0'):
        """
        Updates an environment
        """
        out("Updating environment: " + str(environment_name))
        messages = self.ebs.validate_configuration_settings(self.app_name, option_settings,
                                                            environment_name=environment_name)
        messages = messages['ValidateConfigurationSettingsResponse']['ValidateConfigurationSettingsResult']['Messages']
        ok = True
        for message in messages:
            if message['Severity'] == 'error':
                ok = False
            out("[" + message['Severity'] + "] " + str(environment_name) + " - '" \
                + message['Namespace'] + ":" + message['OptionName'] + "': " + message['Message'])
        self.ebs.update_environment(
            environment_name=environment_name,
            description=description,
            option_settings=option_settings,
            tier_type=tier_type,
            tier_name=tier_name,
            tier_version=tier_version)
0.008511
def interpolate_logscale_single(start, end, coefficient):
    """ Log-scale (geometric) interpolation between start and end """
    return np.exp(np.log(start) + (np.log(end) - np.log(start)) * coefficient)
0.005917
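A quick check of the geometric behaviour, assuming the function above is in scope: halfway between 1e-4 and 1e-2 on a log scale is 1e-3, not the linear midpoint.

print(interpolate_logscale_single(1e-4, 1e-2, 0.5))  # ~1e-3, the geometric midpoint
print((1e-4 + 1e-2) / 2)                             # 0.00505, the linear midpoint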
def open_group(store=None, mode='a', cache_attrs=True, synchronizer=None, path=None, chunk_store=None): """Open a group using file-mode-like semantics. Parameters ---------- store : MutableMapping or string, optional Store or path to directory in file system or name of zip file. mode : {'r', 'r+', 'a', 'w', 'w-'}, optional Persistence mode: 'r' means read only (must exist); 'r+' means read/write (must exist); 'a' means read/write (create if doesn't exist); 'w' means create (overwrite if exists); 'w-' means create (fail if exists). cache_attrs : bool, optional If True (default), user attributes will be cached for attribute read operations. If False, user attributes are reloaded from the store prior to all attribute read operations. synchronizer : object, optional Array synchronizer. path : string, optional Group path within store. chunk_store : MutableMapping or string, optional Store or path to directory in file system or name of zip file. Returns ------- g : zarr.hierarchy.Group Examples -------- >>> import zarr >>> root = zarr.open_group('data/example.zarr', mode='w') >>> foo = root.create_group('foo') >>> bar = root.create_group('bar') >>> root <zarr.hierarchy.Group '/'> >>> root2 = zarr.open_group('data/example.zarr', mode='a') >>> root2 <zarr.hierarchy.Group '/'> >>> root == root2 True """ # handle polymorphic store arg store = _normalize_store_arg(store) if chunk_store is not None: chunk_store = _normalize_store_arg(chunk_store) path = normalize_storage_path(path) # ensure store is initialized if mode in ['r', 'r+']: if contains_array(store, path=path): err_contains_array(path) elif not contains_group(store, path=path): err_group_not_found(path) elif mode == 'w': init_group(store, overwrite=True, path=path, chunk_store=chunk_store) elif mode == 'a': if contains_array(store, path=path): err_contains_array(path) if not contains_group(store, path=path): init_group(store, path=path, chunk_store=chunk_store) elif mode in ['w-', 'x']: if contains_array(store, path=path): err_contains_array(path) elif contains_group(store, path=path): err_contains_group(path) else: init_group(store, path=path, chunk_store=chunk_store) # determine read only status read_only = mode == 'r' return Group(store, read_only=read_only, cache_attrs=cache_attrs, synchronizer=synchronizer, path=path, chunk_store=chunk_store)
0.00072
def __tdfs(j, k, head, next, post, stack): """ Depth-first search and postorder of a tree rooted at node j. """ top = 0 stack[0] = j while (top >= 0): p = stack[top] i = head[p] if i == -1: top -= 1 post[k] = p k += 1 else: head[p] = next[i] top += 1 stack[top] = i return k
0.002457
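A small worked example, assuming the function above is defined in the same scope. The tree is node 0 with children 1 and 2, encoded as first-child (head) / next-sibling (next) arrays; note the routine consumes head in place.

head = [1, -1, -1]     # head[p] = first child of p, or -1
sibling = [-1, 2, -1]  # sibling[i] = next sibling of i, or -1
post = [0, 0, 0]
stack = [0, 0, 0]
k = __tdfs(0, 0, head, sibling, post, stack)
print(k, post)  # 3 [1, 2, 0] -- children appear before their parent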
def main(fixer_pkg, args=None): """Main program. Args: fixer_pkg: the name of a package where the fixers are located. args: optional; a list of command line arguments. If omitted, sys.argv[1:] is used. Returns a suggested exit status (0, 1, 2). """ # Set up option parser parser = optparse.OptionParser(usage="2to3 [options] file|dir ...") parser.add_option("-d", "--doctests_only", action="store_true", help="Fix up doctests only") parser.add_option("-f", "--fix", action="append", default=[], help="Each FIX specifies a transformation; default: all") parser.add_option("-j", "--processes", action="store", default=1, type="int", help="Run 2to3 concurrently") parser.add_option("-x", "--nofix", action="append", default=[], help="Prevent a transformation from being run") parser.add_option("-l", "--list-fixes", action="store_true", help="List available transformations") parser.add_option("-p", "--print-function", action="store_true", help="Modify the grammar so that print() is a function") parser.add_option("-v", "--verbose", action="store_true", help="More verbose logging") parser.add_option("--no-diffs", action="store_true", help="Don't show diffs of the refactoring") parser.add_option("-w", "--write", action="store_true", help="Write back modified files") parser.add_option("-n", "--nobackups", action="store_true", default=False, help="Don't write backups for modified files") parser.add_option("-o", "--output-dir", action="store", type="str", default="", help="Put output files in this directory " "instead of overwriting the input files. Requires -n.") parser.add_option("-W", "--write-unchanged-files", action="store_true", help="Also write files even if no changes were required" " (useful with --output-dir); implies -w.") parser.add_option("--add-suffix", action="store", type="str", default="", help="Append this string to all output filenames." " Requires -n if non-empty. " "ex: --add-suffix='3' will generate .py3 files.") # Parse command line arguments refactor_stdin = False flags = {} options, args = parser.parse_args(args) if options.write_unchanged_files: flags["write_unchanged_files"] = True if not options.write: warn("--write-unchanged-files/-W implies -w.") options.write = True # If we allowed these, the original files would be renamed to backup names # but not replaced. if options.output_dir and not options.nobackups: parser.error("Can't use --output-dir/-o without -n.") if options.add_suffix and not options.nobackups: parser.error("Can't use --add-suffix without -n.") if not options.write and options.no_diffs: warn("not writing files and not printing diffs; that's not very useful") if not options.write and options.nobackups: parser.error("Can't use -n without -w") if options.list_fixes: print "Available transformations for the -f/--fix option:" for fixname in refactor.get_all_fix_names(fixer_pkg): print fixname if not args: return 0 if not args: print >> sys.stderr, "At least one file or directory argument required." print >> sys.stderr, "Use --help to show usage." return 2 if "-" in args: refactor_stdin = True if options.write: print >> sys.stderr, "Can't write to stdin." 
return 2 if options.print_function: flags["print_function"] = True # Set up logging handler level = logging.DEBUG if options.verbose else logging.INFO logging.basicConfig(format='%(name)s: %(message)s', level=level) logger = logging.getLogger('lib2to3.main') # Initialize the refactoring tool avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg)) unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix) explicit = set() if options.fix: all_present = False for fix in options.fix: if fix == "all": all_present = True else: explicit.add(fixer_pkg + ".fix_" + fix) requested = avail_fixes.union(explicit) if all_present else explicit else: requested = avail_fixes.union(explicit) fixer_names = requested.difference(unwanted_fixes) input_base_dir = os.path.commonprefix(args) if (input_base_dir and not input_base_dir.endswith(os.sep) and not os.path.isdir(input_base_dir)): # One or more similar names were passed, their directory is the base. # os.path.commonprefix() is ignorant of path elements, this corrects # for that weird API. input_base_dir = os.path.dirname(input_base_dir) if options.output_dir: input_base_dir = input_base_dir.rstrip(os.sep) logger.info('Output in %r will mirror the input directory %r layout.', options.output_dir, input_base_dir) rt = StdoutRefactoringTool( sorted(fixer_names), flags, sorted(explicit), options.nobackups, not options.no_diffs, input_base_dir=input_base_dir, output_dir=options.output_dir, append_suffix=options.add_suffix) # Refactor all files and directories passed as arguments if not rt.errors: if refactor_stdin: rt.refactor_stdin() else: try: rt.refactor(args, options.write, options.doctests_only, options.processes) except refactor.MultiprocessingUnsupported: assert options.processes > 1 print >> sys.stderr, "Sorry, -j isn't " \ "supported on this platform." return 1 rt.summarize() # Return error status (0 if rt.errors is zero) return int(bool(rt.errors))
0.000637
def clone(self, data=None, shared_data=True, new_type=None, link=True, *args, **overrides): """Clones the object, overriding data and parameters. Args: data: New data replacing the existing data shared_data (bool, optional): Whether to use existing data new_type (optional): Type to cast object to link (bool, optional): Whether clone should be linked Determines whether Streams and Links attached to original object will be inherited. *args: Additional arguments to pass to constructor **overrides: New keyword arguments to pass to constructor Returns: Cloned object """ params = dict(self.get_param_values()) if new_type is None: clone_type = self.__class__ else: clone_type = new_type new_params = new_type.params() params = {k: v for k, v in params.items() if k in new_params} if params.get('group') == self.params()['group'].default: params.pop('group') settings = dict(params, **overrides) if 'id' not in settings: settings['id'] = self.id if data is None and shared_data: data = self.data if link: settings['plot_id'] = self._plot_id # Apply name mangling for __ attribute pos_args = getattr(self, '_' + type(self).__name__ + '__pos_params', []) return clone_type(data, *args, **{k:v for k,v in settings.items() if k not in pos_args})
0.003593
def markAsDelivered(self, thread_id, message_id): """ Mark a message as delivered :param thread_id: User/Group ID to which the message belongs. See :ref:`intro_threads` :param message_id: Message ID to set as delivered. See :ref:`intro_threads` :return: Whether the request was successful :raises: FBchatException if request failed """ data = { "message_ids[0]": message_id, "thread_ids[%s][0]" % thread_id: message_id, } r = self._post(self.req_url.DELIVERED, data) return r.ok
0.006757
def main(): """ Example application that opens a device that has been exposed to the network with ser2sock or similar serial-to-IP software. """ try: # Retrieve an AD2 device that has been exposed with ser2sock on localhost:10000. device = AlarmDecoder(SocketDevice(interface=(HOSTNAME, PORT))) # Set up an event handler and open the device device.on_message += handle_message with device.open(): while True: time.sleep(1) except Exception as ex: print('Exception:', ex)
0.005245
def get_all_file_report_pages(self, query): """ Get File Report (All Pages). :param query: a VirusTotal Intelligence search string in accordance with the file search documentation. :return: All JSON responses appended together. """ responses = [] r = self.get_hashes_from_search(query) responses.append(r) if ('results' in r.keys()) and ('next_page' in r['results'].keys()): next_page = r['results']['next_page'] else: next_page = None while next_page: r = self.get_hashes_from_search(query, next_page) if ('results' in r.keys()) and ('next_page' in r['results'].keys()): next_page = r['results']['next_page'] else: next_page = None responses.append(r) return dict(results=responses)
0.004545
def login(self): """ Perform IAM cookie based user login. """ access_token = self._get_access_token() try: super(IAMSession, self).request( 'POST', self._session_url, headers={'Content-Type': 'application/json'}, data=json.dumps({'access_token': access_token}) ).raise_for_status() except RequestException: raise CloudantException( 'Failed to exchange IAM token with Cloudant')
0.00369
def geometrize_shapes( shapes: DataFrame, *, use_utm: bool = False ) -> DataFrame: """ Given a GTFS shapes DataFrame, convert it to a GeoPandas GeoDataFrame and return the result. The result has a ``'geometry'`` column of WGS84 LineStrings instead of the columns ``'shape_pt_sequence'``, ``'shape_pt_lon'``, ``'shape_pt_lat'``, and ``'shape_dist_traveled'``. If ``use_utm``, then use local UTM coordinates for the geometries. Notes ------ Requires GeoPandas. """ import geopandas as gpd f = shapes.copy().sort_values(["shape_id", "shape_pt_sequence"]) def my_agg(group): d = {} d["geometry"] = sg.LineString( group[["shape_pt_lon", "shape_pt_lat"]].values ) return pd.Series(d) g = f.groupby("shape_id").apply(my_agg).reset_index() g = gpd.GeoDataFrame(g, crs=cs.WGS84) if use_utm: lat, lon = f.loc[0, ["shape_pt_lat", "shape_pt_lon"]].values crs = hp.get_utm_crs(lat, lon) g = g.to_crs(crs) return g
0.000952
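A usage sketch with made-up coordinates, assuming GeoPandas and Shapely are installed and the geometrize_shapes above is importable.

import pandas as pd

shapes = pd.DataFrame({
    'shape_id': ['s1', 's1', 's1'],
    'shape_pt_sequence': [1, 2, 3],
    'shape_pt_lon': [174.760, 174.765, 174.770],
    'shape_pt_lat': [-36.850, -36.852, -36.855],
})
geo = geometrize_shapes(shapes)
print(geo.loc[0, 'geometry'])
# LINESTRING (174.76 -36.85, 174.765 -36.852, 174.77 -36.855)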
def artist_undelete(self, artist_id): """Lets you undelete artist (Requires login) (UNTESTED) (Only Builder+). Parameters: artist_id (int): """ return self._get('artists/{0}/undelete.json'.format(artist_id), method='POST', auth=True)
0.009901
def _em_conversion(orig_units, conv_data, to_units=None, unit_system=None): """Convert between E&M & MKS base units. If orig_units is a CGS (or MKS) E&M unit, conv_data contains the corresponding MKS (or CGS) unit and scale factor converting between them. This must be done by replacing the expression of the original unit with the new one in the unit expression and multiplying by the scale factor. """ conv_unit, canonical_unit, scale = conv_data if conv_unit is None: conv_unit = canonical_unit new_expr = scale * canonical_unit.expr if unit_system is not None: # we don't know the to_units, so we get it directly from the # conv_data to_units = Unit(conv_unit.expr, registry=orig_units.registry) new_units = Unit(new_expr, registry=orig_units.registry) conv = new_units.get_conversion_factor(to_units) return to_units, conv
0.001092
def _inject(): """ Inject functions and constants from PyOpenGL but leave out the names that are deprecated or that we provide in our API. """ # Get namespaces NS = globals() GLNS = _GL.__dict__ # Get names that we use in our API used_names = [] used_names.extend([names[0] for names in _pyopengl2._functions_to_import]) used_names.extend([name for name in _pyopengl2._used_functions]) NS['_used_names'] = used_names # used_constants = set(_constants.__dict__) # Count injected_constants = 0 injected_functions = 0 for name in dir(_GL): if name.startswith('GL_'): # todo: find list of deprecated constants if name not in used_constants: NS[name] = GLNS[name] injected_constants += 1 elif name.startswith('gl'): # Functions if (name + ',') in _deprecated_functions: pass # Function is deprecated elif name in used_names: pass # Function is in our GL ES 2.0 API else: NS[name] = GLNS[name] injected_functions += 1
0.005008
def InitFromApiFlow(self, f, cron_job_id=None): """Shortcut method for easy legacy cron jobs support.""" if f.flow_id: self.run_id = f.flow_id elif f.urn: self.run_id = f.urn.Basename() self.started_at = f.started_at self.cron_job_id = cron_job_id flow_state_enum = api_plugins_flow.ApiFlow.State cron_enum = rdf_cronjobs.CronJobRun.CronJobRunStatus errors_map = { flow_state_enum.RUNNING: cron_enum.RUNNING, flow_state_enum.TERMINATED: cron_enum.FINISHED, flow_state_enum.ERROR: cron_enum.ERROR, flow_state_enum.CLIENT_CRASHED: cron_enum.ERROR } self.status = errors_map[f.state] if f.state != f.State.RUNNING: self.finished_at = f.last_active_at if f.context.kill_timestamp: self.status = self.Status.LIFETIME_EXCEEDED if f.context.HasField("status"): self.log_message = f.context.status if f.context.HasField("backtrace"): self.backtrace = f.context.backtrace return self
0.006944
def relevent_issue(issue, after): """Returns True iff this issue is something we should show in the changelog.""" return (closed_issue(issue, after) and issue_completed(issue) and issue_section(issue))
0.008584
def _parse_create_args(client, args): """Converts CLI arguments to args for VSManager.create_instance. :param dict args: CLI arguments """ data = { "hourly": args.get('billing', 'hourly') == 'hourly', "cpus": args.get('cpu', None), "ipv6": args.get('ipv6', None), "disks": args.get('disk', None), "os_code": args.get('os', None), "memory": args.get('memory', None), "flavor": args.get('flavor', None), "domain": args.get('domain', None), "host_id": args.get('host_id', None), "private": args.get('private', None), "hostname": args.get('hostname', None), "nic_speed": args.get('network', None), "boot_mode": args.get('boot_mode', None), "dedicated": args.get('dedicated', None), "post_uri": args.get('postinstall', None), "datacenter": args.get('datacenter', None), "public_vlan": args.get('vlan_public', None), "private_vlan": args.get('vlan_private', None), "public_subnet": args.get('subnet_public', None), "private_subnet": args.get('subnet_private', None), } # The primary disk is included in the flavor and the local_disk flag is not needed # Setting it to None prevents errors from the flag not matching the flavor if not args.get('san') and args.get('flavor'): data['local_disk'] = None else: data['local_disk'] = not args.get('san') if args.get('image'): if args.get('image').isdigit(): image_mgr = SoftLayer.ImageManager(client) image_details = image_mgr.get_image(args.get('image'), mask="id,globalIdentifier") data['image_id'] = image_details['globalIdentifier'] else: data['image_id'] = args['image'] if args.get('userdata'): data['userdata'] = args['userdata'] elif args.get('userfile'): with open(args['userfile'], 'r') as userfile: data['userdata'] = userfile.read() # Get the SSH keys if args.get('key'): keys = [] for key in args.get('key'): resolver = SoftLayer.SshKeyManager(client).resolve_ids key_id = helpers.resolve_id(resolver, key, 'SshKey') keys.append(key_id) data['ssh_keys'] = keys if args.get('public_security_group'): pub_groups = args.get('public_security_group') data['public_security_groups'] = [group for group in pub_groups] if args.get('private_security_group'): priv_groups = args.get('private_security_group') data['private_security_groups'] = [group for group in priv_groups] if args.get('tag', False): data['tags'] = ','.join(args['tag']) if args.get('host_id'): data['host_id'] = args['host_id'] if args.get('placementgroup'): resolver = SoftLayer.managers.PlacementManager(client).resolve_ids data['placement_id'] = helpers.resolve_id(resolver, args.get('placementgroup'), 'PlacementGroup') return data
0.001321
def verify(self, obj):
    """Verify that the object conforms to this verifier's schema

    Args:
        obj (object): A python object to verify

    Raises:
        ValidationError: If there is a problem verifying the list, a
            ValidationError is thrown with at least the reason key set indicating
            the reason for the lack of validation.
    """

    if isinstance(obj, str):
        raise ValidationError("Object was not a list",
                              reason="a string was passed instead of a list", object=obj)

    out_obj = []
    if self._min_length is not None and len(obj) < self._min_length:
        raise ValidationError("List was too short",
                              reason="list length %d was less than the minimum %d" % (len(obj), self._min_length),
                              min_length=self._min_length, actual_length=len(obj))

    if self._max_length is not None and len(obj) > self._max_length:
        raise ValidationError("List was too long",
                              reason="list length %d was greater than the max %d" % (len(obj), self._max_length),
                              max_length=self._max_length, actual_length=len(obj))

    for val in obj:
        out_obj.append(self._verifier.verify(val))

    return out_obj
0.005904
def resolve(object):
    """Resolve an object name to (ra_deg, dec_deg) in degrees via the CDS Sesame service"""
    import re
    sesame_cmd = 'curl -s http://cdsweb.u-strasbg.fr/viz-bin/nph-sesame/-oI?' + object.replace(' ', '')
    f = os.popen(sesame_cmd)
    lines = f.readlines()
    f.close()
    for line in lines:
        if re.search('%J ', line):
            result2 = line.split()
            ra_deg = float(result2[1])
            dec_deg = float(result2[2])
            return (ra_deg, dec_deg)
    return (0, 0)
0.015625
def _get_service_state(service_id: str): """Get the Service state object for the specified id.""" LOG.debug('Getting state of service %s', service_id) services = get_service_id_list() service_ids = [s for s in services if service_id in s] if len(service_ids) != 1: return 'Service not found! services = {}'.format(str(services)) subsystem, name, version = service_ids[0].split(':') return ServiceState(subsystem, name, version)
0.00404
def blockmix_salsa8(BY, Yi, r): """Blockmix; Used by SMix""" start = (2 * r - 1) * 16 X = BY[start:start+16] # BlockMix - 1 tmp = [0]*16 for i in xrange(2 * r): # BlockMix - 2 #blockxor(BY, i * 16, X, 0, 16) # BlockMix - 3(inner) salsa20_8(X, tmp, BY, i * 16, BY, Yi + i*16) # BlockMix - 3(outer) #array_overwrite(X, 0, BY, Yi + (i * 16), 16) # BlockMix - 4 for i in xrange(r): # BlockMix - 6 BY[i * 16:(i * 16)+(16)] = BY[Yi + (i * 2) * 16:(Yi + (i * 2) * 16)+(16)] BY[(i + r) * 16:((i + r) * 16)+(16)] = BY[Yi + (i*2 + 1) * 16:(Yi + (i*2 + 1) * 16)+(16)]
0.006878
def hidden_cursor(self): """Return a context manager that hides the cursor while inside it and makes it visible on leaving.""" self.stream.write(self.hide_cursor) try: yield finally: self.stream.write(self.normal_cursor)
0.007042
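This is the pattern exposed by terminal libraries such as blessings; a usage sketch, assuming that package is installed and provides the context manager under this name.

import time
from blessings import Terminal

term = Terminal()
with term.hidden_cursor():
    for i in range(3):
        print('working... %d' % i)
        time.sleep(0.5)
# the cursor is restored here, even if the loop body raised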
def _getPredictedField(options):
  """ Gets the predicted field and its datatype from the options dictionary

  Returns: (predictedFieldName, predictedFieldType)
  """
  if not options['inferenceArgs'] or \
      not options['inferenceArgs']['predictedField']:
    return None, None

  predictedField = options['inferenceArgs']['predictedField']
  predictedFieldInfo = None
  includedFields = options['includedFields']
  for info in includedFields:
    if info['fieldName'] == predictedField:
      predictedFieldInfo = info
      break
  if predictedFieldInfo is None:
    raise ValueError(
      "Predicted field '%s' does not exist in included fields." % predictedField
    )
  predictedFieldType = predictedFieldInfo['fieldType']

  return predictedField, predictedFieldType
0.017926
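For illustration, with an options dict shaped the way the function above expects (field names invented), the lookup resolves like this, assuming the function is in scope.

options = {
    'inferenceArgs': {'predictedField': 'consumption'},
    'includedFields': [
        {'fieldName': 'timestamp', 'fieldType': 'datetime'},
        {'fieldName': 'consumption', 'fieldType': 'float'},
    ],
}
print(_getPredictedField(options))  # ('consumption', 'float')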
def serialize_math(ctx, document, elem, root):
    """Serialize math element.

    Math objects are not supported at the moment. This is why we
    only show an error message.
    """

    _div = etree.SubElement(root, 'span')
    if ctx.options['embed_styles']:
        _div.set('style', 'border: 1px solid red')
    _div.text = 'We do not support Math blocks at the moment.'

    fire_hooks(ctx, document, elem, _div, ctx.get_hook('math'))

    return root
0.004435