Columns: text (string, lengths 78 to 104k) | score (float64, range 0 to 0.18)
def _useChunk(self, index) -> None:
    """
    Switch to specific chunk

    :param index:
    """
    if self.currentChunk is not None:
        if self.currentChunkIndex == index and \
                not self.currentChunk.closed:
            return
        self.currentChunk.close()

    self.currentChunk = self._openChunk(index)
    self.currentChunkIndex = index
    self.itemNum = self.currentChunk.numKeys + 1

score: 0.004292
def GetWindowsEnvironmentVariablesMap(knowledge_base):
    """Return a dictionary of environment variables and their values.

    Implementation maps variables mentioned in
    https://en.wikipedia.org/wiki/Environment_variable#Windows to known
    KB definitions.

    Args:
      knowledge_base: A knowledgebase object.

    Returns:
      A dictionary built from a given knowledgebase object where keys are
      variables names and values are their values.
    """
    environ_vars = {}

    if knowledge_base.environ_path:
        environ_vars["path"] = knowledge_base.environ_path

    if knowledge_base.environ_temp:
        environ_vars["temp"] = knowledge_base.environ_temp

    if knowledge_base.environ_systemroot:
        environ_vars["systemroot"] = knowledge_base.environ_systemroot

    if knowledge_base.environ_windir:
        environ_vars["windir"] = knowledge_base.environ_windir

    if knowledge_base.environ_programfiles:
        environ_vars["programfiles"] = knowledge_base.environ_programfiles
        environ_vars["programw6432"] = knowledge_base.environ_programfiles

    if knowledge_base.environ_programfilesx86:
        environ_vars["programfiles(x86)"] = knowledge_base.environ_programfilesx86

    if knowledge_base.environ_systemdrive:
        environ_vars["systemdrive"] = knowledge_base.environ_systemdrive

    if knowledge_base.environ_allusersprofile:
        environ_vars["allusersprofile"] = knowledge_base.environ_allusersprofile
        environ_vars["programdata"] = knowledge_base.environ_allusersprofile

    if knowledge_base.environ_allusersappdata:
        environ_vars["allusersappdata"] = knowledge_base.environ_allusersappdata

    for user in knowledge_base.users:
        if user.appdata:
            environ_vars.setdefault("appdata", []).append(user.appdata)
        if user.localappdata:
            environ_vars.setdefault("localappdata", []).append(user.localappdata)
        if user.userdomain:
            environ_vars.setdefault("userdomain", []).append(user.userdomain)
        if user.userprofile:
            environ_vars.setdefault("userprofile", []).append(user.userprofile)

    return environ_vars

score: 0.008824
def get_worker_digests(self):
    """
    If we are being called from an orchestrator build, collect the worker
    node data and recreate the data locally.
    """
    try:
        builds = self.workflow.build_result.annotations['worker-builds']
    except (TypeError, KeyError):
        # This annotation is only set for the orchestrator build.
        # It's not present, so this is a worker build.
        return {}

    worker_digests = {}
    for plat, annotation in builds.items():
        digests = annotation['digests']
        self.log.debug("build %s has digests: %s", plat, digests)
        for digest in digests:
            reg = registry_hostname(digest['registry'])
            worker_digests.setdefault(reg, [])
            worker_digests[reg].append(digest)

    return worker_digests

score: 0.00227
def merge_record_extra(record, target, reserved):
    """
    Merges extra attributes from LogRecord object into target dictionary

    :param record: logging.LogRecord
    :param target: dict to update
    :param reserved: dict or list with reserved keys to skip
    """
    for key, value in record.__dict__.items():
        # this allows to have numeric keys
        if (key not in reserved
                and not (hasattr(key, "startswith")
                         and key.startswith('_'))):
            target[key] = value
    return target

score: 0.001855
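A minimal usage sketch for merge_record_extra, assuming the function above is importable. Building the reserved set from a blank LogRecord is an illustrative choice here, not the library's own reserved list:

import logging

record = logging.LogRecord('demo', logging.INFO, __file__, 1, 'hello', None, None)
record.request_id = 'abc123'  # extra attribute, as added via logging's `extra` mechanism

# Treat every attribute of a blank record as reserved (illustration only).
reserved = vars(logging.LogRecord('', 0, '', 0, '', None, None))
print(merge_record_extra(record, {}, reserved))
# -> {'request_id': 'abc123'} (modulo version-specific record attributes)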
def put_file(self, in_path, out_path):
    ''' transfer a file from local to local '''
    vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
    if not os.path.exists(in_path):
        raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
    try:
        shutil.copyfile(in_path, out_path)
    except shutil.Error:
        traceback.print_exc()
        raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
    except IOError:
        traceback.print_exc()
        raise errors.AnsibleError("failed to transfer file to %s" % out_path)

score: 0.007645
def track(self, thread_id, frame, frame_id_to_lineno, frame_custom_thread_id=None):
    '''
    :param thread_id: The thread id to be used for this frame.

    :param frame: The topmost frame which is suspended at the given thread.

    :param frame_id_to_lineno: If available, the line number for the frame will be
        gotten from this dict, otherwise frame.f_lineno will be used (needed for
        unhandled exceptions as the place where we report may be different from the
        place where it's raised).

    :param frame_custom_thread_id: If not None, this is the thread id for the custom
        frame (i.e.: coroutine).
    '''
    with self._lock:
        coroutine_or_main_thread_id = frame_custom_thread_id or thread_id

        if coroutine_or_main_thread_id in self._suspended_frames_manager._thread_id_to_tracker:
            sys.stderr.write('pydevd: Something is wrong. Tracker being added twice to the same thread id.\n')

        self._suspended_frames_manager._thread_id_to_tracker[coroutine_or_main_thread_id] = self
        self._main_thread_id = thread_id
        self._frame_id_to_lineno = frame_id_to_lineno

        frame_ids_from_thread = self._thread_id_to_frame_ids.setdefault(
            coroutine_or_main_thread_id, [])

        while frame is not None:
            frame_id = id(frame)
            self._frame_id_to_frame[frame_id] = frame
            _FrameVariable(frame, self._register_variable)  # Instancing is enough to register.
            self._suspended_frames_manager._variable_reference_to_frames_tracker[frame_id] = self
            frame_ids_from_thread.append(frame_id)

            self._frame_id_to_main_thread_id[frame_id] = thread_id

            frame = frame.f_back

score: 0.006431
def abi_encode_args(method, args):
    "encode args for method: method_id|data"
    assert issubclass(method.im_class, NativeABIContract), method.im_class
    m_abi = method.im_class._get_method_abi(method)
    return zpad(encode_int(m_abi['id']), 4) + abi.encode_abi(m_abi['arg_types'], args)

score: 0.006826
async def process_events_async(self, context, messages):
    """
    Called by the processor host when a batch of events has arrived.
    This is where the real work of the event processor is done.

    :param context: Information about the partition
    :type context: ~azure.eventprocessorhost.PartitionContext
    :param messages: The events to be processed.
    :type messages: list[~azure.eventhub.common.EventData]
    """
    logger.info("Events processed {}".format(context.sequence_number))
    await context.checkpoint_async()

score: 0.003478
def set_ncbi_email():
    """Set contact email for NCBI."""
    Entrez.email = args.email
    logger.info("Set NCBI contact email to %s", args.email)
    Entrez.tool = "genbank_get_genomes_by_taxon.py"

score: 0.004975
def categorize(
    data,
    col_name: str = None,
    new_col_name: str = None,
    categories: dict = None,
    max_categories: float = 0.15
):
    """
    :param data:
    :param col_name:
    :param new_col_name:
    :param categories:
    :param max_categories: max proportion threshold of categories
    :return: new categories
    :rtype dict:
    """
    _categories = {}

    if col_name is None:
        if categories is not None:
            raise Exception(
                'col_name is None when categories was defined.'
            )
        # create a list of cols with all object columns whose ratio of
        # distinct values to non-null values is below the threshold
        cols = [
            k for k in data.keys()
            if data[k].dtype == 'object' and
            (data[k].nunique() / data[k].count()) <= max_categories
        ]
    else:
        # create a list with col_name
        if new_col_name is not None:
            data[new_col_name] = data[col_name]
            col_name = new_col_name
        cols = [col_name]

    for c in cols:
        if categories is not None:
            # assert all keys is a number
            assert all(type(k) in (int, float) for k in categories.keys())
            # replace values using given categories dict
            data[c].replace(categories, inplace=True)
            # change column to categorical type
            data[c] = data[c].astype('category')
            # update categories information
            _categories.update({c: categories})
        else:
            # change column to categorical type
            data[c] = data[c].astype('category')
            # record the inferred category codes
            _categories.update({
                c: dict(enumerate(
                    data[c].cat.categories,
                ))
            })
    return _categories

score: 0.000568
def locale(self):
    '''
    Do a lookup for the locale code that is set for this layout.

    NOTE: USB HID specifies only 35 different locales. If your layout does
    not fit, it should be set to Undefined/0

    @return: Tuple (<USB HID locale code>, <name>)
    '''
    name = self.json_data['hid_locale']

    # Set to Undefined/0 if not set
    if name is None:
        name = "Undefined"

    return (int(self.json_data['from_hid_locale'][name]), name)

score: 0.006036
def getValue(words):
    """Computes the sum of the values of the words."""
    value = 0
    for word in words:
        for letter in word:
            # shared.getConst will evaluate to the dictionary broadcasted by
            # the root Future
            value += shared.getConst('lettersValue')[letter]
    return value

score: 0.003077
def _set_status_flag(self, status):
    """
    Configure state and files on disk to match current processing status.

    :param str status: Name of new status designation for pipeline.
    """
    # Remove previous status flag file.
    flag_file_path = self._flag_file_path()
    try:
        os.remove(flag_file_path)
    except:
        # Print message only if the failure to remove the status flag
        # is unexpected; there's no flag for initialization, so we
        # can't remove the file.
        if self.status != "initializing":
            print("Could not remove flag file: '{}'".format(flag_file_path))
        pass

    # Set new status.
    prev_status = self.status
    self.status = status
    self._create_file(self._flag_file_path())
    print("\nChanged status from {} to {}.".format(prev_status, self.status))

score: 0.004278
def _fetch_features(self):
    """ Retrieves a new page of features from Geopedia
    """
    if self.next_page_url is None:
        return

    response = get_json(self.next_page_url, post_values=self.query,
                        headers=self.gpd_session.session_headers)

    self.features.extend(response['features'])
    self.next_page_url = response['pagination']['next']
    self.layer_size = response['pagination']['total']

score: 0.006818
def p_decl(self, p):
    'decl : sigtypes declnamelist SEMICOLON'
    decllist = []
    for rname, rlength in p[2]:
        decllist.extend(self.create_decl(p[1], rname, length=rlength,
                                         lineno=p.lineno(2)))
    p[0] = Decl(tuple(decllist), lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))

score: 0.00554
def _parse_trigger(self, trigger_clause):
    """Parse a named event or explicit stream trigger into a TriggerDefinition."""
    cond = trigger_clause[0]

    named_event = None
    explicit_stream = None
    explicit_trigger = None

    # Identifier parse tree is Group(Identifier)
    if cond.getName() == 'identifier':
        named_event = cond[0]
    elif cond.getName() == 'stream_trigger':
        trigger_type = cond[0]
        stream = cond[1]
        oper = cond[2]
        ref = cond[3]

        trigger = InputTrigger(trigger_type, oper, ref)
        explicit_stream = stream
        explicit_trigger = trigger
    elif cond.getName() == 'stream_always':
        stream = cond[0]
        trigger = TrueTrigger()
        explicit_stream = stream
        explicit_trigger = trigger
    else:
        raise ArgumentError("OnBlock created from an invalid ParseResults object",
                            parse_results=trigger_clause)

    return TriggerDefinition(named_event, explicit_stream, explicit_trigger)

score: 0.004591
def send_message(self, message):
    """
    Dispatch a message using 0mq
    """
    with self._instance_lock:
        if message is None:
            Global.LOGGER.error("can't deliver a null messages")
            return

        if message.sender is None:
            Global.LOGGER.error(f"can't deliver anonymous messages with body {message.body}")
            return

        if message.receiver is None:
            Global.LOGGER.error(
                f"can't deliver message from {message.sender}: recipient not specified")
            return

        if message.message is None:
            Global.LOGGER.error(f"can't deliver message with no body from {message.sender}")
            return

        sender = "*" + message.sender + "*"
        self.socket.send_multipart(
            [bytes(sender, 'utf-8'), pickle.dumps(message)])

        if Global.CONFIG_MANAGER.tracing_mode:
            Global.LOGGER.debug("dispatched : "
                                + message.sender + "-"
                                + message.message + "-"
                                + message.receiver)

        self.dispatched = self.dispatched + 1

score: 0.003776
def algorithm(self):
    """
    :return:
        A unicode string of "rsa", "dsa" or "ec"
    """
    if self._algorithm is None:
        self._algorithm = self['private_key_algorithm']['algorithm'].native
    return self._algorithm

score: 0.007634
def set_timestamp_to_current(self):
    """
    Set timestamp to current time utc

    :rtype: None
    """
    # Good form to add tzinfo
    self.timestamp = pytz.UTC.localize(datetime.datetime.utcnow())

score: 0.008772
def _find_alias(FunctionName, Name, FunctionVersion=None,
                region=None, key=None, keyid=None, profile=None):
    '''
    Given function name and alias name, find and return matching alias information.
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    args = {'FunctionName': FunctionName}
    if FunctionVersion:
        args['FunctionVersion'] = FunctionVersion

    for aliases in __utils__['boto3.paged_call'](conn.list_aliases, **args):
        for alias in aliases.get('Aliases'):
            if alias['Name'] == Name:
                return alias
    return None

score: 0.00315
def redirect(location=None, internal=False, code=None, headers=None,
             add_slash=False, request=None):
    '''
    Perform a redirect, either internal or external. An internal redirect
    performs the redirect server-side, while the external redirect utilizes
    an HTTP 302 status code.

    :param location: The HTTP location to redirect to.
    :param internal: A boolean indicating whether the redirect should be
                     internal.
    :param code: The HTTP status code to use for the redirect. Defaults
                 to 302.
    :param headers: Any HTTP headers to send with the response, as a
                    dictionary.
    :param request: The :class:`pecan.Request` instance to use.
    '''
    request = request or state.request
    if add_slash:
        if location is None:
            split_url = list(urlparse.urlsplit(request.url))
            new_proto = request.environ.get(
                'HTTP_X_FORWARDED_PROTO', split_url[0]
            )
            split_url[0] = new_proto
        else:
            split_url = list(urlparse.urlsplit(location))

        split_url[2] = split_url[2].rstrip('/') + '/'
        location = urlparse.urlunsplit(split_url)

    if not headers:
        headers = {}
    if internal:
        if code is not None:
            raise ValueError('Cannot specify a code for internal redirects')
        request.environ['pecan.recursive.context'] = request.context
        raise ForwardRequestException(location)
    if code is None:
        code = 302
    raise exc.status_map[code](location=location, headers=headers)

score: 0.000637
def get_alarms(username, auth, url):
    """Takes in no param as input to fetch RealTime Alarms from HP IMC RESTFUL API

    :param username: OperatorName, String type. Required. Default Value "admin".
        Checks that the operator has the privileges to view the Real-Time Alarms.

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class

    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass

    :return: list of dictionaries where each element of the list represents a single
        alarm as pulled from the current list of browse alarms in the HPE IMC Platform

    :rtype: list

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.alarms import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> all_alarms = get_alarms('admin', auth.creds, auth.url)
    >>> assert (type(all_alarms)) is list
    >>> assert 'ackStatus' in all_alarms[0]
    """
    f_url = url + "/imcrs/fault/alarm?operatorName=" + username + \
            "&recStatus=0&ackStatus=0&timeRange=0&size=50&desc=true"
    response = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 200:
            alarm_list = (json.loads(response.text))
            return alarm_list['alarm']
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' get_alarms: An Error has occurred'

score: 0.004828
def __get_ssh_credentials(vm_):
    '''
    Get configured SSH credentials.
    '''
    ssh_user = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default=os.getenv('USER'))
    ssh_key = config.get_cloud_config_value(
        'ssh_keyfile', vm_, __opts__,
        default=os.path.expanduser('~/.ssh/google_compute_engine'))
    return ssh_user, ssh_key

score: 0.002667
def spectra(self, alpha=None, nmax=None, convention='power', unit='per_l',
            base=10.):
    """
    Return the spectra of one or more Slepian functions.

    Usage
    -----
    spectra = x.spectra([alpha, nmax, convention, unit, base])

    Returns
    -------
    spectra : ndarray, shape (lmax+1, nmax)
        A matrix with each column containing the spectrum of a Slepian
        function, and where the functions are arranged with increasing
        concentration factors. If alpha is set, only a single vector is
        returned, whereas if nmax is set, the first nmax spectra are
        returned.

    Parameters
    ----------
    alpha : int, optional, default = None
        The function number of the output spectrum, where alpha=0
        corresponds to the best concentrated Slepian function.
    nmax : int, optional, default = 1
        The number of best concentrated Slepian function power spectra
        to return.
    convention : str, optional, default = 'power'
        The type of spectrum to return: 'power' for power spectrum,
        'energy' for energy spectrum, and 'l2norm' for the l2 norm
        spectrum.
    unit : str, optional, default = 'per_l'
        If 'per_l', return the total contribution to the spectrum for each
        spherical harmonic degree l. If 'per_lm', return the average
        contribution to the spectrum for each coefficient at spherical
        harmonic degree l. If 'per_dlogl', return the spectrum per log
        interval dlog_a(l).
    base : float, optional, default = 10.
        The logarithm base when calculating the 'per_dlogl' spectrum.

    Description
    -----------
    This function returns either the power spectrum, energy spectrum, or
    l2-norm spectrum of one or more of the Slepian functions. Total power
    is defined as the integral of the function squared over all space,
    divided by the area the function spans. If the mean of the function is
    zero, this is equivalent to the variance of the function. The total
    energy is the integral of the function squared over all space and is
    4pi times the total power. The l2-norm is the sum of the magnitude of
    the coefficients squared.

    The output spectrum can be expressed using one of three units. 'per_l'
    returns the contribution to the total spectrum from all angular orders
    at degree l. 'per_lm' returns the average contribution to the total
    spectrum from a single coefficient at degree l. The 'per_lm' spectrum
    is equal to the 'per_l' spectrum divided by (2l+1). 'per_dlogl' returns
    the contribution to the total spectrum from all angular orders over an
    infinitesimal logarithmic degree band. The contribution in the band
    dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,
    and where spectrum(l, 'per_dlogl') is equal to
    spectrum(l, 'per_l')*l*log(a).
    """
    if alpha is None:
        if nmax is None:
            nmax = self.nmax
        spectra = _np.zeros((self.lmax+1, nmax))

        for iwin in range(nmax):
            coeffs = self.to_array(iwin)
            spectra[:, iwin] = _spectrum(coeffs, normalization='4pi',
                                         convention=convention, unit=unit,
                                         base=base)
    else:
        coeffs = self.to_array(alpha)
        spectra = _spectrum(coeffs, normalization='4pi',
                            convention=convention, unit=unit, base=base)

    return spectra

score: 0.000798
def create_result(self, local_path, container_path, permissions, meta, val, dividers):
    """Default permissions to rw"""
    if permissions is NotSpecified:
        permissions = 'rw'
    return Mount(local_path, container_path, permissions)

score: 0.011583
def merge_accounts(self, request):
    """
    Attach NetID account to regular django account and then redirect user.
    In this situation the user doesn't have to fill extra fields because he
    filled them when the first account (request.user) was created. Note that
    self.identity must already be set at this stage by the validate_response
    function.
    """
    # create new net ID record in database
    # and attach it to request.user account.
    try:
        netid = NetID.objects.get(identity=self.identity, provider=self.provider)
    except NetID.DoesNotExist:
        netid = NetID(user=request.user, identity=self.identity,
                      provider=self.provider)
        netid.save()

    # show nice message to user.
    messages.add_message(request, messages.SUCCESS, lang.ACCOUNTS_MERGED)

score: 0.005794
def memory_usage_psutil():
    """Return the current process memory usage in MB.
    """
    process = psutil.Process(os.getpid())
    mem = process.memory_info()[0] / float(2 ** 20)
    mem_vms = process.memory_info()[1] / float(2 ** 20)
    return mem, mem_vms

score: 0.003817
def export(self, nidm_version, export_dir):
    """
    Create prov graph.
    """
    self.stat = None
    if isinstance(self.stat_type, QualifiedName):
        stat = self.stat_type
    elif self.stat_type is not None:
        if self.stat_type.lower() == "t":
            stat = STATO_TSTATISTIC
        elif self.stat_type.lower() == "z":
            stat = STATO_ZSTATISTIC
        elif self.stat_type.lower() == "f":
            stat = STATO_FSTATISTIC
        elif self.stat_type.startswith('http'):
            stat = Identifier(self.stat_type)

    self.add_attributes((
        (PROV['type'], STATO_CONTRAST_WEIGHT_MATRIX),
        (NIDM_STATISTIC_TYPE, stat),
        (PROV['label'], self.label),
        (NIDM_CONTRAST_NAME, self.contrast_name),
        (PROV['value'], json.dumps(self.contrast_weights))))

score: 0.002217
def month_to_year(timeperiod):
    """:param timeperiod: as string in YYYYMM0000 format
    :return string in YYYY000000 format"""
    t = datetime.strptime(timeperiod, SYNERGY_MONTHLY_PATTERN)
    return t.strftime(SYNERGY_YEARLY_PATTERN)

score: 0.004184
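A quick sketch of the conversion; the pattern constants are not shown in the snippet, so the values below are assumptions consistent with the docstring:

from datetime import datetime

# Assumed values for the constants referenced above (not part of the snippet).
SYNERGY_MONTHLY_PATTERN = '%Y%m0000'
SYNERGY_YEARLY_PATTERN = '%Y000000'

t = datetime.strptime('2015060000', SYNERGY_MONTHLY_PATTERN)
print(t.strftime(SYNERGY_YEARLY_PATTERN))  # -> '2015000000'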
def get_xpath_branch(xroot, xpath):
    """ :return: the relative part of an XPATH: that which extends past the root provided """
    if xroot and xpath and xpath.startswith(xroot):
        xpath = xpath[len(xroot):]
        xpath = xpath.lstrip(XPATH_DELIM)

    return xpath

score: 0.00722
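A usage sketch, assuming the function above is in scope and that XPATH_DELIM is the '/' delimiter (an assumption; the constant is not shown):

XPATH_DELIM = '/'  # assumed value of the module constant

# Strips the root and the leading delimiter, leaving the relative branch:
print(get_xpath_branch('/root/a', '/root/a/b/c'))  # -> 'b/c'
print(get_xpath_branch('/root/a', '/other/path'))  # unchanged: '/other/path'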
def doublewell(theta):
    """Pointwise minimum of two quadratic bowls"""
    k0, k1, depth = 0.01, 100, 0.5
    shallow = 0.5 * k0 * theta ** 2 + depth
    deep = 0.5 * k1 * theta ** 2
    obj = float(np.minimum(shallow, deep))
    grad = np.where(deep < shallow, k1 * theta, k0 * theta)
    return obj, grad

score: 0.003236
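A small check of the two regimes, assuming the function above is importable:

import numpy as np

# Near the origin the steep bowl (k1) is lower; far away the shallow bowl
# (k0, offset by `depth`) takes over.
obj, grad = doublewell(0.05)
print(obj, grad)  # 0.125, 5.0   (deep bowl active: 0.5 * 100 * 0.05**2)
obj, grad = doublewell(2.0)
print(obj, grad)  # 0.52, 0.02   (shallow bowl active: 0.5 * 0.01 * 4 + 0.5)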
def kill(self) -> None:
    """Kill ffmpeg job."""
    self._proc.kill()
    self._loop.run_in_executor(None, self._proc.communicate)

score: 0.013793
def as_integer_type(ary):
    '''
    Returns argument as an integer array, converting floats if convertible.
    Raises ValueError if it's a float array with nonintegral values.
    '''
    ary = np.asanyarray(ary)
    if is_integer_type(ary):
        return ary
    rounded = np.rint(ary)
    if np.any(rounded != ary):
        raise ValueError("argument array must contain only integers")
    return rounded.astype(int)

score: 0.00237
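A usage sketch, assuming as_integer_type and the snippet's is_integer_type helper are importable:

import numpy as np

print(as_integer_type(np.array([1.0, 2.0])))  # -> [1 2], now an integer dtype
try:
    as_integer_type(np.array([1.5]))
except ValueError as e:
    print(e)  # argument array must contain only integers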
def get_datetime(epoch):
    '''
    get datetime from an epoch timestamp

    >>> get_datetime(1432188772)
    datetime.datetime(2015, 5, 21, 6, 12, 52)
    '''
    t = time.gmtime(epoch)
    dt = datetime.datetime(*t[:6])
    return dt

score: 0.004184
def _get_vnet(self, adapter_number):
    """
    Return the vnet to use in ubridge
    """
    vnet = "ethernet{}.vnet".format(adapter_number)
    if vnet not in self._vmx_pairs:
        raise VMwareError("vnet {} not in VMX file".format(vnet))
    return vnet

score: 0.006897
def get_mapreduce_yaml(parse=parse_mapreduce_yaml):
    """Locates mapreduce.yaml, loads and parses its info.

    Args:
      parse: Used for testing.

    Returns:
      MapReduceYaml object.

    Raises:
      errors.BadYamlError: when contents is not a valid mapreduce.yaml file or
      the file is missing.
    """
    mr_yaml_path = find_mapreduce_yaml()
    if not mr_yaml_path:
        raise errors.MissingYamlError()
    mr_yaml_file = open(mr_yaml_path)
    try:
        return parse(mr_yaml_file.read())
    finally:
        mr_yaml_file.close()

score: 0.015474
def determine_keymap(qteMain=None):
    """
    Return the conversion from keys and modifiers to Qt constants.

    This mapping depends on the used OS and keyboard layout.

    .. warning:: This method is currently only a dummy that always returns
       the same mapping from characters to keys. It works fine on my Linux
       and Windows 7 machine using English/US keyboard layouts, but other
       layouts will eventually have to be supported.
    """
    if qteMain is None:
        # This case should only happen for testing purposes.
        qte_global.Qt_key_map = default_qt_keymap
        qte_global.Qt_modifier_map = default_qt_modifier_map
    else:
        doc = 'Conversion table from local characters to Qt constants'
        qteMain.qteDefVar("Qt_key_map", default_qt_keymap, doc=doc)

        doc = 'Conversion table from local modifier keys to Qt constants'
        qteMain.qteDefVar("Qt_modifier_map", default_qt_modifier_map, doc=doc)

score: 0.001037
def list_networks(kwargs=None, call=None):
    '''
    List all the standard networks for this VMware environment

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_networks my-vmware-config
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_networks function must be called with '
            '-f or --function.'
        )

    return {'Networks': salt.utils.vmware.list_networks(_get_si())}

score: 0.002203
def subvol_create(self, path):
    """
    Create a btrfs subvolume in the specified path

    :param path: path to create
    """
    args = {'path': path}
    self._subvol_chk.check(args)
    self._client.sync('btrfs.subvol_create', args)

score: 0.00692
def set_endTimestamp(self, etimestamp=None):
    """
    Set the end timestamp of the linguistic processor, set to None for the current time

    @type etimestamp: string
    @param etimestamp: version of the linguistic processor
    """
    if etimestamp is None:
        import time
        etimestamp = time.strftime('%Y-%m-%dT%H:%M:%S%Z')
    self.node.set('endTimestamp', etimestamp)

score: 0.011876
def suggest_terms(self, fields, prefix, handler='terms', **kwargs):
    """
    Accepts a list of field names and a prefix

    Returns a dictionary keyed on field name containing a list of
    ``(term, count)`` pairs

    Requires Solr 1.4+.
    """
    params = {
        'terms.fl': fields,
        'terms.prefix': prefix,
    }
    params.update(kwargs)
    response = self._suggest_terms(params, handler=handler)
    result = self.decoder.decode(response)
    terms = result.get("terms", {})
    res = {}

    # in Solr 1.x the value of terms is a flat list:
    #   ["field_name", ["dance",23,"dancers",10,"dancing",8,"dancer",6]]
    #
    # in Solr 3.x the value of terms is a dict:
    #   {"field_name": ["dance",23,"dancers",10,"dancing",8,"dancer",6]}
    if isinstance(terms, (list, tuple)):
        terms = dict(zip(terms[0::2], terms[1::2]))

    for field, values in terms.items():
        tmp = []

        while values:
            tmp.append((values.pop(0), values.pop(0)))

        res[field] = tmp

    self.log.debug("Found '%d' Term suggestions results.",
                   sum(len(j) for i, j in res.items()))

    return res

score: 0.002408
def lock_input_target_config_target_candidate_candidate(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    lock = ET.Element("lock")
    config = lock
    input = ET.SubElement(lock, "input")
    target = ET.SubElement(input, "target")
    config_target = ET.SubElement(target, "config-target")
    candidate = ET.SubElement(config_target, "candidate")
    candidate = ET.SubElement(candidate, "candidate")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)

score: 0.003472
def _dash_escape_text(text: str) -> str:
    """
    Add dash '-' (0x2D) and space ' ' (0x20) as prefix on each line

    :param text: Text to dash-escape
    :return:
    """
    dash_escaped_text = str()
    for line in text.splitlines(True):
        # add dash '-' (0x2D) and space ' ' (0x20) as prefix
        dash_escaped_text += DASH_ESCAPE_PREFIX + line
    return dash_escaped_text

score: 0.00464
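A usage sketch, assuming the function above is in scope; the prefix constant is not shown, so the value below is an assumption in line with RFC 4880 dash-escaping:

# Assumed value for the module constant (dash + space).
DASH_ESCAPE_PREFIX = '- '

print(repr(_dash_escape_text('line1\n-line2\n')))
# -> '- line1\n- -line2\n'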
def validate_account_number(self, account):
    """
    Check whether **account** is a valid account number

    :param account: Account number to check
    :type account: str

    :raises: :py:exc:`nano.rpc.RPCException`

    >>> rpc.validate_account_number(
    ...     account="xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000"
    ... )
    True
    """
    account = self._process_value(account, 'account')
    payload = {"account": account}
    resp = self.call('validate_account_number', payload)
    return resp['valid'] == '1'

score: 0.00491
def isItemAllowed(self, obj):
    """Returns true if the current analysis to be rendered has a slot
    assigned for the current layout.

    :param obj: analysis to be rendered as a row in the list
    :type obj: ATContentType/DexterityContentType
    :return: True if the obj has a slot assigned. Otherwise, False.
    :rtype: bool
    """
    uid = api.get_uid(obj)
    if not self.get_item_slot(uid):
        logger.warning("Slot not assigned to item %s" % uid)
        return False
    return BaseView.isItemAllowed(self, obj)

score: 0.003448
def add_star(self, nodes, t=None):
    """Add a star at time t.

    The first node in nodes is the middle of the star.
    It is connected to all other nodes.

    Parameters
    ----------
    nodes : iterable container
        A container of nodes.
    t : snapshot id (default=None)

    See Also
    --------
    add_path, add_cycle

    Examples
    --------
    >>> G = dn.DynGraph()
    >>> G.add_star([0,1,2,3], t=0)
    """
    nlist = list(nodes)
    v = nlist[0]
    interaction = ((v, n) for n in nlist[1:])
    self.add_interactions_from(interaction, t)

score: 0.003086
def sample_upper_hull(upper_hull, random_stream):
    """
    Return a single value randomly sampled from the given `upper_hull`.

    Parameters
    ----------
    upper_hull : List[pyars.hull.HullNode]
        Upper hull to evaluate.
    random_stream : numpy.random.RandomState
        (Seeded) stream of random values to use during sampling.

    Returns
    ----------
    sample : float
        Single value randomly sampled from `upper_hull`.
    """
    cdf = cumsum([node.pr for node in upper_hull])

    # randomly choose a line segment
    U = random_stream.rand()
    node = next(
        (node for node, cdf_value in zip(upper_hull, cdf) if U < cdf_value),
        upper_hull[-1]  # default is last line segment
    )

    # sample along that line segment
    U = random_stream.rand()

    m, left, right = node.m, node.left, node.right

    M = max(m * right, m * left)
    x = (log(U * (exp(m * right - M) - exp(m * left - M)) + exp(m * left - M)) + M) / m

    assert(x >= left and x <= right)

    if isinf(x) or isnan(x):
        raise ValueError("sampled an infinite or 'nan' x")

    return x

score: 0.001787
def _take_bits(buf, count):
    """Return the booleans that were packed into bytes."""
    # TODO: Verify output
    bytes_count = (count + 7) // 8
    # number of bits carried by the final byte; a full 8 when count is a
    # multiple of 8 (plain `count % 8` would silently drop those bits)
    bytes_mod = count % 8 or 8
    data = _unpack_from(buf, 'B', bytes_count)

    values = []
    for i, byte in enumerate(data):
        for _ in range(8 if i != bytes_count - 1 else bytes_mod):
            # TODO: Convert to True / False
            values.append(byte & 0b10000000)
            byte <<= 1
    return values

score: 0.002132
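A same-module sketch of how this behaves; _unpack_from is a module-private helper not shown above, so the stand-in below is an assumption that it wraps struct.unpack_from:

import struct

# Assumed stand-in for the module-private _unpack_from helper.
def _unpack_from(buf, fmt, count):
    return struct.unpack_from('<{}{}'.format(count, fmt), buf)

# 0xA0 = 0b10100000; taking 3 bits yields masked values whose truthiness
# follows the bit pattern 1, 0, 1.
print(_take_bits(b'\xa0', 3))  # -> [128, 0, 128]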
def create_weight_stops(breaks):
    """Convert data breaks into a heatmap-weight ramp
    """
    num_breaks = len(breaks)
    weight_breaks = scale_between(0, 1, num_breaks)
    stops = []

    for i, b in enumerate(breaks):
        stops.append([b, weight_breaks[i]])
    return stops

score: 0.003484
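A usage sketch; scale_between is not shown in the snippet, so the stand-in below (evenly spaced values over [min, max]) is an assumption about its behavior:

import numpy as np

# Assumed stand-in for the scale_between helper referenced above.
def scale_between(minval, maxval, num):
    return list(np.linspace(minval, maxval, num))

print(create_weight_stops([10, 20, 30]))
# -> [[10, 0.0], [20, 0.5], [30, 1.0]]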
def get_option_parser():
    """
    Build an ``optparse.OptionParser`` for pyrpo commandline use
    """
    import optparse

    prs = optparse.OptionParser(
        usage=("$0 pyrpo [-h] [-v] [-q] [-s .] "
               "[-r <report>] [--thg]"))

    prs.add_option('-s', '--scan',
                   dest='scan',
                   action='append',
                   default=[],
                   help='Path(s) to scan for repositories')
    prs.add_option('-r', '--report',
                   dest='reports',
                   action='append',
                   default=[],
                   help=("""origin, status, full, gitmodule, json, sh, """
                         """str, pip, hgsub"""))
    prs.add_option('--thg',
                   dest='thg_report',
                   action='store_true',
                   help='Write a thg-reporegistry.xml file to stdout')
    prs.add_option('--template',
                   dest='report_template',
                   action='store',
                   help='Report template')

    prs.add_option('-v', '--verbose',
                   dest='verbose',
                   action='store_true',)
    prs.add_option('-q', '--quiet',
                   dest='quiet',
                   action='store_true',)
    return prs

score: 0.000775
def _load_audio_file(self):
    """
    Load audio in memory.

    :rtype: :class:`~aeneas.audiofile.AudioFile`
    """
    self._step_begin(u"load audio file")
    # NOTE file_format=None forces conversion to
    #      PCM16 mono WAVE with default sample rate
    audio_file = AudioFile(
        file_path=self.task.audio_file_path_absolute,
        file_format=None,
        rconf=self.rconf,
        logger=self.logger
    )
    audio_file.read_samples_from_file()
    self._step_end()
    return audio_file

score: 0.003478
def _join_all_filenames_and_text(self):
    """
    *join all file names, directory names and text content together*
    """
    self.log.info('starting the ``_join_all_filenames_and_text`` method')

    contentString = u""
    for i in self.directoryContents:
        contentString += u"%(i)s\n" % locals()
        if os.path.isfile(os.path.join(i)):
            if i[-4:] in [".png", ".jpg", ".gif"]:
                continue
            readFile = codecs.open(i, encoding='ISO-8859-1', mode='r')
            if ".DS_Store" in i:
                continue
            data = readFile.read()
            contentString += u"%(data)s\n" % locals()
            readFile.close()

    self.contentString = contentString

    self.log.info('completed the ``_join_all_filenames_and_text`` method')
    return None

score: 0.002235
def duplicate_sheet(
    self,
    source_sheet_id,
    insert_sheet_index=None,
    new_sheet_id=None,
    new_sheet_name=None
):
    """Duplicates the contents of a sheet.

    :param int source_sheet_id: The sheet ID to duplicate.
    :param int insert_sheet_index: (optional) The zero-based index
        where the new sheet should be inserted. The index of all sheets
        after this are incremented.
    :param int new_sheet_id: (optional) The ID of the new sheet.
        If not set, an ID is chosen. If set, the ID must not conflict with
        any existing sheet ID. If set, it must be non-negative.
    :param str new_sheet_name: (optional) The name of the new sheet.
        If empty, a new name is chosen for you.

    :returns: a newly created :class:`<gspread.models.Worksheet>`.

    .. versionadded:: 3.1.0
    """
    body = {
        'requests': [{
            'duplicateSheet': {
                'sourceSheetId': source_sheet_id,
                'insertSheetIndex': insert_sheet_index,
                'newSheetId': new_sheet_id,
                'newSheetName': new_sheet_name
            }
        }]
    }

    data = self.batch_update(body)

    properties = data['replies'][0]['duplicateSheet']['properties']

    worksheet = Worksheet(self, properties)

    return worksheet

score: 0.001845
def SegmentMean(a, ids):
    """
    Segmented mean op.
    """
    func = lambda idxs: np.mean(a[idxs], axis=0)
    # note: returns a one-element tuple (op outputs are tuples)
    return seg_map(func, a, ids),

score: 0.013699
def workspace_backup_add(ctx):
    """
    Create a new backup
    """
    backup_manager = WorkspaceBackupManager(Workspace(
        ctx.resolver,
        directory=ctx.directory,
        mets_basename=ctx.mets_basename,
        automatic_backup=ctx.automatic_backup))
    backup_manager.add()

score: 0.007663
def get_past_events(self):
    """
    Get past PythonKC meetup events.

    Returns
    -------
    List of ``pythonkc_meetups.types.MeetupEvent``, ordered by event time,
    descending.

    Exceptions
    ----------
    * PythonKCMeetupsBadJson
    * PythonKCMeetupsBadResponse
    * PythonKCMeetupsMeetupDown
    * PythonKCMeetupsNotJson
    * PythonKCMeetupsRateLimitExceeded
    """
    def get_attendees(event):
        return [attendee for event_id, attendee in events_attendees
                if event_id == event['id']]

    def get_photos(event):
        return [photo for event_id, photo in events_photos
                if event_id == event['id']]

    params = {'key': self._api_key,
              'group_urlname': GROUP_URLNAME,
              'status': 'past',
              'desc': 'true'}
    if self._num_past_events:
        params['page'] = str(self._num_past_events)

    query = urllib.urlencode(params)
    url = '{0}?{1}'.format(EVENTS_URL, query)
    data = self._http_get_json(url)

    events = data['results']
    event_ids = [event['id'] for event in events]
    events_attendees = self.get_events_attendees(event_ids)
    events_photos = self.get_events_photos(event_ids)

    return [parse_event(event, get_attendees(event), get_photos(event))
            for event in events]

score: 0.001379
def de_casteljau_one_round(nodes, lambda1, lambda2):
    """Perform one round of de Casteljau's algorithm.

    .. note::

       This is a helper for :func:`_specialize_curve`. It does not have a
       Fortran speedup because it is **only** used by a function which has
       a Fortran speedup.

    The weights are assumed to sum to one.

    Args:
        nodes (numpy.ndarray): Control points for a curve.
        lambda1 (float): First barycentric weight on interval.
        lambda2 (float): Second barycentric weight on interval.

    Returns:
        numpy.ndarray: The nodes for a "blended" curve one degree lower.
    """
    return np.asfortranarray(lambda1 * nodes[:, :-1] + lambda2 * nodes[:, 1:])

score: 0.001383
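A worked call, assuming the function above is importable, on a quadratic Bezier curve where each column is a 2D control point:

import numpy as np

nodes = np.asfortranarray([[0.0, 1.0, 2.0],
                           [0.0, 2.0, 0.0]])
# One round at the parameter midpoint (weights sum to one): each output
# column is the convex blend of two adjacent control points.
print(de_casteljau_one_round(nodes, 0.5, 0.5))
# -> [[0.5 1.5]
#     [1.  1. ]]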
def get_method_returning_field_value(self, field_name):
    """
    Field values can be obtained from view or core.
    """
    return (
        super().get_method_returning_field_value(field_name) or
        self.core.get_method_returning_field_value(field_name)
    )

score: 0.006734
def start(self):
    '''
    Starts execution of the script
    '''
    # invoke the appropriate sub-command as requested from command-line
    try:
        self.args.func()
    except SystemExit as e:
        if e.code != 0:
            raise
    except KeyboardInterrupt:
        self.log.warning("exited via keyboard interrupt")
    except:
        self.log.exception("exited start function")
        # set exit code so we know it did not end successfully
        # TODO different exit codes based on signals ?
    finally:
        self._flush_metrics_q.put(None, block=True)
        self._flush_metrics_q.put(None, block=True, timeout=1)

    self.log.debug("exited_successfully")

score: 0.003947
def system_properties_present(server=None, **kwargs):
    '''
    Ensures that the system properties are present

    properties
        The system properties
    '''
    ret = {'name': '', 'result': None, 'comment': None, 'changes': {}}

    del kwargs['name']
    try:
        data = __salt__['glassfish.get_system_properties'](server=server)
    except requests.ConnectionError as error:
        if __opts__['test']:
            ret['changes'] = kwargs
            ret['result'] = None
            return ret
        else:
            ret['error'] = "Can't connect to the server"
            return ret

    ret['changes'] = {'data': data, 'kwargs': kwargs}
    if not data == kwargs:
        data.update(kwargs)
        if not __opts__['test']:
            try:
                __salt__['glassfish.update_system_properties'](data, server=server)
                ret['changes'] = kwargs
                ret['result'] = True
                ret['comment'] = 'System properties updated'
            except CommandExecutionError as error:
                ret['comment'] = error
                ret['result'] = False
        else:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['comment'] = 'System properties would have been updated'
    else:
        ret['changes'] = {}
        ret['result'] = True
        ret['comment'] = 'System properties are already up-to-date'
    return ret

score: 0.001408
def readlines(self, sizehint=-1):
    """
    readlines([size]) -> list of strings, each a line from the file.

    Call readline() repeatedly and return a list of the lines so read.
    The optional size argument, if given, is an approximate bound on the
    total number of bytes in the lines returned.
    """
    if self.closed:
        raise ValueError('I/O operation on closed file')
    lines = []
    while True:
        line = self.readline(sizehint)
        if not line:
            break
        lines.append(line)
        if sizehint >= 0:
            sizehint -= len(line)
            if sizehint <= 0:
                break
    return lines

score: 0.00274
def css_text(self, path, default=NULL, smart=False, normalize_space=True):
    """
    Get normalized text of node which matches the css path.
    """
    try:
        return get_node_text(self.css_one(path), smart=smart,
                             normalize_space=normalize_space)
    except IndexError:
        if default is NULL:
            raise
        else:
            return default

score: 0.004566
def validate(self, value):
    """Make sure that value is of the right type
    """
    if not isinstance(value, self.nested_klass):
        self.raise_error('NestedClass is of the wrong type: {0} vs expected {1}'
                         .format(value.__class__.__name__,
                                 self.nested_klass.__name__))

    super(NestedDocumentField, self).validate(value)

score: 0.010811
def dragMouseButtonLeft(self, coord, dest_coord, interval=0.5):
    """Drag the left mouse button without modifiers pressed.

    Parameters: coordinates to click on screen (tuple (x, y))
                dest coordinates to drag to (tuple (x, y))
                interval to send event of btn down, drag and up
    Returns: None
    """
    modFlags = 0
    self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags,
                           dest_coord=dest_coord)
    self._postQueuedEvents(interval=interval)

score: 0.003565
def _archive_self(self, logfile, key=JobDetails.topkey, status=JobStatus.unknown):
    """Write info about a job run by this `Link` to the job archive"""
    self._register_self(logfile, key, status)
    if self._job_archive is None:
        return
    self._job_archive.register_jobs(self.get_jobs())

score: 0.009346
def parse_json(self, page):
    '''Returns json feed.'''
    if not isinstance(page, basestring):
        page = util.decode_page(page)

    self.doc = json.loads(page)

    results = self.doc.get(self.result_name, [])
    if not results:
        self.check_status(self.doc.get('status'))
        return None

    return results

score: 0.017284
def myFunc(parameter):
    """This function will be executed on the remote host even if it was not
    available at launch."""
    print('Hello World from {0}!'.format(scoop.worker))

    # It is possible to get a constant anywhere
    print(shared.getConst('myVar')[2])

    # Parameters are handled as usual
    return parameter + 1

score: 0.002967
def download_dxfile(dxid, filename, chunksize=dxfile.DEFAULT_BUFFER_SIZE, append=False,
                    show_progress=False, project=None, describe_output=None, **kwargs):
    '''
    :param dxid: DNAnexus file ID or DXFile (file handler) object
    :type dxid: string or DXFile
    :param filename: Local filename
    :type filename: string
    :param append: If True, appends to the local file (default is to truncate
        local file if it exists)
    :type append: boolean
    :param project: project to use as context for this download (may affect
        which billing account is billed for this download). If None or
        DXFile.NO_PROJECT_HINT, no project hint is supplied to the API server.
    :type project: str or None
    :param describe_output: (experimental) output of the file-xxxx/describe API
        call, if available. It will make it possible to skip another describe
        API call. It should contain the default fields of the describe API call
        output and the "parts" field, not included in the output by default.
    :type describe_output: dict or None

    Downloads the remote file referenced by *dxid* and saves it to *filename*.

    Example::

        download_dxfile("file-xxxx", "localfilename.fastq")
    '''
    # retry the inner loop while there are retriable errors
    part_retry_counter = defaultdict(lambda: 3)
    success = False
    while not success:
        success = _download_dxfile(dxid, filename, part_retry_counter,
                                   chunksize=chunksize, append=append,
                                   show_progress=show_progress, project=project,
                                   describe_output=describe_output, **kwargs)

score: 0.003688
def paged_search_ext_s(self, base, scope, filterstr='(objectClass=*)', attrlist=None,
                       attrsonly=0, serverctrls=None, clientctrls=None, timeout=-1,
                       sizelimit=0):
    """
    Behaves exactly like LDAPObject.search_ext_s() but internally uses the
    simple paged results control to retrieve search results in chunks.
    """
    req_ctrl = SimplePagedResultsControl(True,
                                         size=self.conf_LDAP_SYNC_BIND_PAGESIZE,
                                         cookie='')

    # Send first search request
    msgid = self.search_ext(base, ldap.SCOPE_SUBTREE, filterstr,
                            attrlist=attrlist,
                            serverctrls=(serverctrls or []) + [req_ctrl])

    results = []

    while True:
        rtype, rdata, rmsgid, rctrls = self.result3(msgid)
        results.extend(rdata)

        # Extract the simple paged results response control
        pctrls = [c for c in rctrls
                  if c.controlType == SimplePagedResultsControl.controlType]

        if pctrls:
            if pctrls[0].cookie:
                # Copy cookie from response control to request control
                req_ctrl.cookie = pctrls[0].cookie
                msgid = self.search_ext(base, ldap.SCOPE_SUBTREE, filterstr,
                                        attrlist=attrlist,
                                        serverctrls=(serverctrls or []) + [req_ctrl])
            else:
                break

    return results

score: 0.007018
def fromordinal(cls, n):
    """Construct a date from a proleptic Gregorian ordinal.

    January 1 of year 1 is day 1. Only the year, month and day are
    non-zero in the result.
    """
    y, m, d = _ord2ymd(n)
    return cls(y, m, d)

score: 0.007634
def unblockqueue(self, queue):
    '''
    Remove blocked events from the queue and all subqueues. Usually used
    after queue clear/unblockall to prevent leak.

    :returns: the cleared events
    '''
    subqueues = set()

    def allSubqueues(q):
        subqueues.add(q)
        subqueues.add(q.defaultQueue)
        for v in q.queueindex.values():
            if len(v) == 3:
                allSubqueues(v[1])

    allSubqueues(queue)
    events = [k for k, v in self.blockEvents.items() if v in subqueues]
    for e in events:
        del self.blockEvents[e]
    return events

score: 0.009231
def split_levels(fields):
    """
    Convert dot-notation such as ['a', 'a.b', 'a.d', 'c'] into
    current-level fields ['a', 'c'] and next-level fields
    {'a': ['b', 'd']}.
    """
    first_level_fields = []
    next_level_fields = {}

    if not fields:
        return first_level_fields, next_level_fields

    if not isinstance(fields, list):
        fields = [a.strip() for a in fields.split(",") if a.strip()]
    for e in fields:
        if "." in e:
            first_level, next_level = e.split(".", 1)
            first_level_fields.append(first_level)
            next_level_fields.setdefault(first_level, []).append(next_level)
        else:
            first_level_fields.append(e)

    first_level_fields = list(set(first_level_fields))
    return first_level_fields, next_level_fields

score: 0.001225
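A usage sketch, assuming the function above is importable; note that the set() de-duplication step means the order of the first-level list is not guaranteed:

first, nested = split_levels(['a', 'a.b', 'a.d', 'c'])
print(sorted(first))  # -> ['a', 'c']
print(nested)         # -> {'a': ['b', 'd']}

# A comma-separated string works too:
print(split_levels('a.b, c'))  # -> (['a', 'c'], {'a': ['b']}), up to ordering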
def add_sql_operation(self, app_label, sql_name, operation, dependencies):
    """
    Add SQL operation and register it to be used as dependency for
    further sequential operations.
    """
    deps = [(dp[0], SQL_BLOB, dp[1], self._sql_operations.get(dp))
            for dp in dependencies]

    self.add_operation(app_label, operation, dependencies=deps)
    self._sql_operations[(app_label, sql_name)] = operation

score: 0.006881
def stack_hist(ax, stacked_data, sty_cycle, bottoms=None,
               hist_func=None, labels=None,
               plot_func=None, plot_kwargs=None):
    """
    ax : axes.Axes
        The axes to add artists to

    stacked_data : array or Mapping
        A (N, M) shaped array.  The first dimension will be iterated over to
        compute histograms row-wise

    sty_cycle : Cycler or iterable of dict
        Style to apply to each set

    bottoms : array, optional
        The initial positions of the bottoms, defaults to 0

    hist_func : callable, optional
        Must have signature `bin_vals, bin_edges = f(data)`.
        `bin_edges` expected to be one longer than `bin_vals`

    labels : list of str, optional
        The label for each set.

        If not given and stacked data is an array defaults to 'default set {n}'

        If stacked_data is a mapping, and labels is None, default to the keys
        (which may come out in a random order).

        If stacked_data is a mapping and labels is given then only the
        columns listed will be plotted.

    plot_func : callable, optional
        Function to call to draw the histogram must have signature:

          ret = plot_func(ax, edges, top, bottoms=bottoms,
                          label=label, **kwargs)

    plot_kwargs : dict, optional
        Any extra kwargs to pass through to the plotting function.  This
        will be the same for all calls to the plotting function and will
        over-ride the values in cycle.

    Returns
    -------
    arts : dict
        Dictionary of artists keyed on their labels
    """
    # deal with default binning function
    if hist_func is None:
        hist_func = np.histogram

    # deal with default plotting function
    if plot_func is None:
        plot_func = filled_hist

    # deal with default
    if plot_kwargs is None:
        plot_kwargs = {}
    print(plot_kwargs)
    try:
        l_keys = stacked_data.keys()
        label_data = True
        if labels is None:
            labels = l_keys
    except AttributeError:
        label_data = False
        if labels is None:
            labels = itertools.repeat(None)

    if label_data:
        loop_iter = enumerate((stacked_data[lab], lab, s)
                              for lab, s in zip(labels, sty_cycle))
    else:
        loop_iter = enumerate(zip(stacked_data, labels, sty_cycle))

    arts = {}
    for j, (data, label, sty) in loop_iter:
        if label is None:
            label = 'dflt set {n}'.format(n=j)
        label = sty.pop('label', label)
        vals, edges = hist_func(data)
        if bottoms is None:
            bottoms = np.zeros_like(vals)
        top = bottoms + vals
        print(sty)
        sty.update(plot_kwargs)
        print(sty)
        ret = plot_func(ax, edges, top, bottoms=bottoms,
                        label=label, **sty)
        bottoms = top
        arts[label] = ret
    ax.legend(fontsize=10)
    return arts

score: 0.000338
def install_json(self):
    """Return install.json contents."""
    file_fqpn = os.path.join(self.app_path, 'install.json')
    if self._install_json is None:
        if os.path.isfile(file_fqpn):
            try:
                with open(file_fqpn, 'r') as fh:
                    self._install_json = json.load(fh)
            except ValueError as e:
                self.handle_error('Failed to load "{}" file ({}).'.format(file_fqpn, e))
        else:
            self.handle_error('File "{}" could not be found.'.format(file_fqpn))
    return self._install_json

score: 0.006504
def unix_install():
    """
    Edits or creates .bashrc, .bash_profile, and .profile files in the users
    HOME directory in order to add your current directory (hopefully your
    PmagPy directory) and assorted lower directories in the PmagPy/programs
    directory to your PATH environment variable. It also adds the PmagPy and
    the PmagPy/programs directories to PYTHONPATH.
    """
    PmagPyDir = os.path.abspath(".")
    COMMAND = """\n
for d in %s/programs/*/ "%s/programs/"; do
    case ":$PATH:" in
        *":$d:"*) :;; # already there
        *) PMAGPATHS="$PMAGPATHS:$d";; # or PATH="$PATH:$new_entry"
    esac
done
export PYTHONPATH="$PYTHONPATH:%s:%s/programs/"
export PATH="$PATH:$PMAGPATHS" """ % (PmagPyDir, PmagPyDir, PmagPyDir, PmagPyDir)
    # not recommended, but hey it freaking works
    frc_path = os.path.join(os.environ["HOME"], ".bashrc")
    fbprof_path = os.path.join(os.environ["HOME"], ".bash_profile")
    fprof_path = os.path.join(os.environ["HOME"], ".profile")
    all_paths = [frc_path, fbprof_path, fprof_path]
    for f_path in all_paths:
        open_type = 'a'
        if not os.path.isfile(f_path):
            open_type = 'w+'
            fout = open(f_path, open_type)
            fout.write(COMMAND)
            fout.close()
        else:
            fin = open(f_path, 'r')
            current_f = fin.read()
            fin.close()
            if COMMAND not in current_f:
                fout = open(f_path, open_type)
                fout.write(COMMAND)
                fout.close()
    print("Install complete. Please restart the shell to complete install.\n"
          "If you are seeing strange or non-existent paths in your PATH or "
          "PYTHONPATH variable please manually check your .bashrc, "
          ".bash_profile, and .profile or attempt to reinstall.")

score: 0.002245
def dump_links(self, o):
    """Dump links."""
    params = {'versionId': o.version_id}
    data = {
        'self': url_for(
            '.object_api',
            bucket_id=o.bucket_id,
            key=o.key,
            _external=True,
            **(params if not o.is_head or o.deleted else {})
        ),
        'version': url_for(
            '.object_api',
            bucket_id=o.bucket_id,
            key=o.key,
            _external=True,
            **params
        )
    }

    if o.is_head and not o.deleted:
        data.update({'uploads': url_for(
            '.object_api',
            bucket_id=o.bucket_id,
            key=o.key,
            _external=True
        ) + '?uploads', })

    return data

score: 0.002421
def case_insensitive(self):
    """Matching packages distinguish between uppercase and lowercase
    """
    if "--case-ins" in self.flag:
        data = []
        data = Utils().package_name(self.PACKAGES_TXT)
        data_dict = Utils().case_sensitive(data)
        for pkg in self.packages:
            index = self.packages.index(pkg)
            for key, value in data_dict.iteritems():
                if key == pkg.lower():
                    self.packages[index] = value

score: 0.003759
def find(self, node, list):
    """
    Traverse the tree looking for matches.

    @param node: A node to match on.
    @type node: L{SchemaObject}
    @param list: A list to fill.
    @type list: list
    """
    if self.matcher.match(node):
        list.append(node)
        self.limit -= 1
        if self.limit == 0:
            return
    for c in node.rawchildren:
        self.find(c, list)
    return self

score: 0.004228
def get_signing_key(self, key_type="", owner="", kid=None, **kwargs):
    """
    Shortcut to use for signing keys only.

    :param key_type: Type of key (rsa, ec, oct, ..)
    :param owner: Who is the owner of the keys, "" == me (default)
    :param kid: A Key Identifier
    :param kwargs: Extra key word arguments
    :return: A possibly empty list of keys
    """
    return self.get("sig", key_type, owner, kid, **kwargs)

score: 0.00432
def update_summary(self, w):
    """Update summary.

    The new summary is a weighted average of reviews i.e.

    .. math::
       \\frac{\\sum_{r \\in R} \\mbox{weight}(r) \\times \\mbox{review}(r)}
            {\\sum_{r \\in R} \\mbox{weight}(r)},

    where :math:`R` is a set of reviewers reviewing this product,
    :math:`\\mbox{review}(r)` and :math:`\\mbox{weight}(r)` are
    the review and weight of the reviewer :math:`r`, respectively.

    Args:
      w: A weight function.

    Returns:
      absolute difference between old summary and updated one.
    """
    old = self.summary.v  # pylint: disable=no-member
    reviewers = self._graph.retrieve_reviewers(self)
    reviews = [self._graph.retrieve_review(r, self).score for r in reviewers]
    weights = [w(r.anomalous_score) for r in reviewers]
    if sum(weights) == 0:
        self.summary = np.mean(reviews)
    else:
        self.summary = np.average(reviews, weights=weights)
    return abs(self.summary.v - old)

score: 0.00275
def _fusion_to_dsl(tokens) -> FusionBase:
    """Convert a PyParsing data dictionary to a PyBEL fusion data dictionary.

    :param tokens: A PyParsing data dictionary representing a fusion
    :type tokens: ParseResult
    """
    func = tokens[FUNCTION]
    fusion_dsl = FUNC_TO_FUSION_DSL[func]
    member_dsl = FUNC_TO_DSL[func]

    partner_5p = member_dsl(
        namespace=tokens[FUSION][PARTNER_5P][NAMESPACE],
        name=tokens[FUSION][PARTNER_5P][NAME]
    )

    partner_3p = member_dsl(
        namespace=tokens[FUSION][PARTNER_3P][NAMESPACE],
        name=tokens[FUSION][PARTNER_3P][NAME]
    )

    range_5p = _fusion_range_to_dsl(tokens[FUSION][RANGE_5P])
    range_3p = _fusion_range_to_dsl(tokens[FUSION][RANGE_3P])

    return fusion_dsl(
        partner_5p=partner_5p,
        partner_3p=partner_3p,
        range_5p=range_5p,
        range_3p=range_3p,
    )

score: 0.001135
def show_parser_results(self, parsed_list, unparsed_list):
    """Compile a formatted list of un/successfully parsed files.

    :param parsed_list: A list of files that were parsed successfully.
    :type parsed_list: list(str)

    :param unparsed_list: A list of files that were not parsable.
    :type unparsed_list: list(str)

    :returns: A formatted message outlining what could be parsed.
    :rtype: str
    """
    parsed_message = self.tr(
        'The file(s) below were parsed successfully:\n')
    unparsed_message = self.tr(
        'The file(s) below were not parsed successfully:\n')
    parsed_contents = '\n'.join(parsed_list)
    unparsed_contents = '\n'.join(unparsed_list)
    if parsed_contents == '':
        parsed_contents = 'No successfully parsed files\n'
    if unparsed_contents == '':
        unparsed_contents = 'No failures in parsing files\n'
    full_messages = (
        parsed_message + parsed_contents + '\n\n' +
        unparsed_message + unparsed_contents)
    return full_messages

score: 0.001802
def stillRecording(self, deviceId, dataCount):
    """
    For a device that is recording, updates the last timestamp so we know
    when we last received data.

    :param deviceId: the device id.
    :param dataCount: the no of items of data recorded in this batch.
    :return:
    """
    status = self.recordingDevices[deviceId]
    if status is not None:
        if status['state'] == MeasurementStatus.RECORDING.name:
            status['last'] = datetime.datetime.utcnow().strftime(DATETIME_FORMAT)
            status['count'] = status['count'] + dataCount

score: 0.006645
def add_vertex(self, x, y, z):
    """
    Add a ``VEC3`` of ``floats`` to the ``vert_data`` buffer
    """
    self.vert_data.write(
        struct.pack('<f', x) +
        struct.pack('<f', y) +
        struct.pack('<f', z)
    )
    # retain min/max values
    self.vert_min = _list3_min(self.vert_min, (x, y, z))
    self.vert_max = _list3_max(self.vert_max, (x, y, z))

score: 0.004796
def _get_detail_value(var, attr):
    """
    Given a variable and one of its attributes that are available inside of
    a template, return its 'method' if it is a callable, its class name if
    it is a model manager, otherwise return its value
    """
    value = getattr(var, attr)

    # Rename common Django class names
    kls = getattr(getattr(value, '__class__', ''), '__name__', '')
    if kls in ('ManyRelatedManager', 'RelatedManager', 'EmptyManager'):
        return kls

    if callable(value):
        return 'routine'

    return value

score: 0.001825
def getModelSummaryAsKml(self, session, path=None, documentName=None, withStreamNetwork=True, withNodes=False,
                         styles={}):
    """
    Retrieve a KML representation of the model. Includes polygonized mask map and vector stream network.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
        path (str, optional): Path to file where KML file will be written. Defaults to None.
        documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
            Defaults to 'Stream Network'.
        withStreamNetwork (bool, optional): Include stream network. Defaults to True.
        withNodes (bool, optional): Include nodes. Defaults to False.
        styles (dict, optional): Custom styles to apply to KML geometry. Defaults to empty dictionary.

            Valid keys (styles) include:
               * streamLineColor: tuple/list of RGBA integers (0-255) e.g.: (255, 0, 0, 128)
               * streamLineWidth: float line width in pixels
               * nodeIconHref: link to icon image (PNG format) to represent nodes
                 (see: http://kml4earth.appspot.com/icons.html)
               * nodeIconScale: scale of the icon image
               * maskLineColor: tuple/list of RGBA integers (0-255) e.g.: (255, 0, 0, 128)
               * maskLineWidth: float line width in pixels
               * maskFillColor: tuple/list of RGBA integers (0-255) e.g.: (255, 0, 0, 128)

    Returns:
        str: KML string
    """
    # Get mask map
    watershedMaskCard = self.getCard('WATERSHED_MASK')
    maskFilename = watershedMaskCard.value
    maskExtension = maskFilename.strip('"').split('.')[1]

    maskMap = session.query(RasterMapFile).\
        filter(RasterMapFile.projectFile == self).\
        filter(RasterMapFile.fileExtension == maskExtension).\
        one()

    # Get mask map as a KML polygon
    statement = """
                SELECT val, ST_AsKML(geom) As polygon
                FROM (
                    SELECT (ST_DumpAsPolygons({0})).*
                    FROM {1} WHERE id={2}
                ) As foo
                ORDER BY val;
                """.format('raster', maskMap.tableName, maskMap.id)

    result = session.execute(statement)

    maskMapKmlPolygon = ''

    for row in result:
        maskMapKmlPolygon = row.polygon

    # Set Default Styles (note: KML colors are ordered AABBGGRR)
    streamLineColorValue = (255, 255, 0, 0)  # Blue
    streamLineWidthValue = 2
    nodeIconHrefValue = 'http://maps.google.com/mapfiles/kml/paddle/red-circle.png'
    nodeIconScaleValue = 1
    maskLineColorValue = (255, 0, 0, 255)
    maskFillColorValue = (128, 64, 64, 64)
    maskLineWidthValue = 2

    # Validate
    if 'streamLineColor' in styles:
        if len(styles['streamLineColor']) < 4:
            log.warning('streamLineColor style must be a list or a tuple of '
                        'four elements representing integer RGBA values.')
        else:
            userLineColor = styles['streamLineColor']
            streamLineColorValue = (userLineColor[3], userLineColor[2], userLineColor[1], userLineColor[0])

    if 'streamLineWidth' in styles:
        try:
            float(styles['streamLineWidth'])
            streamLineWidthValue = styles['streamLineWidth']
        except ValueError:
            log.warning('streamLineWidth must be a valid '
                        'number representing the width of the line in pixels.')

    if 'nodeIconHref' in styles:
        nodeIconHrefValue = styles['nodeIconHref']

    if 'nodeIconScale' in styles:
        try:
            float(styles['nodeIconScale'])
            nodeIconScaleValue = styles['nodeIconScale']
        except ValueError:
            log.warning('nodeIconScale must be a valid number '
                        'representing the scale of the icon.')

    if 'maskLineColor' in styles:
        if len(styles['maskLineColor']) < 4:
            log.warning('maskLineColor style must be a list or a tuple of four '
                        'elements representing integer RGBA values.')
        else:
            userLineColor = styles['maskLineColor']
            maskLineColorValue = (userLineColor[3], userLineColor[2], userLineColor[1], userLineColor[0])

    if 'maskFillColor' in styles:
        if len(styles['maskFillColor']) < 4:
            log.warning('maskFillColor style must be a list or a tuple of four '
                        'elements representing integer RGBA values.')
        else:
            userLineColor = styles['maskFillColor']
            maskFillColorValue = (userLineColor[3], userLineColor[2], userLineColor[1], userLineColor[0])

    if 'maskLineWidth' in styles:
        try:
            float(styles['maskLineWidth'])
            maskLineWidthValue = styles['maskLineWidth']
        except ValueError:
            log.warning('maskLineWidth must be a valid number representing '
                        'the width of the line in pixels.')

    if not documentName:
        documentName = self.name

    # Initialize KML Document
    kml = ET.Element('kml', xmlns='http://www.opengis.net/kml/2.2')
    document = ET.SubElement(kml, 'Document')
    docName = ET.SubElement(document, 'name')
    docName.text = documentName

    # Mask Map
    maskPlacemark = ET.SubElement(document, 'Placemark')
    maskPlacemarkName = ET.SubElement(maskPlacemark, 'name')
    maskPlacemarkName.text = 'Mask Map'

    # Mask Styles
    maskStyles = ET.SubElement(maskPlacemark, 'Style')

    # Set polygon line style
    maskLineStyle = ET.SubElement(maskStyles, 'LineStyle')

    # Set polygon line color and width
    maskLineColor = ET.SubElement(maskLineStyle, 'color')
    maskLineColor.text = '%02X%02X%02X%02X' % maskLineColorValue
    maskLineWidth = ET.SubElement(maskLineStyle, 'width')
    maskLineWidth.text = str(maskLineWidthValue)

    # Set polygon fill color
    maskPolyStyle = ET.SubElement(maskStyles, 'PolyStyle')
    maskPolyColor = ET.SubElement(maskPolyStyle, 'color')
    maskPolyColor.text = '%02X%02X%02X%02X' % maskFillColorValue

    # Mask Geometry
    maskPolygon = ET.fromstring(maskMapKmlPolygon)
    maskPlacemark.append(maskPolygon)

    if withStreamNetwork:
        # Get the channel input file for the stream network
        channelInputFile = self.channelInputFile

        # Retrieve Stream Links
        links = channelInputFile.getFluvialLinks()

        if withNodes:
            # Create the shared node style once; every node placemark
            # references it via '#node_styles'. Creating it per link would
            # emit duplicate Style elements with the same id.
            nodeStyles = ET.SubElement(document, 'Style', id='node_styles')

            # Hide labels
            nodeLabelStyle = ET.SubElement(nodeStyles, 'LabelStyle')
            nodeLabelScale = ET.SubElement(nodeLabelStyle, 'scale')
            nodeLabelScale.text = str(0)

            # Style icon
            nodeIconStyle = ET.SubElement(nodeStyles, 'IconStyle')

            # Set icon
            nodeIcon = ET.SubElement(nodeIconStyle, 'Icon')
            iconHref = ET.SubElement(nodeIcon, 'href')
            iconHref.text = nodeIconHrefValue

            # Set icon scale
            iconScale = ET.SubElement(nodeIconStyle, 'scale')
            iconScale.text = str(nodeIconScaleValue)

        # Stream Network
        for link in links:
            placemark = ET.SubElement(document, 'Placemark')
            placemarkName = ET.SubElement(placemark, 'name')
            placemarkName.text = 'Stream Link {0}'.format(str(link.linkNumber))

            # Create style tag and setup styles (named linkStyles to avoid
            # shadowing the styles argument)
            linkStyles = ET.SubElement(placemark, 'Style')

            # Set line style
            lineStyle = ET.SubElement(linkStyles, 'LineStyle')
            lineColor = ET.SubElement(lineStyle, 'color')
            lineColor.text = '%02X%02X%02X%02X' % streamLineColorValue
            lineWidth = ET.SubElement(lineStyle, 'width')
            lineWidth.text = str(streamLineWidthValue)

            # Add the geometry to placemark
            linkKML = link.getAsKml(session)

            if linkKML:
                lineString = ET.fromstring(linkKML)
                placemark.append(lineString)
            else:
                log.warning("No geometry found for link with id {0}".format(link.id))

            if withNodes:
                for node in link.nodes:
                    # New placemark for each node
                    nodePlacemark = ET.SubElement(document, 'Placemark')
                    nodePlacemarkName = ET.SubElement(nodePlacemark, 'name')
                    nodePlacemarkName.text = str(node.nodeNumber)

                    # Styles for the node
                    nodeStyleUrl = ET.SubElement(nodePlacemark, 'styleUrl')
                    nodeStyleUrl.text = '#node_styles'

                    nodeString = ET.fromstring(node.getAsKml(session))
                    nodePlacemark.append(nodeString)

    kmlString = ET.tostring(kml)

    if path:
        with open(path, 'w') as f:
            f.write(kmlString)

    return kmlString
0.00381
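A minimal usage sketch for the method above. It is not runnable standalone: `session` and `projectFile` are hypothetical names standing in for a GsshaPy project loaded into a PostGIS-enabled database; the style keys are the ones listed in the docstring.

# Usage sketch (assumptions: `session` is a PostGIS-bound SQLAlchemy session,
# `projectFile` is a loaded ProjectFile instance).
customStyles = {
    'streamLineColor': (0, 128, 255, 255),  # RGBA integers, 0-255
    'streamLineWidth': 3.0,
    'maskFillColor': (64, 64, 64, 128),
}

kmlString = projectFile.getModelSummaryAsKml(
    session=session,
    path='model_summary.kml',   # also writes the KML document to this file
    documentName='My Watershed',
    withStreamNetwork=True,
    withNodes=False,
    styles=customStyles,
)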
def tags_with_text(xml, tags=None):
    """Return a list of tags that contain text retrieved recursively from an XML tree."""
    if tags is None:
        tags = []
    for element in xml:
        if element.text is not None:
            tags.append(element)
        elif len(element) > 0:  # pylint: disable=len-as-condition
            tags_with_text(element, tags)
        else:
            message = 'Unknown XML structure: {}'.format(element)
            raise ValueError(message)
    return tags
0.00198
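A self-contained check of the recursion above, assuming tags_with_text is importable from wherever it is defined.

import xml.etree.ElementTree as ET

# <a> carries text directly; <c> is reached by descending through <b>,
# which has children but no text of its own.
doc = ET.fromstring('<root><a>hello</a><b><c>world</c></b></root>')
found = tags_with_text(doc)
print([(el.tag, el.text) for el in found])  # [('a', 'hello'), ('c', 'world')]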
def create_empty(self, name=None, renderers=None, RootNetworkList=None, verbose=False):
    """
    Create a new, empty network. The new network may be created as part of
    an existing network collection or a new network collection.

    :param name (string, optional): Enter the name of the new network.
    :param renderers (string, optional): Select the renderer to use for the new network view.
        By default, the standard Cytoscape 2D renderer (Ding) will be used = [''],
    :param RootNetworkList (string, optional): Choose the network collection the new network
        should be part of. If no network collection is selected, a new network collection
        is created. = [' -- Create new network collection --', 'cy:command_documentation_generation']
    :param verbose: print more
    """
    PARAMS = set_param(["name", "renderers", "RootNetworkList"], [name, renderers, RootNetworkList])
    response = api(url=self.__url + "/create empty", PARAMS=PARAMS, method="POST", verbose=verbose)
    return response
0.014585
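A hedged call sketch: `cy.network` is a hypothetical client object standing in for an instance of the class exposing create_empty(), and a running Cytoscape instance with cyREST enabled is assumed. Only the keyword arguments come from the signature above.

# Hypothetical client wiring; keyword names match the method signature.
response = cy.network.create_empty(
    name='my empty network',
    RootNetworkList=' -- Create new network collection --',
    verbose=True,
)
print(response)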
def calculate_hash(options):
    """returns an option_collection_hash given a list of options"""
    options = sorted(list(options))
    sha_hash = sha1()
    # equivalent to loop over the options and call sha_hash.update()
    sha_hash.update(''.join(options).encode('utf-8'))
    return sha_hash.hexdigest()
0.006006
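Because the options are sorted before hashing, the digest is order-insensitive. A quick demonstration, assuming calculate_hash is importable:

h1 = calculate_hash(['debug', 'asan', 'e10s'])
h2 = calculate_hash(['e10s', 'debug', 'asan'])
assert h1 == h2   # order of the options does not matter
print(h1)         # 40-character hex SHA-1 digest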
def standard_parser(cls, verbose=True,
                    interactive=False,
                    no_interactive=False,
                    simulate=False,
                    quiet=False,
                    overwrite=False):
    """
    Create a standard ``OptionParser`` instance.

    Typically used like::

        class MyCommand(Command):
            parser = Command.standard_parser()

    Subclasses may redefine ``standard_parser``, so use the
    nearest superclass's class method.
    """
    parser = BoolOptionParser()
    if verbose:
        parser.add_option('-v', '--verbose',
                          action='count',
                          dest='verbose',
                          default=0)
    if quiet:
        parser.add_option('-q', '--quiet',
                          action='count',
                          dest='quiet',
                          default=0)
    if no_interactive:
        parser.add_option('--no-interactive',
                          action="count",
                          dest="no_interactive",
                          default=0)
    if interactive:
        parser.add_option('-i', '--interactive',
                          action='count',
                          dest='interactive',
                          default=0)
    if simulate:
        parser.add_option('-n', '--simulate',
                          action='store_true',
                          dest='simulate',
                          default=False)
    if overwrite:
        parser.add_option('-f', '--overwrite',
                          dest="overwrite",
                          action="store_true",
                          help="Overwrite files (warnings will be emitted for non-matching files otherwise)")
    return parser
0.004126
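A sketch of the subclassing pattern the docstring describes. `Command` and `BoolOptionParser` come from the surrounding package; here `BoolOptionParser` is assumed to parse like a standard optparse.OptionParser.

# Hypothetical subclass following the docstring's own example.
class MyCommand(Command):
    parser = Command.standard_parser(verbose=True, simulate=True)

options, args = MyCommand.parser.parse_args(['-v', '-n'])
print(options.verbose)   # 1 (count of -v flags seen)
print(options.simulate)  # True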
def normalize_code_coicop(code):
    '''normalize_code_coicop harmonizes the posteCOICOP integer column of the
    matrice_passage_data_frame table by turning it into a 5-character string,
    so that COICOP items can later be aggregated into the 12 aggregate items
    of the national accounts nomenclature. Each item has 5 characters; the
    first two (between 01 and 12) correspond to those aggregate national
    accounts items.
    '''
    # TODO: check the formula!!!
    try:
        code = unicode(code)
    except NameError:  # Python 3 has no unicode built-in; str is already fine
        code = code
    if len(code) == 3:
        code_coicop = "0" + code + "0"  # "{0}{1}{0}".format(0, code)
    elif len(code) == 4:
        if not code.startswith("0") and not code.startswith("1") and \
                not code.startswith("45") and not code.startswith("9"):
            code_coicop = "0" + code
        # 022.. = cigarettes and tobacco => grouped with alcohol (021.0)
        elif code.startswith("0"):
            code_coicop = code + "0"
        elif code in ["1151", "1181", "4552", "4522", "4511", "9122", "9151", "9211", "9341", "1411"]:
            # 1151 = Margarines and other vegetable fats
            # 1181 = Confectionery
            # 04522 = Purchase of butane, propane
            # 04511 = Non-separable EDF/GDF (electricity/gas) bill
            code_coicop = "0" + code
        else:
            # 99 = rent, taxes, gifts...
            code_coicop = code + "0"
    elif len(code) == 5:
        if not code.startswith("13") and not code.startswith("44") and not code.startswith("51"):
            code_coicop = code
        else:
            code_coicop = "99000"
    else:
        log.error("Problematic code {}".format(code))
        raise ValueError("Problematic code {}".format(code))
    return code_coicop
0.00514
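Expected behaviour on a few sample codes, traced from the branches above; the codes are illustrative, not drawn from a real COICOP table.

print(normalize_code_coicop('601'))    # '06010' (3 digits: pad both sides)
print(normalize_code_coicop('2211'))   # '02211' (4 digits, no special prefix)
print(normalize_code_coicop('0221'))   # '02210' (4 digits starting with 0)
print(normalize_code_coicop('01111'))  # '01111' (5 digits: kept as-is)
print(normalize_code_coicop('13211'))  # '99000' (5 digits starting with 13)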
def do_login(self, http_session):
    """Do vk login

    :param http_session: vk_requests.utils.VerboseHTTPSession: http session
    """
    response = http_session.get(self.LOGIN_URL)
    action_url = parse_form_action_url(response.text)

    # Stop login if the form action url is not found
    if not action_url:
        logger.debug(response.text)
        raise VkParseError("Can't parse form action url")

    login_form_data = {'email': self._login, 'pass': self._password}
    login_response = http_session.post(action_url, login_form_data)
    logger.debug('Cookies: %s', http_session.cookies)

    response_url_query = parse_url_query_params(
        login_response.url, fragment=False)
    logger.debug('response_url_query: %s', response_url_query)
    act = response_url_query.get('act')

    # Check response url query params first
    if 'sid' in response_url_query:
        self.require_auth_captcha(
            response=login_response,
            query_params=response_url_query,
            login_form_data=login_form_data,
            http_session=http_session)

    elif act == 'authcheck':
        self.require_2fa(html=login_response.text,
                         http_session=http_session)

    elif act == 'security_check':
        self.require_phone_number(html=login_response.text,
                                  session=http_session)

    session_cookies = ('remixsid' in http_session.cookies,
                       'remixsid6' in http_session.cookies)
    if any(session_cookies):
        logger.info('VK session is established')
        return True
    else:
        message = 'Authorization error: incorrect password or ' \
                  'authentication code'
        logger.error(message)
        raise VkAuthError(message)
0.001044
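A hypothetical driver for the flow above. `auth` stands in for the object that owns do_login() (how it is constructed is an assumption); VerboseHTTPSession is the session type named in the docstring.

# Hypothetical wiring; do_login returns True on success and raises
# VkAuthError or VkParseError otherwise.
http_session = VerboseHTTPSession()
if auth.do_login(http_session):
    print('VK session established; remixsid cookie is set')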
async def service_messages(self, msg, _context):
    """Get all messages for a service."""
    msgs = self.service_manager.service_messages(msg.get('name'))
    return [x.to_dict() for x in msgs]
0.009615
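An asynchronous caller sketch: `manager` is a hypothetical stand-in for the object exposing the coroutine, and the message dict mirrors the msg.get('name') lookup above.

import asyncio

async def main():
    # Second argument is the unused _context parameter.
    msgs = await manager.service_messages({'name': 'device_monitor'}, None)
    for entry in msgs:
        print(entry)

asyncio.run(main())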
def parent_of(self, node):
    """Check if this node is the parent of the given node.

    :param node: The node to check if it is the child.
    :type node: NodeNG

    :returns: True if this node is the parent of the given node,
        False otherwise.
    :rtype: bool
    """
    parent = node.parent
    while parent is not None:
        if self is parent:
            return True
        parent = parent.parent
    return False
0.004115
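A self-contained illustration with a minimal stand-in for NodeNG: only the .parent chain that parent_of walks is modelled, and the function above is attached to the stand-in for the demo (assuming it is importable as a plain function).

class FakeNode:
    """Minimal stand-in: only the .parent attribute is modelled."""
    def __init__(self, parent=None):
        self.parent = parent

FakeNode.parent_of = parent_of  # reuse the method defined above

root = FakeNode()
child = FakeNode(parent=root)
grandchild = FakeNode(parent=child)

print(root.parent_of(grandchild))  # True: the walk is transitive (ancestor test)
print(child.parent_of(root))       # False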
def get_bank_hierarchy_session(self):
    """Gets the session traversing bank hierarchies.

    return: (osid.assessment.BankHierarchySession) - a
            ``BankHierarchySession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_bank_hierarchy() is false``
    *compliance: optional -- This method must be implemented if
    ``supports_bank_hierarchy()`` is true.*

    """
    if not self.supports_bank_hierarchy():
        raise errors.Unimplemented()
    # pylint: disable=no-member
    return sessions.BankHierarchySession(runtime=self._runtime)
0.003086
def createEncoder():
    """Create the encoder instance for our test and return it."""
    consumption_encoder = ScalarEncoder(21, 0.0, 100.0, n=50, name="consumption",
                                        clipInput=True)
    time_encoder = DateEncoder(timeOfDay=(21, 9.5), name="timestamp_timeOfDay")

    encoder = MultiEncoder()
    encoder.addEncoder("consumption", consumption_encoder)
    encoder.addEncoder("timestamp", time_encoder)

    return encoder
0.021687
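A usage sketch under the assumption that the NuPIC encoder API is available and that encode() on a MultiEncoder accepts a dict keyed by the registered field names, returning one binary numpy array of the concatenated encodings.

import datetime

# Assumed NuPIC behaviour: encode() concatenates the per-field encodings.
encoder = createEncoder()
record = {
    "consumption": 42.5,
    "timestamp": datetime.datetime(2024, 1, 15, 14, 30),
}
sdr = encoder.encode(record)
print(sdr.size, sdr.sum())  # total bit count and number of active bits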
def get_image_tags(self):
    """
    Fetches image labels (repository / tags) from Docker.

    :return: A dictionary, with image name and tags as the key and the image id as value.
    :rtype: dict
    """
    current_images = self.images()
    tags = {tag: i['Id'] for i in current_images for tag in i['RepoTags']}
    return tags
0.008219
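A self-contained illustration of the tag-to-id mapping, using a stubbed images() payload shaped like the Docker API response.

class FakeClient:
    """Stub returning a payload in the shape the Docker API uses."""
    def images(self):
        return [
            {'Id': 'sha256:aaa', 'RepoTags': ['web:latest', 'web:1.2']},
            {'Id': 'sha256:bbb', 'RepoTags': ['db:latest']},
        ]

current_images = FakeClient().images()
tags = {tag: i['Id'] for i in current_images for tag in i['RepoTags']}
print(tags)
# {'web:latest': 'sha256:aaa', 'web:1.2': 'sha256:aaa', 'db:latest': 'sha256:bbb'}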
def min_zoom(self):
    """
    Get the minimal zoom level of all layers.

    Returns:
        int: the minimum of all zoom levels of all layers

    Raises:
        ValueError: if no layers exist
    """
    zoom_levels = [map_layer.min_zoom for map_layer in self.layers]
    return min(zoom_levels)
0.005917
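A minimal stand-in demo: only the min_zoom attribute the property reads is modelled.

class FakeLayer:
    def __init__(self, min_zoom):
        self.min_zoom = min_zoom

layers = [FakeLayer(3), FakeLayer(5), FakeLayer(1)]
print(min(layer.min_zoom for layer in layers))  # 1
# With an empty layer list, min() raises ValueError, matching the docstring.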
def update(self):
    """Delete the coefficients of the pure MA model and also all MA and
    AR coefficients of the ARMA model. Also calculate or delete the values
    of all secondary iuh parameters, depending on the completeness of the
    values of the primary parameters.
    """
    del self.ma.coefs
    del self.arma.ma_coefs
    del self.arma.ar_coefs
    if self.primary_parameters_complete:
        self.calc_secondary_parameters()
    else:
        for secpar in self._SECONDARY_PARAMETERS.values():
            secpar.__delete__(self)
0.003333