text: string, lengths 78 to 104k
score: float64, range 0 to 0.18
def custom(cls, customgrouper): """Custom grouping from a given implementation of ICustomGrouping :param customgrouper: The ICustomGrouping implementation to use """ if customgrouper is None: raise TypeError("Argument to custom() must be ICustomGrouping instance or classpath") if not isinstance(customgrouper, ICustomGrouping) and not isinstance(customgrouper, str): raise TypeError("Argument to custom() must be ICustomGrouping instance or classpath") serialized = default_serializer.serialize(customgrouper) return cls.custom_serialized(serialized, is_java=False)
0.009917
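A usage sketch for the custom() helper above. The SampleGrouping class, its method names, and the routing logic are illustrative assumptions rather than a verified API; the only behaviour taken from the snippet is that custom() accepts an ICustomGrouping instance (or a classpath string) and hands the serialized form to custom_serialized().

class SampleGrouping(ICustomGrouping):
    # Method names below are assumed for illustration only.
    def prepare(self, context, component, stream, target_tasks):
        self.target_tasks = target_tasks

    def choose_tasks(self, values):
        # Route every tuple to the first target task.
        return [self.target_tasks[0]]

grouping = Grouping.custom(SampleGrouping())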
async def remove(self, von_wallet: Wallet) -> None: """ Remove serialized wallet if it exists. Raise WalletState if wallet is open. :param von_wallet: (closed) wallet to remove """ LOGGER.debug('WalletManager.remove >>> wallet %s', von_wallet) await von_wallet.remove() LOGGER.debug('WalletManager.remove <<<')
0.008108
def ask(type, payload): """ Publish a message with the specified action_type and payload over the event system. Useful for debugging. """ async def _produce(): # notify the user that we were successful print("Dispatching action with type {}...".format(type)) # fire an action with the given values response = await producer.ask(action_type=type, payload=payload) # show the user the reply print(response) # create a producer producer = ActionHandler() # start the producer producer.start() # get the current event loop loop = asyncio.get_event_loop() # run the production sequence loop.run_until_complete(_produce()) # stop the producer producer.stop()
0.001294
def put_mapping(self, using=None, **kwargs): """ Register specific mapping definition for a specific type. Any additional keyword arguments will be passed to ``Elasticsearch.indices.put_mapping`` unchanged. """ return self._get_connection(using).indices.put_mapping(index=self._name, **kwargs)
0.008772
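A brief usage sketch for put_mapping() above, assuming the method lives on an elasticsearch_dsl-style Index object; the index name and mapping body are made up, and older Elasticsearch versions may additionally require a doc_type keyword.

from elasticsearch_dsl import Index

posts = Index("blog-posts")          # hypothetical index name
posts.put_mapping(body={
    "properties": {
        "title": {"type": "text"},
        "published": {"type": "date"},
    },
})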
def list_functions(region=None, key=None, keyid=None, profile=None): ''' List all Lambda functions visible in the current scope. CLI Example: .. code-block:: bash salt myminion boto_lambda.list_functions ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) ret = [] for funcs in __utils__['boto3.paged_call'](conn.list_functions): ret += funcs['Functions'] return ret
0.002242
def execute_transactions(conn, statements: Iterable): """Execute several statements each as a single DB transaction.""" with conn.cursor() as cursor: for statement in statements: try: cursor.execute(statement) conn.commit() except psycopg2.ProgrammingError: conn.rollback()
0.002762
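A small usage sketch for execute_transactions() above; the connection string and table names are placeholders. Because each statement is committed on its own, a failing statement (here, an insert into a missing table, which raises a psycopg2.ProgrammingError subclass) is rolled back without aborting the later ones.

import psycopg2

conn = psycopg2.connect("dbname=example")       # placeholder connection string
statements = [
    "CREATE TABLE IF NOT EXISTS items (id integer PRIMARY KEY)",
    "INSERT INTO items (id) VALUES (1)",
    "INSERT INTO no_such_table VALUES (1)",     # ProgrammingError: rolled back, loop continues
    "INSERT INTO items (id) VALUES (2)",        # still committed
]
execute_transactions(conn, statements)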
def is_normal(self, k): ''' lmap.is_normal(k) yields True if k is a key in the given lazy map lmap that is neither lazy nor a formerly-lazy memoized key. ''' v = ps.PMap.__getitem__(self, k) if not isinstance(v, (types.FunctionType, partial)) or [] != getargspec_py27like(v)[0]: return True else: return False
0.010283
def create(cls, scheduled_analysis, tags=None, json_report_objects=None, raw_report_objects=None, additional_metadata=None, analysis_date=None): """ Create a new report. For convenience :func:`~mass_api_client.resources.scheduled_analysis.ScheduledAnalysis.create_report` of class :class:`.ScheduledAnalysis` can be used instead. :param scheduled_analysis: The :class:`.ScheduledAnalysis` this report was created for :param tags: A list of strings :param json_report_objects: A dictionary of JSON reports, where the key is the object name. :param raw_report_objects: A dictionary of binary file reports, where the key is the file name. :param analysis_date: A datetime object of the time the report was generated. Defaults to current time. :return: The newly created report object """ if tags is None: tags = [] if additional_metadata is None: additional_metadata = {} if analysis_date is None: analysis_date = datetime.datetime.now() url = cls._creation_point.format(scheduled_analysis=scheduled_analysis.id) return cls._create(url=url, analysis_date=analysis_date, additional_json_files=json_report_objects, additional_binary_files=raw_report_objects, tags=tags, additional_metadata=additional_metadata, force_multipart=True)
0.008276
def base_prompt(self, prompt): """Extract the base prompt pattern.""" if prompt is None: return None if not self.device.is_target: return prompt pattern = pattern_manager.pattern(self.platform, "prompt_dynamic", compiled=False) pattern = pattern.format(prompt="(?P<prompt>.*?)") result = re.search(pattern, prompt) if result: base = result.group("prompt") + "#" self.log("base prompt: {}".format(base)) return base else: self.log("Unable to extract the base prompt") return prompt
0.004777
def connect_get_namespaced_pod_exec(self, name, namespace, **kwargs): """ connect GET requests to exec of Pod This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_get_namespaced_pod_exec(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PodExecOptions (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str command: Command is the remote command to execute. argv array. Not executed within a shell. :param str container: Container in which to execute the command. Defaults to only container if there is only one container in the pod. :param bool stderr: Redirect the standard error stream of the pod for this call. Defaults to true. :param bool stdin: Redirect the standard input stream of the pod for this call. Defaults to false. :param bool stdout: Redirect the standard output stream of the pod for this call. Defaults to true. :param bool tty: TTY if true indicates that a tty will be allocated for the exec call. Defaults to false. :return: str If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.connect_get_namespaced_pod_exec_with_http_info(name, namespace, **kwargs) else: (data) = self.connect_get_namespaced_pod_exec_with_http_info(name, namespace, **kwargs) return data
0.006952
def to_dict(self): 'Convert a structure into a Python dictionary.' fsa = dict() for key in self._integer_members: fsa[key] = getattr(self, key) ra = [ self.RegisterArea[index] for index in compat.xrange(0, SIZE_OF_80387_REGISTERS) ] ra = tuple(ra) fsa['RegisterArea'] = ra return fsa
0.014245
def get_global_state(self): """ Get the global state. :return: (ret, data) ret == RET_OK: data is a dict containing the global state, with the meanings below ret != RET_OK: data is an error message string ===================== =========== ============================================================== key value type description ===================== =========== ============================================================== market_sz str Shenzhen market state, see MarketState market_us str US market state, see MarketState market_sh str Shanghai market state, see MarketState market_hk str Hong Kong market state, see MarketState market_future str Hong Kong futures market state, see MarketState server_ver str FutuOpenD version trd_logined str '1': logged in to the trade server, '0': not logged in qot_logined str '1': logged in to the quote server, '0': not logged in timestamp str current timestamp of the Futu backend server (seconds) local_timestamp double current timestamp of the machine running FutuOpenD ===================== =========== ============================================================== :example: .. code:: python from futuquant import * quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111) print(quote_ctx.get_global_state()) quote_ctx.close() """ query_processor = self._get_sync_query_processor( GlobalStateQuery.pack_req, GlobalStateQuery.unpack_rsp) kargs = { 'user_id': self.get_login_user_id(), 'conn_id': self.get_sync_conn_id(), } ret_code, msg, state_dict = query_processor(**kargs) if ret_code != RET_OK: return ret_code, msg return RET_OK, state_dict
0.00349
def plot_pseudosections(self, column, filename=None, return_fig=False): """Create a multi-plot with one pseudosection for each frequency. Parameters ---------- column : string which column to plot filename : None|string output filename. If set to None, do not write to file. Default: None return_fig : bool if True, return the generated figure object. Default: False Returns ------- fig : None|matplotlib.Figure if return_fig is set to True, return the generated Figure object """ assert column in self.data.columns g = self.data.groupby('frequency') fig, axes = plt.subplots( 4, 2, figsize=(15 / 2.54, 20 / 2.54), sharex=True, sharey=True ) for ax, (key, item) in zip(axes.flat, g): fig, ax, cb = PS.plot_pseudosection_type2( item, ax=ax, column=column ) ax.set_title('f: {} Hz'.format(key)) fig.tight_layout() if filename is not None: fig.savefig(filename, dpi=300) if return_fig: return fig else: plt.close(fig)
0.001595
def sandbag( adata, annotation, gene_names, sample_names, fraction=0.65, filter_genes=None, filter_samples=None): """Generate pairs of genes [Scialdone15]_ [Fechtner18]_. Calculates the pairs of genes serving as marker pairs for each phase, based on a matrix of gene counts and an annotation of known phases. This reproduces the approach of [Scialdone15]_ in the implementation of [Fechtner18]_. More information and bug reports `here <https://github.com/rfechtner/pypairs>`__. Parameters ---------- adata : :class:`~anndata.AnnData` The annotated data matrix. categories : `dict` Dictionary of lists, i.e. {phase: [sample, ...]}, containing annotation of samples to their phase gene_names: `list` List of genes. sample_names: `list` List of samples. fraction : `float`, optional (default: 0.5) Fraction to be used as threshold. filter_genes : `list` or `None`, optional (default: `None`) Genes for sampling the reference set. Default is all genes. filter_samples : `list` or `None`, optional (default: `None`) Cells for sampling the reference set. Default is all samples. Returns ------- `dict` of `list` of `tuple`, i.e. {phase: [(Gene1, Gene2), ...]}, containing marker pairs per phase """ try: from pypairs import __version__ as pypairsversion from distutils.version import LooseVersion if LooseVersion(pypairsversion) < LooseVersion("v3.0.9"): raise ImportError('Please only use `pypairs` >= v3.0.9 ') except ImportError: raise ImportError('You need to install the package `pypairs`.') from pypairs.pairs import sandbag from . import settings from pypairs import settings as pp_settings pp_settings.verbosity = settings.verbosity pp_settings.n_jobs = settings.n_jobs pp_settings.writedir = settings.writedir pp_settings.cachedir = settings.cachedir pp_settings.logfile = settings.logfile return sandbag( data = adata, annotation = annotation, gene_names = gene_names, sample_names = sample_names, fraction = fraction, filter_genes = filter_genes, filter_samples = filter_samples )
0.006806
def dump_age(age=None): """Formats the duration as a base-10 integer. :param age: should be an integer number of seconds, a :class:`datetime.timedelta` object, or, if the age is unknown, `None` (default). """ if age is None: return if isinstance(age, timedelta): # do the equivalent of Python 2.7's timedelta.total_seconds(), # but disregarding fractional seconds age = age.seconds + (age.days * 24 * 3600) age = int(age) if age < 0: raise ValueError("age cannot be negative") return str(age)
0.001669
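A few worked calls for dump_age() above, following directly from the code: timedeltas are converted to whole seconds, numbers are truncated to an integer string, and None passes through.

from datetime import timedelta

dump_age(timedelta(days=1, seconds=30))   # -> '86430'
dump_age(90)                              # -> '90'
dump_age(12.7)                            # -> '12'   (int() truncates)
dump_age(None)                            # -> None
dump_age(-5)                              # raises ValueError("age cannot be negative")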
def validatepubkey(pub): ''' Returns input key if it's a valid hex public key, or False otherwise. Input must be hex string, not bytes or integer/long or anything else. ''' try: pub = hexstrlify(unhexlify(pub)) except: return False if len(pub) == 130: if pub[:2] != '04': return False if uncompress(compress(pub)) != pub: return False elif len(pub) == 66: if pub[:2] != '02' and pub[:2] != '03': return False else: return False return pub
0.003497
def get_dh_params_length(server_handshake_bytes): """ Determines the length of the DH params from the ServerKeyExchange :param server_handshake_bytes: A byte string of the handshake data received from the server :return: None or an integer of the bit size of the DH parameters """ output = None dh_params_bytes = None for record_type, _, record_data in parse_tls_records(server_handshake_bytes): if record_type != b'\x16': continue for message_type, message_data in parse_handshake_messages(record_data): if message_type == b'\x0c': dh_params_bytes = message_data break if dh_params_bytes: break if dh_params_bytes: output = int_from_bytes(dh_params_bytes[0:2]) * 8 return output
0.003563
def download_segmentation_image_file(self, mapobject_type_name, plate_name, well_name, well_pos_y, well_pos_x, tpoint, zplane, align, directory): '''Downloads a segmentation image and writes it to a *PNG* file on disk. Parameters ---------- mapobject_type_name: str name of the segmented objects plate_name: str name of the plate well_name: str name of the well in which the image is located well_pos_y: int y-position of the site relative to the well grid well_pos_x: int x-position of the site relative to the well grid tpoint: int zero-based time point index zplane: int zero-based z-plane index align: bool option to apply alignment to download directory: str absolute path to the directory on disk where the file should be saved Warning ------- Due to the *PNG* file format the approach is limited to images which contain less than 65536 objects. See also -------- :meth:`tmclient.api.TmClient.download_segmentation_image` ''' response = self._download_segmentation_image( mapobject_type_name, plate_name, well_name, well_pos_y, well_pos_x, tpoint, zplane, align ) image = np.array(response, np.int32) if np.max(image) >= 2**16: raise ValueError( 'Cannot store segmentation image as PNG file because it ' 'contains more than 65536 objects.' ) filename = '{0}_{1}_{2}_y{3:03d}_x{4:03d}_z{5:03d}_t{6:03d}_{7}.png'.format( self.experiment_name, plate_name, well_name, well_pos_y, well_pos_x, zplane, tpoint, mapobject_type_name ) data = cv2.imencode(filename, image.astype(np.uint16))[1] self._write_file(directory, filename, data)
0.004008
def gtf(args): """ %prog gtf gffile Convert gff to gtf file. In gtf, only exon/CDS features are important. The first 8 columns are the same as gff, but in the attributes field, we need to specify "gene_id" and "transcript_id". """ p = OptionParser(gtf.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args gff = Gff(gffile) transcript_info = AutoVivification() for g in gff: if g.type.endswith(("RNA", "transcript")): if "ID" in g.attributes and "Parent" in g.attributes: transcript_id = g.get_attr("ID") gene_id = g.get_attr("Parent") elif "mRNA" in g.attributes and "Gene" in g.attributes: transcript_id = g.get_attr("mRNA") gene_id = g.get_attr("Gene") else: transcript_id = g.get_attr("ID") gene_id = transcript_id transcript_info[transcript_id]["gene_id"] = gene_id transcript_info[transcript_id]["gene_type"] = g.type continue if g.type not in valid_gff_to_gtf_type.keys(): continue try: transcript_id = g.get_attr("Parent", first=False) except IndexError: transcript_id = g.get_attr("mRNA", first=False) g.type = valid_gff_to_gtf_type[g.type] for tid in transcript_id: if tid not in transcript_info: continue gene_type = transcript_info[tid]["gene_type"] if not gene_type.endswith("RNA") and not gene_type.endswith("transcript"): continue gene_id = transcript_info[tid]["gene_id"] g.attributes = dict(gene_id=[gene_id], transcript_id=[tid]) g.update_attributes(gtf=True, urlquote=False) print(g)
0.002138
def _check_proto(self, path): ''' Make sure that this path is intended for the salt master and trim it ''' if not path.startswith('salt://'): raise MinionError('Unsupported path: {0}'.format(path)) file_path, saltenv = salt.utils.url.parse(path) return file_path
0.006211
def postMetricsPointsByID(self, id, value, **kwargs): '''Add a metric point to a given metric. :param id: Metric ID :param value: Value to plot on the metric graph :param timestamp: Unix timestamp of the point was measured :return: :class:`Response <Response>` object :rtype: requests.Response ''' kwargs['value'] = value return self.__postRequest('/metrics/%s/points' % id, kwargs)
0.004386
def small_mind_image_recognition(): """! @brief Trains network using letters 'M', 'I', 'N', 'D' and recognize each of them with and without noise. """ images = []; images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_M; images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_I; images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_N; images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_D; template_recognition_image(images, 100, 10, 0.2);
0.020747
def _enqueue_init_updates(self): """Enqueues current routes to be shared with this peer.""" assert self.state.bgp_state == const.BGP_FSM_ESTABLISHED if self.is_mbgp_cap_valid(RF_RTC_UC): # Enqueues all best-RTC_NLRIs to be sent as initial update to this # peer. self._peer_manager.comm_all_rt_nlris(self) self._schedule_sending_init_updates() else: # Enqueues all best-paths to be sent as initial update to this peer, # except for the RTC route-family. tm = self._core_service.table_manager self.comm_all_best_paths(tm.global_tables)
0.003053
def deserialize(self, value, **kwargs): #pylint: disable=unused-argument """Deserialize input value to valid Property value This method uses the Property :code:`deserializer` if available. Otherwise, it uses :code:`from_json`. Any keyword arguments are passed through to these methods. """ kwargs.update({'trusted': kwargs.get('trusted', False)}) if self.deserializer is not None: return self.deserializer(value, **kwargs) if value is None: return None return self.from_json(value, **kwargs)
0.006421
def get(self, sid): """ Constructs a OutgoingCallerIdContext :param sid: The unique string that identifies the resource :returns: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdContext :rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdContext """ return OutgoingCallerIdContext(self._version, account_sid=self._solution['account_sid'], sid=sid, )
0.011236
def generateExpectedList(numUniqueFeatures, numLocationsPerObject, maxNumObjects): """ Metric: How unique is each object's most unique feature? Calculate the expected number of occurrences of an object's most unique feature. """ # We're choosing a location, checking its feature, and checking how many # *other* occurrences there are of this feature. So we check n - 1 locations. maxNumOtherLocations = maxNumObjects*10 - 1 results = zip(itertools.count(1), findBinomialNsWithExpectedSampleMinimum( itertools.count(1), 1./numUniqueFeatures, numLocationsPerObject, maxNumOtherLocations)) finalResults = [(numOtherLocations, interpolatedN / numLocationsPerObject) for numOtherLocations, (interpolatedN, _, _) in results] return [(0, 0.)] + finalResults
0.011862
def registIssue(self, CorpNum, statement, Memo=None, UserID=None): """ Register and immediately issue a statement. args CorpNum : Popbill member business registration number statement : e-statement object to register. made with Statement(...) Memo : memo attached on immediate issue UserID : Popbill member user ID return processing result. consist of code and message raise PopbillException """ if statement is None: raise PopbillException(-99999999, "The e-statement information to register was not provided.") if Memo is not None and Memo != '': statement.memo = Memo postData = self._stringtify(statement) return self._httppost('/Statement', postData, CorpNum, UserID, "ISSUE")
0.00545
def deduplicate(directories: List[str], recursive: bool, dummy_run: bool) -> None: """ De-duplicate files within one or more directories. Remove files that are identical to ones already considered. Args: directories: list of directories to process recursive: process subdirectories (recursively)? dummy_run: say what it'll do, but don't do it """ # ------------------------------------------------------------------------- # Catalogue files by their size # ------------------------------------------------------------------------- files_by_size = {} # type: Dict[int, List[str]] # maps size to list of filenames # noqa num_considered = 0 for filename in gen_filenames(directories, recursive=recursive): if not os.path.isfile(filename): continue size = os.stat(filename)[stat.ST_SIZE] a = files_by_size.setdefault(size, []) a.append(filename) num_considered += 1 log.debug("files_by_size =\n{}", pformat(files_by_size)) # ------------------------------------------------------------------------- # By size, look for duplicates using a hash of the first part only # ------------------------------------------------------------------------- log.info("Finding potential duplicates...") potential_duplicate_sets = [] potential_count = 0 sizes = list(files_by_size.keys()) sizes.sort() for k in sizes: files_of_this_size = files_by_size[k] out_files = [] # type: List[str] # ... list of all files having >1 file per hash, for this size hashes = {} # type: Dict[str, Union[bool, str]] # ... key is a hash; value is either True or a filename if len(files_of_this_size) == 1: continue log.info("Testing {} files of size {}...", len(files_of_this_size), k) for filename in files_of_this_size: if not os.path.isfile(filename): continue log.debug("Quick-scanning file: {}", filename) with open(filename, 'rb') as fd: hasher = md5() hasher.update(fd.read(INITIAL_HASH_SIZE)) hash_value = hasher.digest() if hash_value in hashes: # We have discovered the SECOND OR SUBSEQUENT hash match. first_file_or_true = hashes[hash_value] if first_file_or_true is not True: # We have discovered the SECOND file; # first_file_or_true contains the name of the FIRST. out_files.append(first_file_or_true) hashes[hash_value] = True out_files.append(filename) else: # We have discovered the FIRST file with this hash. 
hashes[hash_value] = filename if out_files: potential_duplicate_sets.append(out_files) potential_count = potential_count + len(out_files) del files_by_size log.info("Found {} sets of potential duplicates, based on hashing the " "first {} bytes of each...", potential_count, INITIAL_HASH_SIZE) log.debug("potential_duplicate_sets =\n{}", pformat(potential_duplicate_sets)) # ------------------------------------------------------------------------- # Within each set, check for duplicates using a hash of the entire file # ------------------------------------------------------------------------- log.info("Scanning for real duplicates...") num_scanned = 0 num_to_scan = sum(len(one_set) for one_set in potential_duplicate_sets) duplicate_sets = [] # type: List[List[str]] for one_set in potential_duplicate_sets: out_files = [] # type: List[str] hashes = {} for filename in one_set: num_scanned += 1 log.info("Scanning file [{}/{}]: {}", num_scanned, num_to_scan, filename) with open(filename, 'rb') as fd: hasher = md5() while True: r = fd.read(MAIN_READ_CHUNK_SIZE) if len(r) == 0: break hasher.update(r) hash_value = hasher.digest() if hash_value in hashes: if not out_files: out_files.append(hashes[hash_value]) out_files.append(filename) else: hashes[hash_value] = filename if len(out_files): duplicate_sets.append(out_files) log.debug("duplicate_sets = \n{}", pformat(duplicate_sets)) num_originals = 0 num_deleted = 0 for d in duplicate_sets: print("Original is: {}".format(d[0])) num_originals += 1 for f in d[1:]: if dummy_run: print("Would delete: {}".format(f)) else: print("Deleting: {}".format(f)) os.remove(f) num_deleted += 1 print() num_unique = num_considered - (num_originals + num_deleted) print( "{action} {d} duplicates, leaving {o} originals (and {u} unique files " "not touched; {c} files considered in total)".format( action="Would delete" if dummy_run else "Deleted", d=num_deleted, o=num_originals, u=num_unique, c=num_considered ) )
0.000181
def qemu_path(self, qemu_path): """ Sets the QEMU binary path this QEMU VM. :param qemu_path: QEMU path """ if qemu_path and os.pathsep not in qemu_path: if sys.platform.startswith("win") and ".exe" not in qemu_path.lower(): qemu_path += "w.exe" new_qemu_path = shutil.which(qemu_path, path=os.pathsep.join(self._manager.paths_list())) if new_qemu_path is None: raise QemuError("QEMU binary path {} is not found in the path".format(qemu_path)) qemu_path = new_qemu_path self._check_qemu_path(qemu_path) self._qemu_path = qemu_path self._platform = os.path.basename(qemu_path) if self._platform == "qemu-kvm": self._platform = "x86_64" else: qemu_bin = os.path.basename(qemu_path) qemu_bin = re.sub(r'(w)?\.(exe|EXE)$', '', qemu_bin) # Old version of GNS3 provide a binary named qemu.exe if qemu_bin == "qemu": self._platform = "i386" else: self._platform = re.sub(r'^qemu-system-(.*)$', r'\1', qemu_bin, re.IGNORECASE) if self._platform.split(".")[0] not in QEMU_PLATFORMS: raise QemuError("Platform {} is unknown".format(self._platform)) log.info('QEMU VM "{name}" [{id}] has set the QEMU path to {qemu_path}'.format(name=self._name, id=self._id, qemu_path=qemu_path))
0.005495
def get_literals(self): """ Return a list of all the literals contained in this expression. Include recursively subexpressions symbols. This includes duplicates. """ if self.isliteral: return [self] if not self.args: return [] return list(itertools.chain.from_iterable(arg.get_literals() for arg in self.args))
0.007538
def style_factory(self, style_name): """Retrieve the specified pygments style. If the specified style is not found, the vim style is returned. :type style_name: str :param style_name: The pygments style name. :rtype: :class:`pygments.style.StyleMeta` :return: Pygments style info. """ try: style = get_style_by_name(style_name) except ClassNotFound: style = get_style_by_name('vim') # Create a style dictionary. styles = {} styles.update(style.styles) styles.update(default_style_extensions) t = Token styles.update({ t.Menu.Completions.Completion.Current: 'bg:#00aaaa #000000', t.Menu.Completions.Completion: 'bg:#008888 #ffffff', t.Menu.Completions.Meta.Current: 'bg:#00aaaa #000000', t.Menu.Completions.Meta: 'bg:#00aaaa #ffffff', t.Scrollbar.Button: 'bg:#003333', t.Scrollbar: 'bg:#00aaaa', t.Toolbar: 'bg:#222222 #cccccc', t.Toolbar.Off: 'bg:#222222 #696969', t.Toolbar.On: 'bg:#222222 #ffffff', t.Toolbar.Search: 'noinherit bold', t.Toolbar.Search.Text: 'nobold', t.Toolbar.System: 'noinherit bold', t.Toolbar.Arg: 'noinherit bold', t.Toolbar.Arg.Text: 'nobold' }) return style_from_dict(styles)
0.001395
def set_service_value(self, service_id, set_name, parameter_name, value): """Set a variable on the vera device. This will call the Vera api to change device state. """ payload = { 'id': 'lu_action', 'action': 'Set' + set_name, 'serviceId': service_id, parameter_name: value } result = self.vera_request(**payload) logger.debug("set_service_value: " "result of vera_request with payload %s: %s", payload, result.text)
0.007181
def simxSetObjectIntParameter(clientID, objectHandle, parameterID, parameterValue, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' return c_SetObjectIntParameter(clientID, objectHandle, parameterID, parameterValue, operationMode)
0.012987
def delete_data_disk(self, service_name, deployment_name, role_name, lun, delete_vhd=False): ''' Removes the specified data disk from a virtual machine. service_name: The name of the service. deployment_name: The name of the deployment. role_name: The name of the role. lun: The Logical Unit Number (LUN) for the disk. delete_vhd: Deletes the underlying vhd blob in Azure storage. ''' _validate_not_none('service_name', service_name) _validate_not_none('deployment_name', deployment_name) _validate_not_none('role_name', role_name) _validate_not_none('lun', lun) path = self._get_data_disk_path(service_name, deployment_name, role_name, lun) if delete_vhd: path += '?comp=media' return self._perform_delete(path, as_async=True)
0.004348
def get_name_dictionary_extractor(name_trie): """Method for creating default name dictionary extractor""" return DictionaryExtractor()\ .set_trie(name_trie)\ .set_pre_filter(VALID_TOKEN_RE.match)\ .set_pre_process(lambda x: x.lower())\ .set_metadata({'extractor': 'dig_name_dictionary_extractor'})
0.002959
def try_next(self): """Advance the cursor without blocking indefinitely. This method returns the next change document without waiting indefinitely for the next change. For example:: with db.collection.watch() as stream: while stream.alive: change = stream.try_next() if change is not None: print(change) elif stream.alive: # We end up here when there are no recent changes. # Sleep for a while to avoid flooding the server with # getMore requests when no changes are available. time.sleep(10) If no change document is cached locally then this method runs a single getMore command. If the getMore yields any documents, the next document is returned, otherwise, if the getMore returns no documents (because there have been no changes) then ``None`` is returned. :Returns: The next change document or ``None`` when no document is available after running a single getMore or when the cursor is closed. .. versionadded:: 3.8 """ # Attempt to get the next change with at most one getMore and at most # one resume attempt. try: change = self._cursor._try_next(True) except ConnectionFailure: self._resume() change = self._cursor._try_next(False) except OperationFailure as exc: if exc.code in _NON_RESUMABLE_GETMORE_ERRORS: raise self._resume() change = self._cursor._try_next(False) # No changes are available. if change is None: return None try: resume_token = change['_id'] except KeyError: self.close() raise InvalidOperation( "Cannot provide resume functionality when the resume " "token is missing.") self._resume_token = copy.copy(resume_token) self._start_at_operation_time = None if self._decode_custom: return _bson_to_dict(change.raw, self._orig_codec_options) return change
0.000878
def derive_identity_arf(name, arf): """Create an "identity" ARF that has uniform sensitivity. *name* The name of the ARF object to be created; passed to Sherpa. *arf* An existing ARF object on which to base this one. Returns: A new ARF1D object that has a uniform spectral response vector. In many X-ray observations, the relevant background signal does not behave like an astrophysical source that is filtered through the telescope's response functions. However, I have been unable to get current Sherpa (version 4.9) to behave how I want when working with background models that are *not* filtered through these response functions. This function constructs an "identity" ARF response function that has uniform sensitivity as a function of detector channel. """ from sherpa.astro.data import DataARF from sherpa.astro.instrument import ARF1D darf = DataARF( name, arf.energ_lo, arf.energ_hi, np.ones(arf.specresp.shape), arf.bin_lo, arf.bin_hi, arf.exposure, header = None, ) return ARF1D(darf, pha=arf._pha)
0.002582
def fin_session(self): """ Finalize current session :return: None """ self.__prompt_show = False self.__history.add(self.row()) self.exec(self.row())
0.049383
def queue_file_io_task(self, fileobj, data, offset): """Queue IO write for submission to the IO executor. This method accepts an IO executor and information about the downloaded data, and handles submitting this to the IO executor. This method may defer submission to the IO executor if necessary. """ self._transfer_coordinator.submit( self._io_executor, self.get_io_write_task(fileobj, data, offset) )
0.004107
def init(): """Initialize the pipeline in maya so everything works Init environment and load plugins. This also creates the initial Jukebox Menu entry. :returns: None :rtype: None :raises: None """ main.init_environment() pluginpath = os.pathsep.join((os.environ.get('JUKEBOX_PLUGIN_PATH', ''), BUILTIN_PLUGIN_PATH)) os.environ['JUKEBOX_PLUGIN_PATH'] = pluginpath try: maya.standalone.initialize() jukeboxmaya.STANDALONE_INITIALIZED = True except RuntimeError as e: jukeboxmaya.STANDALONE_INITIALIZED = False if str(e) == "maya.standalone may only be used from an external Python interpreter": mm = MenuManager.get() mainmenu = mm.create_menu("Jukebox", tearOff=True) mm.create_menu("Help", parent=mainmenu, command=show_help) # load plugins pmanager = MayaPluginManager.get() pmanager.load_plugins() load_mayaplugins()
0.003148
def shutdown(self): """ Shuts down the TLS session and then shuts down the underlying socket :raises: OSError - when an error is returned by the OS crypto library """ if self._context_handle_pointer is None: return out_buffers = None try: # ApplyControlToken fails with SEC_E_UNSUPPORTED_FUNCTION # when called on Windows 7 if _win_version_info >= (6, 2): buffers = new(secur32, 'SecBuffer[1]') # This is a SCHANNEL_SHUTDOWN token (DWORD of 1) buffers[0].cbBuffer = 4 buffers[0].BufferType = Secur32Const.SECBUFFER_TOKEN buffers[0].pvBuffer = cast(secur32, 'BYTE *', buffer_from_bytes(b'\x01\x00\x00\x00')) sec_buffer_desc_pointer = struct(secur32, 'SecBufferDesc') sec_buffer_desc = unwrap(sec_buffer_desc_pointer) sec_buffer_desc.ulVersion = Secur32Const.SECBUFFER_VERSION sec_buffer_desc.cBuffers = 1 sec_buffer_desc.pBuffers = buffers result = secur32.ApplyControlToken(self._context_handle_pointer, sec_buffer_desc_pointer) handle_error(result, TLSError) out_sec_buffer_desc_pointer, out_buffers = self._create_buffers(2) out_buffers[0].BufferType = Secur32Const.SECBUFFER_TOKEN out_buffers[1].BufferType = Secur32Const.SECBUFFER_ALERT output_context_flags_pointer = new(secur32, 'ULONG *') result = secur32.InitializeSecurityContextW( self._session._credentials_handle, self._context_handle_pointer, self._hostname, self._context_flags, 0, 0, null(), 0, null(), out_sec_buffer_desc_pointer, output_context_flags_pointer, null() ) acceptable_results = set([ Secur32Const.SEC_E_OK, Secur32Const.SEC_E_CONTEXT_EXPIRED, Secur32Const.SEC_I_CONTINUE_NEEDED ]) if result not in acceptable_results: handle_error(result, TLSError) token = bytes_from_buffer(out_buffers[0].pvBuffer, out_buffers[0].cbBuffer) try: # If there is an error sending the shutdown, ignore it since the # connection is likely gone at this point self._socket.send(token) except (socket_.error): pass finally: if out_buffers: if not is_null(out_buffers[0].pvBuffer): secur32.FreeContextBuffer(out_buffers[0].pvBuffer) if not is_null(out_buffers[1].pvBuffer): secur32.FreeContextBuffer(out_buffers[1].pvBuffer) secur32.DeleteSecurityContext(self._context_handle_pointer) self._context_handle_pointer = None try: self._socket.shutdown(socket_.SHUT_RDWR) except (socket_.error): pass
0.001871
def _update_attribute_details(self, **update_props): """ Update operation for ISO Attribute Details metadata: write to "MD_Metadata/featureType" """ tree_to_update = update_props['tree_to_update'] xpath = self._data_map['_attr_citation'] # Cannot write to remote file: remove the featureCatalogueCitation element self._attr_details_file_url = None remove_element(tree_to_update, xpath, True) return self._update_complex_list(**update_props)
0.008
def draw(self, texture, pos=(0.0, 0.0), scale=(1.0, 1.0)): """ Draw texture using a fullscreen quad. By default this will cover the entire screen. :param pos: (tuple) offset x, y :param scale: (tuple) scale x, y """ if not self.initialized: self.init() self._texture2d_shader["offset"].value = (pos[0] - 1.0, pos[1] - 1.0) self._texture2d_shader["scale"].value = (scale[0], scale[1]) texture.use(location=0) self._texture2d_sampler.use(location=0) self._texture2d_shader["texture0"].value = 0 self._quad.render(self._texture2d_shader) self._texture2d_sampler.clear(location=0)
0.002778
def parse_doctype(cls, file, encoding=None): '''Get the doctype from the document. Returns: str, None ''' if encoding: lxml_encoding = to_lxml_encoding(encoding) or 'latin1' else: lxml_encoding = encoding try: parser = lxml.etree.XMLParser(encoding=lxml_encoding, recover=True) tree = lxml.etree.parse( io.BytesIO(wpull.util.peek_file(file)), parser=parser ) if tree.getroot() is not None: return tree.docinfo.doctype except lxml.etree.LxmlError: pass
0.00314
def realpath_with_context(path, context): """ Convert a path into its realpath: * For relative path: use :attr:`context.workdir` as root directory * For absolute path: Pass-through without any changes. :param path: Filepath to convert (as string). :param context: Behave context object (with :attr:`context.workdir`) :return: Converted path. """ if not os.path.isabs(path): # XXX ensure_workdir_exists(context) assert context.workdir path = os.path.join(context.workdir, os.path.normpath(path)) return path
0.001736
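A minimal sketch of realpath_with_context() above in use; FakeContext stands in for a real behave context and the paths are placeholders.

class FakeContext:
    workdir = "/tmp/behave_work"     # placeholder working directory

ctx = FakeContext()
realpath_with_context("subdir/data.txt", ctx)   # -> '/tmp/behave_work/subdir/data.txt'
realpath_with_context("/etc/hosts", ctx)        # -> '/etc/hosts' (absolute paths pass through)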
def skip(self, sched_rule_id): """Skip a schedule rule (watering time).""" path = 'schedulerule/skip' payload = {'id': sched_rule_id} return self.rachio.put(path, payload)
0.009852
def open_archive(fs_url, archive): """Open an archive on a filesystem. This function tries to mimick the behaviour of `fs.open_fs` as closely as possible: it accepts either a FS URL or a filesystem instance, and will close all resources it had to open. Arguments: fs_url (FS or text_type): a FS URL, or a filesystem instance, where the archive file is located. archive (text_type): the path to the archive file on the given filesystem. Raises: `fs.opener._errors.Unsupported`: when the archive type is not supported (either the file extension is unknown or the opener requires unmet dependencies). Example: >>> from fs.archive import open_archive >>> with open_archive('mem://', 'test.tar.gz') as archive_fs: ... type(archive_fs) <class 'fs.archive.tarfs.TarFS'> Hint: This function finds the entry points defined in group ``fs.archive.open_archive``, using the names of the entry point as the registered extension. """ it = pkg_resources.iter_entry_points('fs.archive.open_archive') entry_point = next((ep for ep in it if archive.endswith(ep.name)), None) if entry_point is None: raise UnsupportedProtocol( 'unknown archive extension: {}'.format(archive)) try: archive_opener = entry_point.load() except pkg_resources.DistributionNotFound as df: # pragma: no cover six.raise_from(UnsupportedProtocol( 'extension {} requires {}'.format(entry_point.name, df.req)), None) try: binfile = None archive_fs = None fs = open_fs(fs_url) if issubclass(archive_opener, base.ArchiveFS): try: binfile = fs.openbin(archive, 'r+') except errors.ResourceNotFound: binfile = fs.openbin(archive, 'w') except errors.ResourceReadOnly: binfile = fs.openbin(archive, 'r') archive_opener = archive_opener._read_fs_cls elif issubclass(archive_opener, base.ArchiveReadFS): binfile = fs.openbin(archive, 'r') if not hasattr(binfile, 'name'): binfile.name = basename(archive) archive_fs = archive_opener(binfile) except Exception: getattr(archive_fs, 'close', lambda: None)() getattr(binfile, 'close', lambda: None)() raise else: return archive_fs
0.000801
def default_privileges_grant(name, object_name, object_type, defprivileges=None, grant_option=None, prepend='public', maintenance_db=None, user=None, host=None, port=None, password=None, runas=None): ''' .. versionadded:: 2019.0.0 Grant default privileges on a postgres object CLI Example: .. code-block:: bash salt '*' postgres.default_privileges_grant user_name table_name table \\ SELECT,UPDATE maintenance_db=db_name name Name of the role to which default privileges should be granted object_name Name of the object on which the grant is to be performed object_type The object type, which can be one of the following: - table - sequence - schema - group - function privileges Comma separated list of privileges to grant, from the list below: - INSERT - CREATE - TRUNCATE - TRIGGER - SELECT - USAGE - UPDATE - EXECUTE - REFERENCES - DELETE - ALL grant_option If grant_option is set to True, the recipient of the default privilege can in turn grant it to others prepend Table and Sequence object types live under a schema so this should be provided if the object is not under the default `public` schema maintenance_db The database to connect to user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of ''' object_type, pdefrivileges, _defprivs = _mod_defpriv_opts(object_type, defprivileges) _validate_default_privileges(object_type, _defprivs, defprivileges) if has_default_privileges(name, object_name, object_type, defprivileges, prepend=prepend, maintenance_db=maintenance_db, user=user, host=host, port=port, password=password, runas=runas): log.info('The object: %s of type: %s already has default privileges: %s set', object_name, object_type, defprivileges) return False _grants = ','.join(_defprivs) if object_type in ['table', 'sequence']: on_part = '{0}."{1}"'.format(prepend, object_name) elif object_type == 'function': on_part = '{0}'.format(object_name) else: on_part = '"{0}"'.format(object_name) if grant_option: if object_type == 'group': query = ' ALTER DEFAULT PRIVILEGES GRANT {0} TO "{1}" WITH ADMIN OPTION'.format( object_name, name) elif (object_type in ('table', 'sequence', 'function') and object_name.upper() == 'ALL'): query = 'ALTER DEFAULT PRIVILEGES IN SCHEMA {2} GRANT {0} ON {1}S TO ' \ '"{3}" WITH GRANT OPTION'.format( _grants, object_type.upper(), prepend, name) else: query = 'ALTER DEFAULT PRIVILEGES IN SCHEMA {2} GRANT {0} ON {1}S TO "{3}" WITH GRANT OPTION'.format( _grants, object_type.upper(), on_part, name) else: if object_type == 'group': query = 'ALTER DEFAULT PRIVILEGES GRANT {0} TO "{1}"'.format(object_name, name) elif (object_type in ('table', 'sequence') and object_name.upper() == 'ALL'): query = 'ALTER DEFAULT PRIVILEGES IN SCHEMA {2} GRANT {0} ON {1}S TO "{3}"'.format( _grants, object_type.upper(), prepend, name) else: query = ' ALTER DEFAULT PRIVILEGES IN SCHEMA {2} GRANT {0} ON {1}S TO "{3}"'.format( _grants, object_type.upper(), prepend, name) ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return ret['retcode'] == 0
0.006048
def image_load_time(self): """ Returns aggregate image load time for all pages. """ load_times = self.get_load_times('image') return round(mean(load_times), self.decimal_precision)
0.009091
def next_blob(self): """Get the next frame from file""" blob_file = self.blob_file try: preamble = DAQPreamble(file_obj=blob_file) except struct.error: raise StopIteration try: data_type = DATA_TYPES[preamble.data_type] except KeyError: log.error("Unknown datatype: {0}".format(preamble.data_type)) data_type = 'Unknown' blob = Blob() blob[data_type] = None blob['DAQPreamble'] = preamble if data_type == 'DAQSummaryslice': daq_frame = DAQSummaryslice(blob_file) blob[data_type] = daq_frame blob['DAQHeader'] = daq_frame.header elif data_type == 'DAQEvent': daq_frame = DAQEvent(blob_file) blob[data_type] = daq_frame blob['DAQHeader'] = daq_frame.header else: log.warning( "Skipping DAQ frame with data type code '{0}'.".format( preamble.data_type ) ) blob_file.seek(preamble.length - DAQPreamble.size, 1) return blob
0.001747
def __send_message(self, operation): """Send a getmore message and handle the response. """ def kill(): self.__killed = True self.__end_session(True) client = self.__collection.database.client try: response = client._run_operation_with_response( operation, self._unpack_response, address=self.__address) except OperationFailure: kill() raise except NotMasterError: # Don't send kill cursors to another server after a "not master" # error. It's completely pointless. kill() raise except ConnectionFailure: # Don't try to send kill cursors on another socket # or to another server. It can cause a _pinValue # assertion on some server releases if we get here # due to a socket timeout. kill() raise except Exception: # Close the cursor self.__die() raise from_command = response.from_command reply = response.data docs = response.docs if from_command: cursor = docs[0]['cursor'] documents = cursor['nextBatch'] self.__id = cursor['id'] else: documents = docs self.__id = reply.cursor_id if self.__id == 0: kill() self.__data = deque(documents)
0.001355
def parse_esmtp_extensions(message): """ Parses the response given by an ESMTP server after a *EHLO* command. The response is parsed to build: - A dict of supported ESMTP extensions (with parameters, if any). - A list of supported authentication methods. Returns: (dict, list): A (extensions, auth_mechanisms) 2-tuple containing the supported extensions and authentication methods. """ extns = {} auths = [] oldstyle_auth_regex = re.compile(r"auth=(?P<auth>.*)", re.IGNORECASE) extension_regex = re.compile( r"(?P<feature>[a-z0-9][a-z0-9\-]*) ?", re.IGNORECASE ) lines = message.splitlines() for line in lines[1:]: # To be able to communicate with as many SMTP servers as possible, # we have to take the old-style auth advertisement into account. match = oldstyle_auth_regex.match(line) if match: auth = match.group("auth")[0] auth = auth.lower().strip() if auth not in auths: auths.append(auth) # RFC 1869 requires a space between EHLO keyword and parameters. # It's actually stricter, in that only spaces are allowed between # parameters, but were not going to check for that here. # Note that the space isn't present if there are no parameters. match = extension_regex.match(line) if match: feature = match.group("feature").lower() params = match.string[match.end("feature") :].strip() extns[feature] = params if feature == "auth": auths.extend([param.strip().lower() for param in params.split()]) return extns, auths
0.002144
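A worked example for parse_esmtp_extensions() above, using a made-up EHLO reply; the expected output follows directly from the parsing logic in the snippet (the first line is skipped, and AUTH parameters are lower-cased into the auth list).

reply = (
    "example.com Hello client\n"
    "SIZE 33554432\n"
    "8BITMIME\n"
    "AUTH PLAIN LOGIN\n"
    "HELP"
)
extensions, auth_methods = parse_esmtp_extensions(reply)
# extensions   -> {'size': '33554432', '8bitmime': '', 'auth': 'PLAIN LOGIN', 'help': ''}
# auth_methods -> ['plain', 'login']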
def _parse_response(self, response): """ Parse http raw response into python dictionary object. :param str response: http response :returns: response dict :rtype: dict """ response_dict = {} for line in response.splitlines(): key, value = line.split("=", 1) response_dict[key] = value return response_dict
0.007109
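An illustrative call for _parse_response() above (on whatever client object defines it); the raw payload is made up. Only the first '=' on each line splits key from value.

raw = "status=OK\ncount=3\nmessage=a=b"
client._parse_response(raw)
# -> {'status': 'OK', 'count': '3', 'message': 'a=b'}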
def upload_stream(stream, server, account, projname, language=None, username=None, password=None, append=False, stage=False): """ Given a file-like object containing a JSON stream, upload it to Luminoso with the given account name and project name. """ client = LuminosoClient.connect(server, username=username, password=password) if not append: # If we're not appending to an existing project, create new project. info = client.post('/projects/' + account, name=projname) project_id = info['project_id'] print('New project ID:', project_id) else: projects = client.get('/projects/' + account, name=projname) if len(projects) == 0: print('No such project exists!') return if len(projects) > 1: print('Warning: Multiple projects with name "%s". ' % projname, end='') project_id = projects[0]['project_id'] print('Using existing project with id %s.' % project_id) project = client.change_path('/projects/' + account + '/' + project_id) counter = 0 for batch in batches(stream, 1000): counter += 1 documents = list(batch) project.upload('docs', documents) print('Uploaded batch #%d' % (counter)) if not stage: # Calculate the docs into the assoc space. print('Calculating.') kwargs = {} if language is not None: kwargs = {'language': language} job_id = project.post('docs/recalculate', **kwargs) project.wait_for(job_id)
0.000604
def parse_litezip(path): """Parse a litezip file structure to a data structure given the path to the litezip directory. """ struct = [parse_collection(path)] struct.extend([parse_module(x) for x in path.iterdir() if x.is_dir() and x.name.startswith('m')]) return tuple(sorted(struct))
0.003049
def flatten_container(self, container): """ Accepts a chronos container and pulls out the nested values into the top level """ for names in ARG_MAP.values(): if names[TransformationTypes.CHRONOS.value]['name'] and \ '.' in names[TransformationTypes.CHRONOS.value]['name']: chronos_dotted_name = names[TransformationTypes.CHRONOS.value]['name'] parts = chronos_dotted_name.split('.') if parts[-2] == 'parameters': # Special lookup for docker parameters common_type = names[TransformationTypes.CHRONOS.value].get('type') result = self._lookup_parameter(container, parts[-1], common_type) if result: container[chronos_dotted_name] = result else: result = lookup_nested_dict(container, *parts) if result: container[chronos_dotted_name] = result return container
0.006542
def combine(self, other_sequence): """ If this sequence is the prefix of another sequence, combine them into a single VariantSequence object. If the other sequence is contained in this one, then add its reads to this VariantSequence. Also tries to flip the order (e.g. this sequence is a suffix or this sequence is a subsequence). If sequences can't be combined then returns None. """ if other_sequence.alt != self.alt: logger.warn( "Cannot combine %s and %s with mismatching alt sequences", self, other_sequence) return None elif self.contains(other_sequence): return self.add_reads(other_sequence.reads) elif other_sequence.contains(self): return other_sequence.add_reads(self.reads) elif self.left_overlaps(other_sequence): # If sequences are like AABC and ABCC return VariantSequence( prefix=self.prefix, alt=self.alt, suffix=other_sequence.suffix, reads=self.reads.union(other_sequence.reads)) elif other_sequence.left_overlaps(self): return VariantSequence( prefix=other_sequence.prefix, alt=self.alt, suffix=self.suffix, reads=self.reads.union(other_sequence.reads)) else: # sequences don't overlap return None
0.001322
def unpack(self, buff=None, offset=0): """Unpack *buff* into this object. This method will convert a binary data into a readable value according to the attribute format. Args: buff (bytes): Binary buffer. offset (int): Where to begin unpacking. Raises: :exc:`~.exceptions.UnpackException`: If unpack fails. """ length = UBInt16() length.unpack(buff, offset) super().unpack(buff[:offset+length.value], offset)
0.003846
def setup_once(initfn): """ call class instance method for initial setup :: class B(object): def init(self, a): print 'init call:', a @setup_once(init) def mycall(self, a): print 'real call:', a b = B() b.mycall(222) b.mycall(333) :param function initfn: :return: decorated method """ def wrap(method): finit = initfn.__name__ fnname = method.__name__ @functools.wraps(method) def wrapped(self, *args, **kwargs): @functools.wraps(method) def aftersetup(*a, **kw): return method(self, *a, **kw) setupfn = getattr(self, finit) setupfn(*args, **kwargs) res = method(self, *args, **kwargs) setattr(self, fnname, aftersetup) return res return wrapped return wrap
0.001068
def register_pivot_wavelength(self, telescope, band, wlen): """Register precomputed pivot wavelengths.""" if (telescope, band) in self._pivot_wavelengths: raise AlreadyDefinedError('pivot wavelength for %s/%s already ' 'defined', telescope, band) self._note(telescope, band) self._pivot_wavelengths[telescope,band] = wlen return self
0.007092
def _kmeans_run(X, n_clusters, max_iter, tol): """ Run a single trial of k-means clustering on dataset X, and given number of clusters """ membs = np.empty(shape=X.shape[0], dtype=int) centers = _kmeans_init(X, n_clusters) sse_last = 9999.9 n_iter = 0 for it in range(1,max_iter): membs = _assign_clusters(X, centers) centers,sse_arr = _update_centers(X, membs, n_clusters) sse_total = np.sum(sse_arr) if np.abs(sse_total - sse_last) < tol: n_iter = it break sse_last = sse_total return(centers, membs, sse_total, sse_arr, n_iter)
0.004717
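The helpers _kmeans_init, _assign_clusters and _update_centers are not shown in the snippet; as a labelled assumption, a minimal Euclidean-distance sketch of the assignment step might look like this.

import numpy as np

def _assign_clusters(X, centers):
    # Distance of every sample to every center, shape (n_samples, n_clusters).
    dists = np.linalg.norm(X[:, None, :] - centers[None, :, :], axis=2)
    # Each sample joins its nearest center.
    return np.argmin(dists, axis=1)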
def get_bucket_notification_config(self, bucket): """ Get the notification configuration of a bucket. @param bucket: The name of the bucket. @return: A C{Deferred} that will request the bucket's notification configuration. """ details = self._details( method=b"GET", url_context=self._url_context(bucket=bucket, object_name="?notification"), ) d = self._submit(self._query_factory(details)) d.addCallback(self._parse_notification_config) return d
0.005367
def _remove(self, timer): """Remove timer from heap lock and presence are assumed""" assert timer.timer_heap == self del self.timers[timer] assert timer in self.heap self.heap.remove(timer) heapq.heapify(self.heap)
0.007634
def get_session(config=None, config_file=None, debug=None, http_adapter_kwargs=None): """Return a new :class:`ArchiveSession` object. The :class:`ArchiveSession` object is the main interface to the ``internetarchive`` lib. It allows you to persist certain parameters across tasks. :type config: dict :param config: (optional) A dictionary used to configure your session. :type config_file: str :param config_file: (optional) A path to a config file used to configure your session. :type http_adapter_kwargs: dict :param http_adapter_kwargs: (optional) Keyword arguments that :py:class:`requests.adapters.HTTPAdapter` takes. :returns: :class:`ArchiveSession` object. Usage: >>> from internetarchive import get_session >>> config = dict(s3=dict(access='foo', secret='bar')) >>> s = get_session(config) >>> s.access_key 'foo' From the session object, you can access all of the functionality of the ``internetarchive`` lib: >>> item = s.get_item('nasa') >>> item.download() nasa: ddddddd - success >>> s.get_tasks(task_ids=31643513)[0].server 'ia311234' """ return session.ArchiveSession(config, config_file, debug, http_adapter_kwargs)
0.004573
def get_cited_dois(arxiv_id): """ Get the DOIs of the papers cited in a .bbl file. .. note:: Bulk download of sources from arXiv is not permitted by their API. \ You should have a look at http://arxiv.org/help/bulk_data_s3. :param arxiv_id: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``) in \ a canonical form. :returns: A dict of cleaned plaintext citations and their associated DOI. """ dois = {} # Get the list of bbl files for this preprint bbl_files = arxiv.get_bbl(arxiv_id) for bbl_file in bbl_files: # Fetch the cited DOIs for each of the bbl files dois.update(bbl.get_cited_dois(bbl_file)) return dois
0.001399
def update_position(self, time, xs, ys, zs, vxs, vys, vzs, ethetas, elongans, eincls, ds=None, Fs=None, ignore_effects=False, component_com_x=None, **kwargs): """ Update the position of the star into its orbit :parameter float time: the current time :parameter list xs: a list/array of x-positions of ALL COMPONENTS in the :class:`System` :parameter list ys: a list/array of y-positions of ALL COMPONENTS in the :class:`System` :parameter list zs: a list/array of z-positions of ALL COMPONENTS in the :class:`System` :parameter list vxs: a list/array of x-velocities of ALL COMPONENTS in the :class:`System` :parameter list vys: a list/array of y-velocities of ALL COMPONENTS in the :class:`System` :parameter list vzs: a list/array of z-velocities of ALL COMPONENTS in the :class:`System` :parameter list ethetas: a list/array of euler-thetas of ALL COMPONENTS in the :class:`System` :parameter list elongans: a list/array of euler-longans of ALL COMPONENTS in the :class:`System` :parameter list eincls: a list/array of euler-incls of ALL COMPONENTS in the :class:`System` :parameter list ds: (optional) a list/array of instantaneous distances of ALL COMPONENTS in the :class:`System` :parameter list Fs: (optional) a list/array of instantaneous syncpars of ALL COMPONENTS in the :class:`System` """ self.reset_time(time, ethetas[self.ind_self], elongans[self.ind_self], eincls[self.ind_self]) #-- Get current position/euler information # TODO: get rid of this ugly _value stuff pos = (_value(xs[self.ind_self]), _value(ys[self.ind_self]), _value(zs[self.ind_self])) vel = (_value(vxs[self.ind_self_vel]), _value(vys[self.ind_self_vel]), _value(vzs[self.ind_self_vel])) euler = (_value(ethetas[self.ind_self]), _value(elongans[self.ind_self]), _value(eincls[self.ind_self])) euler_vel = (_value(ethetas[self.ind_self_vel]), _value(elongans[self.ind_self_vel]), _value(eincls[self.ind_self_vel])) # TODO: eventually pass etheta to has_standard_mesh # TODO: implement reprojection as an option based on a nearby standard? if self.needs_remesh or not self.has_standard_mesh(): logger.debug("{}.update_position: remeshing at t={}".format(self.component, time)) # track whether we did the remesh or not, so we know if we should # compute local quantities if not otherwise necessary did_remesh = True # TODO: allow time dependence on d and F from dynamics # d = _value(ds[self.ind_self]) # F = _value(Fs[self.ind_self]) new_mesh_dict, scale = self._build_mesh(mesh_method=self.mesh_method) if self.mesh_method != 'wd': new_mesh_dict = self._offset_mesh(new_mesh_dict) # We only need the gradients where we'll compute local # quantities which, for a marching mesh, is at the vertices. new_mesh_dict['normgrads'] = new_mesh_dict.pop('vnormgrads', np.array([])) # And lastly, let's fill the velocities column - with zeros # at each of the vertices new_mesh_dict['velocities'] = np.zeros(new_mesh_dict['vertices'].shape if self.mesh_method != 'wd' else new_mesh_dict['centers'].shape) new_mesh_dict['tareas'] = np.array([]) # TODO: need to be very careful about self.sma vs self._scale - maybe need to make a self._instantaneous_scale??? # self._scale = scale if not self.has_standard_mesh(): # then we only computed this because we didn't already have a # standard_mesh... 
so let's save this for future use # TODO: eventually pass etheta to save_as_standard_mesh protomesh = mesh.ProtoMesh(**new_mesh_dict) self.save_as_standard_mesh(protomesh) # Here we'll build a scaledprotomesh directly from the newly # marched mesh # NOTE that we're using scale from the new # mesh rather than self._scale since the instantaneous separation # has likely changed since periastron scaledprotomesh = mesh.ScaledProtoMesh(scale=scale, **new_mesh_dict) else: logger.debug("{}.update_position: accessing standard mesh at t={}".format(self.component, self.time)) # track whether we did the remesh or not, so we know if we should # compute local quantities if not otherwise necessary did_remesh = False # We still need to go through scaledprotomesh instead of directly # to mesh since features may want to process the body-centric # coordinates before placing in orbit # TODO: eventually pass etheta to get_standard_mesh scaledprotomesh = self.get_standard_mesh(scaled=True) # TODO: can we avoid an extra copy here? if not ignore_effects and len(self.features): logger.debug("{}.update_position: processing features at t={}".format(self.component, self.time)) # First allow features to edit the coords_for_computations (pvertices). # Changes here WILL affect future computations for logg, teff, # intensities, etc. Note that these WILL NOT affect the # coords_for_observations automatically - those should probably be # perturbed as well, unless there is a good reason not to. for feature in self.features: # NOTE: these are ALWAYS done on the protomesh coords_for_observations = feature.process_coords_for_computations(scaledprotomesh.coords_for_computations, s=self.polar_direction_xyz, t=self.time) if scaledprotomesh._compute_at_vertices: scaledprotomesh.update_columns(pvertices=coords_for_observations) else: scaledprotomesh.update_columns(centers=coords_for_observations) raise NotImplementedError("areas are not updated for changed mesh") for feature in self.features: coords_for_observations = feature.process_coords_for_observations(scaledprotomesh.coords_for_computations, scaledprotomesh.coords_for_observations, s=self.polar_direction_xyz, t=self.time) if scaledprotomesh._compute_at_vertices: scaledprotomesh.update_columns(vertices=coords_for_observations) # TODO [DONE?]: centers either need to be supported or we need to report # vertices in the frontend as x, y, z instead of centers updated_props = libphoebe.mesh_properties(scaledprotomesh.vertices, scaledprotomesh.triangles, tnormals=True, areas=True) scaledprotomesh.update_columns(**updated_props) else: scaledprotomesh.update_columns(centers=coords_for_observations) raise NotImplementedError("areas are not updated for changed mesh") # TODO NOW [OPTIMIZE]: get rid of the deepcopy here - but without it the # mesh velocities build-up and do terrible things. It may be possible # to just clear the velocities in get_standard_mesh()? logger.debug("{}.update_position: placing in orbit, Mesh.from_scaledproto at t={}".format(self.component, self.time)) self._mesh = mesh.Mesh.from_scaledproto(scaledprotomesh.copy(), pos, vel, euler, euler_vel, self.polar_direction_xyz*self.freq_rot*self._scale, component_com_x) # Lastly, we'll recompute physical quantities (not observables) if # needed for this time-step. # TODO [DONE?]: make sure features smartly trigger needs_recompute_instantaneous # TODO: get rid of the or True here... 
the problem is that we're saving the standard mesh before filling local quantities if self.needs_recompute_instantaneous or did_remesh: logger.debug("{}.update_position: calling compute_local_quantities at t={}".format(self.component, self.time)) self.compute_local_quantities(xs, ys, zs, ignore_effects) return
0.006394
def find_cookies_for_class(cookies_file, class_name): """ Return a RequestsCookieJar containing the cookies for .coursera.org and class.coursera.org found in the given cookies_file. """ path = "/" + class_name def cookies_filter(c): return c.domain == ".coursera.org" \ or (c.domain == "class.coursera.org" and c.path == path) cj = get_cookie_jar(cookies_file) new_cj = requests.cookies.RequestsCookieJar() for c in filter(cookies_filter, cj): new_cj.set_cookie(c) return new_cj
0.001815
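A minimal usage sketch for the cookie filter above, assuming the function and its get_cookie_jar helper are importable in the same module; the cookies.txt path, class name, and URL are hypothetical.

# Usage sketch, assuming find_cookies_for_class (and its get_cookie_jar
# helper) are in scope. File name, class name and URL are hypothetical.
import requests

jar = find_cookies_for_class("cookies.txt", "ml-005")
session = requests.Session()
session.cookies = jar
resp = session.get("https://class.coursera.org/ml-005/lecture/index")
print(resp.status_code)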
def bootstrap_indexes(data, n_samples=10000):
    """
Given data points data, where axis 0 is considered to delineate points, return
a generator yielding sets of bootstrap indexes. It can also be materialized as a
list of bootstrap index sets with list(bootstrap_indexes(data)).
    """
    # randint is assumed to be numpy.random.randint, i.e. each replicate samples
    # row indices with replacement.
    for _ in range(n_samples):
        yield randint(data.shape[0], size=(data.shape[0],))
0.002703
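A short sketch of how a generator like bootstrap_indexes above can be used to bootstrap a confidence interval for the mean; it assumes numpy and that bootstrap_indexes (with randint taken from numpy.random) is in scope, and the 2.5/97.5 percentiles are an illustrative choice, not part of the original API.

# Usage sketch, assuming bootstrap_indexes above is in scope and that randint
# comes from numpy.random. Percentile choices below are illustrative only.
import numpy as np

data = np.random.default_rng(0).normal(loc=5.0, scale=2.0, size=200)

boot_means = np.array([
    data[idx].mean()                      # resample rows with replacement
    for idx in bootstrap_indexes(data, n_samples=2000)
])

low, high = np.percentile(boot_means, [2.5, 97.5])
print("bootstrap 95% CI for the mean: [{:.3f}, {:.3f}]".format(low, high))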
def get_hosts_from_file(filename, default_protocol='telnet', default_domain='', remove_duplicates=False, encoding='utf-8'): """ Reads a list of hostnames from the file with the given name. :type filename: string :param filename: A full filename. :type default_protocol: str :param default_protocol: Passed to the Host constructor. :type default_domain: str :param default_domain: Appended to each hostname that has no domain. :type remove_duplicates: bool :param remove_duplicates: Whether duplicates are removed. :type encoding: str :param encoding: The encoding of the file. :rtype: list[Host] :return: The newly created host instances. """ # Open the file. if not os.path.exists(filename): raise IOError('No such file: %s' % filename) # Read the hostnames. have = set() hosts = [] with codecs.open(filename, 'r', encoding) as file_handle: for line in file_handle: hostname = line.split('#')[0].strip() if hostname == '': continue if remove_duplicates and hostname in have: continue have.add(hostname) hosts.append(to_host(hostname, default_protocol, default_domain)) return hosts
0.000723
def encode(self, value): """ Return a bytestring representation of the value """ if isinstance(value, Token): return b(value.value) if isinstance(value, bytes): return value elif isinstance(value, (int, long)): value = b(str(value)) elif isinstance(value, float): value = repr(value) elif not isinstance(value, basestring): value = str(value) if isinstance(value, unicode): value = value.encode(self.encoding, self.encoding_errors) return value
0.006536
def t(self, point): ''' :point: Point subclass :return: float If :point: is collinear, determine the 't' coefficient of the parametric equation: xyz = A<xyz> + t ( B<xyz> - A<xyz> ) if t < 0, point is less than A and B on the line if t >= 0 and <= 1, point is between A and B if t > 1 point is greater than B ''' # XXX could use for an ordering on points? if point not in self: msg = "'{p}' is not collinear with '{l}'" raise CollinearPoints(msg.format(p=point, l=self)) # p = A + t ( B - A) # p - A = t ( B - A) # p - A / (B -A) = t return (point - self.A) / self.m
0.002759
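A small standalone check of the parametric relation described in the docstring above, written with plain numpy vectors because the Point/Line classes it belongs to are not shown in this snippet; the coordinates are arbitrary.

# Standalone numeric check of p = A + t * (B - A), using plain numpy vectors
# instead of the Point/Line classes (not shown here). Coordinates are arbitrary.
import numpy as np

A = np.array([0.0, 0.0, 0.0])
B = np.array([2.0, 2.0, 0.0])
P = np.array([3.0, 3.0, 0.0])           # collinear with A and B, beyond B

# p - A = t * (B - A)  =>  t from any non-degenerate component
t = (P - A)[0] / (B - A)[0]
print(t)                                 # 1.5 -> past B on the A->B line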
def activate(admin=True, browser=True, name='admin', reflect_all=False): """Activate each pre-registered model or generate the model classes and (possibly) register them for the admin. :param bool admin: should we generate the admin interface? :param bool browser: should we open the browser for the user? :param name: name to use for blueprint created by the admin interface. Set this to avoid naming conflicts with other blueprints (if trying to use sandman to connect to multiple databases simultaneously) """ with app.app_context(): generate_pks = app.config.get('SANDMAN_GENERATE_PKS', None) or False if getattr(app, 'class_references', None) is None or reflect_all: app.class_references = collections.OrderedDict() generate_endpoint_classes(db, generate_pks) else: Model.prepare(db.engine) prepare_relationships(db, current_app.class_references) if admin: try: show_pks = current_app.config['SANDMAN_SHOW_PKS'] except KeyError: show_pks = False register_classes_for_admin(db.session, show_pks, name) if browser: port = app.config.get('SERVER_PORT', None) or 5000 webbrowser.open('http://localhost:{}/admin'.format(port))
0.000729
def django(line): ''' >>> import pprint >>> input_line1 = '[23/Aug/2017 11:35:25] INFO [app.middleware_log_req:50]View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }' >>> output_line1 = django(input_line1) >>> pprint.pprint(output_line1) {'data': {'loglevel': 'INFO', 'logname': '[app.middleware_log_req:50]', 'message': 'View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }', 'timestamp': '2017-08-23T11:35:25'}, 'level': 'INFO', 'timestamp': '2017-08-23T11:35:25'} >>> input_line2 = '[22/Sep/2017 06:32:15] INFO [app.function:6022] {"UUID": "c47f3530-9f5f-11e7-a559-917d011459f7", "timestamp":1506061932546, "misc": {"status": 200, "ready_state": 4, "end_time_ms": 1506061932546, "url": "/api/function?", "start_time_ms": 1506061932113, "response_length": 31, "status_message": "OK", "request_time_ms": 433}, "user": "root", "host_url": "localhost:8888", "message": "ajax success"}' >>> output_line2 = django(input_line2) >>> pprint.pprint(output_line2) {'data': {'loglevel': 'INFO', 'logname': '[app.function:6022]', 'message': {u'UUID': u'c47f3530-9f5f-11e7-a559-917d011459f7', u'host_url': u'localhost:8888', u'message': u'ajax success', u'misc': {u'end_time_ms': 1506061932546L, u'ready_state': 4, u'request_time_ms': 433, u'response_length': 31, u'start_time_ms': 1506061932113L, u'status': 200, u'status_message': u'OK', u'url': u'/api/function?'}, u'timestamp': 1506061932546L, u'user': u'root'}, 'timestamp': '2017-09-22T06:32:15'}, 'level': 'INFO', 'timestamp': '2017-09-22T06:32:15'} Case2: [18/Sep/2017 05:40:36] ERROR [app.apps:78] failed to get the record, collection = Collection(Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True, serverselectiontimeoutms=3000), u'collection_cache'), u'function_dummy_version') Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/mongo_cache/mongocache.py", line 70, in __getitem__ result = self.collection.find_one({"_id": key}) OperationFailure: not authorized on collection_cache to execute command { find: "function", filter: { _id: "zydelig-cosine-20" }, limit: 1, singleBatch: true } ''' #TODO we need to handle case2 logs data = {} log = re.findall(r'^(\[\d+/\w+/\d+ \d+:\d+:\d+\].*)', line) if len(log) == 1: data['timestamp'] = datetime.datetime.strptime(re.findall(r'(\d+/\w+/\d+ \d+:\d+:\d+)',\ log[0])[0],"%d/%b/%Y %H:%M:%S").isoformat() data['loglevel'] = re.findall('[A-Z]+', log[0])[1] data['logname'] = re.findall('\[\D+.\w+:\d+\]', log[0])[0] message = re.findall('\{.+\}', log[0]) try: if len(message) > 0: message = json.loads(message[0]) else: message = re.split(']', log[0]) message = ''.join(message[2:]) except ValueError: message = re.split(']', log[0]) message = ''.join(message[2:]) data['message'] = message return dict( timestamp=data['timestamp'], level=data['loglevel'], data=data, ) else: return dict( timestamp=datetime.datetime.isoformat(datetime.datetime.utcnow()), data={raw:line} )
0.004952
async def set_lock(self, resource, lock_identifier, lock_timeout):
    """
    Lock this instance and set the lock expiration time to lock_timeout.
    :param resource: redis key to set
    :param lock_identifier: unique id of the lock
    :param lock_timeout: timeout for lock in seconds
    :raises: LockError if lock is not acquired
    """
    lock_timeout_ms = int(lock_timeout * 1000)
    try:
        with await self.connect() as redis:
            await redis.eval(
                self.set_lock_script,
                keys=[resource],
                args=[lock_identifier, lock_timeout_ms]
            )
    except aioredis.errors.ReplyError as exc:  # script fault
        self.log.debug('Can not set lock "%s" on %s',
                       resource, repr(self))
        raise LockError('Can not set lock') from exc
    except (aioredis.errors.RedisError, OSError) as exc:
        self.log.error('Can not set lock "%s" on %s: %s',
                       resource, repr(self), repr(exc))
        raise LockError('Can not set lock') from exc
    except asyncio.CancelledError:
        self.log.debug('Lock "%s" is cancelled on %s',
                       resource, repr(self))
        raise
    except Exception as exc:
        self.log.exception('Can not set lock "%s" on %s',
                           resource, repr(self))
        raise
    else:
        self.log.debug('Lock "%s" is set on %s', resource, repr(self))
0.001284
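The method above delegates the actual acquire logic to self.set_lock_script, a Lua script that is not shown in this snippet. The sketch below illustrates the usual acquire step such a script implements (SET key value NX PX ttl), using synchronous redis-py for brevity; it is an assumption about the script's intent, not the library's implementation.

# Hedged sketch of the acquire step a script like set_lock_script typically
# performs: SET key value NX PX ttl. Uses redis-py; names are illustrative.
import uuid
from typing import Optional

import redis

def try_acquire(client: redis.Redis, resource: str, ttl_ms: int) -> Optional[str]:
    lock_id = str(uuid.uuid4())
    # nx=True -> only set if the key does not already exist; px -> TTL in ms
    if client.set(resource, lock_id, nx=True, px=ttl_ms):
        return lock_id        # keep lock_id so only the owner can release it
    return None               # another holder owns the lock

# client = redis.Redis()
# token = try_acquire(client, "lock:report-42", ttl_ms=10_000)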
def init(script=sys.argv[0], base='lib', append=True, ignore=['/','/usr'],
         realpath=False, pythonpath=False, throw=False):
    """
    Parameters:

    * `script`: Path to the script file. Default is the currently running script file.
    * `base`: Name of the base module directory to add to sys.path. Default is "lib".
    * `append`: Append the module directory to the end of sys.path, or insert it at the beginning? Default is to append.
    * `ignore`: List of directories to ignore during the module search. Default is to ignore "/" and "/usr".
    * `realpath`: Should symlinks be resolved first? Default is False.
    * `pythonpath`: Should the modules directory be added to the PYTHONPATH environment variable? Default is False.
    * `throw`: Should an exception be thrown if no modules directory was found? Default is False.

    Returns:

    * The path to the modules directory if it was found, otherwise None.
    """
    if type(ignore) is str:
        ignore = [ignore]

    script = os.path.realpath(script) if realpath else os.path.abspath(script)
    path = os.path.dirname(script)

    while os.path.dirname(path) != path and (path in ignore or not os.path.isdir(os.path.join(path, base))):
        path = os.path.dirname(path)

    modules_dir = os.path.join(path, base)

    if path not in ignore and os.path.isdir(modules_dir):
        if append:
            sys.path.append(modules_dir)
        else:
            sys.path.insert(1, modules_dir)

        if pythonpath:
            if 'PYTHONPATH' not in os.environ:
                os.environ['PYTHONPATH'] = ''

            if not append:
                os.environ['PYTHONPATH'] += modules_dir

            if os.environ['PYTHONPATH'] != '':
                os.environ['PYTHONPATH'] += os.pathsep

            if append:
                os.environ['PYTHONPATH'] += modules_dir

        return modules_dir
    elif throw:
        raise Exception("Could not find modules directory {} relative to {}".format(base, script))

    return None
0.009922
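A minimal usage sketch for init() above, assuming a project layout with a lib/ directory somewhere above the calling script; the module name "pathinit" and the layout are hypothetical.

# Usage sketch, assuming init() above is importable (module name "pathinit"
# is hypothetical) and a layout like:
#
#   project/
#     lib/            <- vendored modules live here (e.g. project/lib/mylib)
#     tools/run.py    <- calls init() before importing from lib/
#
# tools/run.py
import sys
import pathinit                     # hypothetical module exposing init()

modules_dir = pathinit.init(script=__file__, base='lib', pythonpath=True)
print("added to sys.path:", modules_dir)

import mylib                        # now resolvable from project/lib/mylib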
def to_json(self, validate=False, pretty_print=True, data_path=None): """Convert data to JSON Parameters ---------- data_path : string If not None, then data is written to a separate file at the specified path. Note that the ``url`` attribute if the data must be set independently for the data to load correctly. Returns ------- string Valid Vega JSON. """ # TODO: support writing to separate file return super(self.__class__, self).to_json(validate=validate, pretty_print=pretty_print)
0.002999
def get_entity(self, entity_id):
    """
    Returns the entity object for the given entity identifier
    @type entity_id: string
    @param entity_id: the entity identifier
    @rtype: L{Centity}
    @return: the entity object
    """
    entity_node = self.map_entity_id_to_node.get(entity_id)
    if entity_node is not None:
        return Centity(node=entity_node, type=self.type)
    else:
        for entity_node in self.__get_entity_nodes():
            if self.type == 'NAF':
                label_id = 'id'
            elif self.type == 'KAF':
                label_id = 'eid'
            if entity_node.get(label_id) == entity_id:
                return Centity(node=entity_node, type=self.type)
    return None
0.009227
def parse(self, limit=None): """ Override Source.parse() Args: :param limit (int, optional) limit the number of rows processed Returns: :return None """ if limit is not None: LOG.info("Only parsing first %d rows", limit) ensembl_file = '/'.join((self.rawdir, self.files['ensembl2pathway']['file'])) self._parse_reactome_association_file( ensembl_file, limit, subject_prefix='ENSEMBL', object_prefix='REACT') chebi_file = '/'.join((self.rawdir, self.files['chebi2pathway']['file'])) self._parse_reactome_association_file( chebi_file, limit, subject_prefix='CHEBI', object_prefix='REACT') return
0.006729
def get_metric( self, name: str, labels: Union[Dict[str, str], None] = None) -> Metric: """Return a metric, optionally configured with labels.""" metric = self._metrics[name] if labels: return metric.labels(**labels) return metric
0.006689
def anonymize_column(self, col):
    """Map the values of a column to new ones of the same type.

    It replaces the original values with new ones generated using `faker`, while
    keeping the original distribution. That means that the generated `probability_map`
    for both will have the same values, but different keys.

    Args:
        col (pandas.DataFrame): Dataframe containing the column to anonymize.

    Returns:
        pd.DataFrame: DataFrame with its values mapped to new ones,
        keeping the original distribution.

    Raises:
        ValueError: A `ValueError` is raised if faker is not able to provide enough
        different values.
    """

    column = col[self.col_name]

    generator = self.get_generator()
    original_values = column[~pd.isnull(column)].unique()
    new_values = [generator() for x in range(len(original_values))]

    if len(new_values) != len(set(new_values)):
        raise ValueError(
            'There are not enough different values on the faker provider '
            'for category {}'.format(self.category)
        )

    value_map = dict(zip(original_values, new_values))
    column = column.apply(value_map.get)

    return column.to_frame()
0.004535
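A standalone sketch of the same idea, replacing category labels with fresh values while preserving the original counts, using pandas and a simple counter-based generator in place of faker; the helper name and data are illustrative, not part of the library's API.

# Standalone sketch: replace category labels with fresh values while keeping
# the distribution. A counter stands in for faker; names are illustrative.
import itertools
import pandas as pd

def anonymize_series(column: pd.Series, prefix: str = "anon") -> pd.Series:
    counter = itertools.count(1)
    original_values = column[~pd.isnull(column)].unique()
    new_values = ["{}_{}".format(prefix, next(counter)) for _ in original_values]
    value_map = dict(zip(original_values, new_values))
    return column.map(value_map)          # NaNs stay NaN, counts are preserved

df = pd.DataFrame({"city": ["Oslo", "Oslo", "Bergen", None, "Oslo"]})
print(anonymize_series(df["city"]))
print(anonymize_series(df["city"]).value_counts())   # 3 / 1 split preserved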
def mac(address='', interface='', vlan=0, **kwargs): # pylint: disable=unused-argument ''' Returns the MAC Address Table on the device. :param address: MAC address to filter on :param interface: Interface name to filter on :param vlan: VLAN identifier :return: A list of dictionaries representing the entries in the MAC Address Table CLI Example: .. code-block:: bash salt '*' net.mac salt '*' net.mac vlan=10 Example output: .. code-block:: python [ { 'mac' : '00:1c:58:29:4a:71', 'interface' : 'xe-3/0/2', 'static' : False, 'active' : True, 'moves' : 1, 'vlan' : 10, 'last_move' : 1454417742.58 }, { 'mac' : '8c:60:4f:58:e1:c1', 'interface' : 'xe-1/0/1', 'static' : False, 'active' : True, 'moves' : 2, 'vlan' : 42, 'last_move' : 1453191948.11 } ] ''' proxy_output = salt.utils.napalm.call( napalm_device, # pylint: disable=undefined-variable 'get_mac_address_table', **{ } ) if not proxy_output.get('result'): # if negative, leave the output unchanged return proxy_output mac_address_table = proxy_output.get('out') if vlan and isinstance(vlan, int): mac_address_table = _filter_list(mac_address_table, 'vlan', vlan) if address: mac_address_table = _filter_list(mac_address_table, 'mac', address) if interface: mac_address_table = _filter_list(mac_address_table, 'interface', interface) proxy_output.update({ 'out': mac_address_table }) return proxy_output
0.002102
def process_request():
    """
    Retrieve a CameraStatus, Event or FileRecord from the request, based on the supplied type and ID. If the
    type is 'cached_request' then the ID must be specified in 'cached_request_id' - if this ID is not for an
    entity in the cache this method will return None and clear the cache (this should only happen under
    conditions where we've failed to correctly handle caching, such as a server restart or under extreme load,
    but will result in the server having to re-request a previous value from the exporting party).

    :return:
        An ImportRequest whose 'entity' is the entity for this request, or None if there was an issue causing
        an unexpected cache miss, and whose 'entity_id' is the UUID of the entity requested.
    """
    g.request_dict = safe_load(request.get_data())
    entity_type = g.request_dict['type']
    entity_id = g.request_dict[entity_type]['id']
    ImportRequest.logger.debug("Received request, type={0}, id={1}".format(entity_type, entity_id))
    entity = ImportRequest._get_entity(entity_id)
    ImportRequest.logger.debug("Entity with id={0} was {1}".format(entity_id, entity))
    return ImportRequest(entity=entity, entity_id=entity_id)
0.007779
def scan_video(path): """Scan a video from a `path`. :param str path: existing path to the video. :return: the scanned video. :rtype: :class:`~subliminal.video.Video` """ # check for non-existing path if not os.path.exists(path): raise ValueError('Path does not exist') # check video extension if not path.endswith(VIDEO_EXTENSIONS): raise ValueError('%r is not a valid video extension' % os.path.splitext(path)[1]) dirpath, filename = os.path.split(path) logger.info('Scanning video %r in %r', filename, dirpath) # guess video = Video.fromguess(path, guessit(path)) # size and hashes video.size = os.path.getsize(path) if video.size > 10485760: logger.debug('Size is %d', video.size) video.hashes['opensubtitles'] = hash_opensubtitles(path) video.hashes['shooter'] = hash_shooter(path) video.hashes['thesubdb'] = hash_thesubdb(path) video.hashes['napiprojekt'] = hash_napiprojekt(path) logger.debug('Computed hashes %r', video.hashes) else: logger.warning('Size is lower than 10MB: hashes not computed') return video
0.001712
def inventory(self, choices=None): """ Return a dictionary of the inventory items and status """ status = (True, None) if not choices: return (False, 'No choices made') try: # choices: repos, tools, images, built, running, enabled items = {'repos': [], 'tools': {}, 'images': {}, 'built': {}, 'running': {}, 'enabled': {}} tools = Template(self.manifest).list_tools() for choice in choices: for tool in tools: try: if choice == 'repos': if 'repo' in tool: if (tool['repo'] and tool['repo'] not in items[choice]): items[choice].append(tool['repo']) elif choice == 'tools': items[choice][tool['section']] = tool['name'] elif choice == 'images': # TODO also check against docker items[choice][tool['section']] = tool['image_name'] elif choice == 'built': items[choice][tool['section']] = tool['built'] elif choice == 'running': containers = Containers() status = 'not running' for container in containers: image_name = tool['image_name'] \ .rsplit(':' + tool['version'], 1)[0] image_name = image_name.replace(':', '-') image_name = image_name.replace('/', '-') self.logger.info('image_name: ' + image_name) if container[0] == image_name: status = container[1] elif container[0] == image_name + \ '-' + tool['version']: status = container[1] items[choice][tool['section']] = status elif choice == 'enabled': items[choice][tool['section']] = tool['enabled'] else: # unknown choice pass except Exception as e: # pragma: no cover self.logger.error('Unable to grab info about tool: ' + str(tool) + ' because: ' + str(e)) status = (True, items) except Exception as e: # pragma: no cover self.logger.error( 'Inventory failed with error: {0}'.format(str(e))) status = (False, str(e)) return status
0.000669
def _set_lsp_frr_priority(self, v, load=False): """ Setter method for lsp_frr_priority, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/lsp_frr/lsp_frr_priority (container) If this variable is read-only (config: false) in the source YANG file, then _set_lsp_frr_priority is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_lsp_frr_priority() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=lsp_frr_priority.lsp_frr_priority, is_container='container', presence=False, yang_name="lsp-frr-priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Setup/hold priorites', u'cli-sequence-commands': None, u'cli-full-no': None, u'cli-incomplete-command': None, u'alt-name': u'priority'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """lsp_frr_priority must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=lsp_frr_priority.lsp_frr_priority, is_container='container', presence=False, yang_name="lsp-frr-priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Setup/hold priorites', u'cli-sequence-commands': None, u'cli-full-no': None, u'cli-incomplete-command': None, u'alt-name': u'priority'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""", }) self.__lsp_frr_priority = t if hasattr(self, '_set'): self._set()
0.005155
def info(vm, info_type='all', key='uuid'): ''' Lookup info on running kvm vm : string vm to be targeted info_type : string [all|block|blockstats|chardev|cpus|kvm|pci|spice|version|vnc] info type to return key : string [uuid|alias|hostname] value type of 'vm' parameter CLI Example: .. code-block:: bash salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 vnc salt '*' vmadm.info nacl key=alias salt '*' vmadm.info nacl vnc key=alias ''' ret = {} if info_type not in ['all', 'block', 'blockstats', 'chardev', 'cpus', 'kvm', 'pci', 'spice', 'version', 'vnc']: ret['Error'] = 'Requested info_type is not available' return ret if key not in ['uuid', 'alias', 'hostname']: ret['Error'] = 'Key must be either uuid, alias or hostname' return ret vm = lookup('{0}={1}'.format(key, vm), one=True) if 'Error' in vm: return vm # vmadm info <uuid> [type,...] cmd = 'vmadm info {uuid} {type}'.format( uuid=vm, type=info_type ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] if retcode != 0: ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode) return ret return salt.utils.json.loads(res['stdout'])
0.002869
def get_configured_consensus_module(block_id, state_view): """Returns the consensus_module based on the consensus module set by the "sawtooth_settings" transaction family. Args: block_id (str): the block id associated with the current state_view state_view (:obj:`StateView`): the current state view to use for setting values Raises: UnknownConsensusModuleError: Thrown when an invalid consensus module has been configured. """ settings_view = SettingsView(state_view) default_consensus = \ 'genesis' if block_id == NULL_BLOCK_IDENTIFIER else 'devmode' consensus_module_name = settings_view.get_setting( 'sawtooth.consensus.algorithm', default_value=default_consensus) return ConsensusFactory.get_consensus_module( consensus_module_name)
0.002186
def get_raw(self): """Get a list with information about the file. The returned list contains name, size, last_modified and location. """ return [self.name, self.size, self.last_modified, self.location]
0.008547
def single_qubit_op_to_framed_phase_form( mat: np.ndarray) -> Tuple[np.ndarray, complex, complex]: """Decomposes a 2x2 unitary M into U^-1 * diag(1, r) * U * diag(g, g). U translates the rotation axis of M to the Z axis. g fixes a global phase factor difference caused by the translation. r's phase is the amount of rotation around M's rotation axis. This decomposition can be used to decompose controlled single-qubit rotations into controlled-Z operations bordered by single-qubit operations. Args: mat: The qubit operation as a 2x2 unitary matrix. Returns: A 2x2 unitary U, the complex relative phase factor r, and the complex global phase factor g. Applying M is equivalent (up to global phase) to applying U, rotating around the Z axis to apply r, then un-applying U. When M is controlled, the control must be rotated around the Z axis to apply g. """ vals, vecs = np.linalg.eig(mat) u = np.conj(vecs).T r = vals[1] / vals[0] g = vals[0] return u, r, g
0.000933
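A numeric sanity check of the decomposition stated in the docstring above, assuming single_qubit_op_to_framed_phase_form is in scope; the test matrix is an arbitrary single-qubit rotation chosen for illustration.

# Numeric sanity check of M = U^-1 * diag(1, r) * U * diag(g, g), assuming the
# function above is in scope. The test unitary is an arbitrary axis rotation.
import numpy as np

theta = 0.7
axis = np.array([1.0, 2.0, -0.5])
axis = axis / np.linalg.norm(axis)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]], dtype=complex)
mat = (np.cos(theta / 2) * np.eye(2)
       - 1j * np.sin(theta / 2) * (axis[0] * X + axis[1] * Y + axis[2] * Z))

u, r, g = single_qubit_op_to_framed_phase_form(mat)
reconstructed = np.conj(u).T @ np.diag([1, r]) @ u @ np.diag([g, g])
assert np.allclose(reconstructed, mat), "decomposition should reproduce mat"
print("ok: max error", np.abs(reconstructed - mat).max())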
def create_insert_dict_string(self, tblname, d, PKfields=[], fields=None, check_existing=False):
    '''The core helper behind the insert_dict functions: it builds and returns the SQL INSERT statement and its
       parameters, but does not itself insert any data into the database.

       The dictionary d must have keys matching the fieldnames of tblname. Three values are returned: the INSERT
       statement, the tuple of parameter values, and record_exists, which is True/False when check_existing was
       requested (based on a lookup over PKfields) and None otherwise.'''

    if type(PKfields) == type(""):
        PKfields = [PKfields]

    if fields is None:
        fields = sorted(d.keys())
    values = None
    SQL = None
    try:
        # Search for existing records
        wherestr = []
        PKvalues = []
        for PKfield in PKfields:
            if d[PKfield] is None:
                wherestr.append("%s IS NULL" % PKfield)
            else:
                wherestr.append("%s=%%s" % PKfield)
                PKvalues.append(d[PKfield])
        # join is assumed to be a (words, sep) helper, e.g. Python 2's string.join
        PKfields = join(PKfields, ",")
        wherestr = join(wherestr, " AND ")
        record_exists = None
        if check_existing:
            record_exists = not(not(self.execute_select("SELECT %s FROM %s" % (PKfields, tblname) + " WHERE %s" % wherestr, parameters=tuple(PKvalues), locked=False)))
        SQL = 'INSERT INTO %s (%s) VALUES (%s)' % (
            tblname, join(fields, ", "), join(['%s' for x in range(len(fields))], ','))
        values = tuple([d[k] for k in fields])
        return SQL, values, record_exists
    except Exception as e:
        raise Exception("Error occurred during database insertion: '%s'. %s" % (str(e), traceback.format_exc()))
0.00896
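A hedged Python 3 sketch of the same idea, building a parameterized INSERT statement from a dict with standard-library string joins; the table and column names are illustrative and this is not the library's API.

# Hedged Python 3 sketch: build a parameterized INSERT from a dict.
# Table/column names are illustrative; this is not the library's API.
def insert_sql_from_dict(tblname, d, fields=None):
    fields = sorted(d.keys()) if fields is None else fields
    sql = "INSERT INTO {} ({}) VALUES ({})".format(
        tblname,
        ", ".join(fields),
        ", ".join(["%s"] * len(fields)),   # DB-API paramstyle 'format'
    )
    return sql, tuple(d[k] for k in fields)

sql, params = insert_sql_from_dict("users", {"id": 7, "name": "ada"})
print(sql)      # INSERT INTO users (id, name) VALUES (%s, %s)
print(params)   # (7, 'ada')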
def httpretty_callback(request, uri, headers): """httpretty request handler. converts a call intercepted by httpretty to the stack-in-a-box infrastructure :param request: request object :param uri: the uri of the request :param headers: headers for the response :returns: tuple - (int, dict, string) containing: int - the http response status code dict - the headers for the http response string - http string response """ method = request.method response_headers = CaseInsensitiveDict() response_headers.update(headers) request_headers = CaseInsensitiveDict() request_headers.update(request.headers) request.headers = request_headers return StackInABox.call_into(method, request, uri, response_headers)
0.001073
def matrix(fasta_path: 'path to tictax-annotated fasta input',
           scafstats_path: 'path to BBMap scafstats file'):
    ''' Generate a taxonomic count matrix from tictax-classified contigs '''
    records = SeqIO.parse(fasta_path, 'fasta')
    df = tictax.matrix(records, scafstats_path)
    df.to_csv(sys.stdout)
0.003058
def start_file_logger(self, name, log_file_level=logging.DEBUG, log_file_path='./'):
    """Start file logging."""
    # pathlib.Path is assumed to be imported at module level; building a Path
    # here keeps .parent, .exists() and mkdir() available below.
    log_file_path = pathlib.Path(log_file_path).expanduser() / '{}.log'.format(name)
    logdir = log_file_path.parent

    try:

        logdir.mkdir(parents=True, exist_ok=True)

        # If the log file exists, back it up before creating a new file handler
        if log_file_path.exists():
            strtime = datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S')
            shutil.move(str(log_file_path), str(log_file_path) + '.' + strtime)

        self.fh = TimedRotatingFileHandler(str(log_file_path), when='midnight', utc=True)
        self.fh.suffix = '%Y-%m-%d_%H:%M:%S'
    except (IOError, OSError) as ee:
        warnings.warn('log file {0!r} could not be opened for writing: '
                      '{1}'.format(log_file_path, ee), RuntimeWarning)
    else:
        # fmt is assumed to be a logging.Formatter defined at module level.
        self.fh.setFormatter(fmt)
        self.addHandler(self.fh)
        self.fh.setLevel(log_file_level)

        self.log_filename = log_file_path
0.006346
def get_parameter_or_create(name, shape=None, initializer=None, need_grad=True, as_need_grad=None): """ Returns an existing parameter variable with the provided name. If a variable with the provided name does not exist, a new variable with the provided name is returned. Args: name(str): The name under the current scope. If it already exists, the name is queried from the parameter manager. shape (:obj:`tuple` of :obj:`int`): Shape of created parameter. The shape of the specified parameter must match with this shape. The default is None which is only valid if initializer is given as an :obj:`numpy.ndarray`. initializer (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): An initialization function to be applied to the parameter. :obj:`numpy.ndarray` can also be given to initialize parameters from numpy array data. need_grad (bool): Register the parameter with the specified ``need_grad`` flag. The default is True. If the flag is different from the previously specified one, the flag will be overwritten, but the values will be kept. as_need_grad (bool): Get a parameter variable with the specified ``need_grad`` flag. Note that this doesn't overwrite the flag of the registered parameter variable with the provided name. Instead, if the given flag mismatches with the previously registered ``need_grad`` flag, it returns a new variable referring to the same array contents but with ``need_grad=as_need_grad``. """ names = name.split('/') if len(names) > 1: with parameter_scope(names[0]): return get_parameter_or_create('/'.join(names[1:]), shape, initializer, need_grad, as_need_grad) param = get_parameter(names[0]) if param is None: class VariableInfo: pass info = VariableInfo() info.initializer = initializer if initializer is not None: if isinstance(initializer, numpy.ndarray): # numpy init param = nn.Variable(initializer.shape, need_grad=need_grad) param.d = initializer # initializer init elif isinstance(initializer, nn.initializer.BaseInitializer) or initializer.__name__ == "<lambda>": assert shape is not None param = nn.Variable(shape, need_grad=need_grad) param.d = initializer(shape=param.shape) else: raise ValueError( "`initializer` must be either the :obj:`numpy.ndarray` or an instance inherited from `nnabla.initializer.BaseInitializer`.") else: # default init assert shape is not None param = nn.Variable(shape, need_grad=need_grad) set_parameter(name, param) else: if param.shape != tuple(shape): raise ValueError( 'The size of existing parameter "{}" {} is different from the size of new parameter {}.\n' 'To clear all parameters, call nn.clear_parameters().'.format(name, param.shape, tuple(shape))) if need_grad != param.need_grad: param.need_grad = need_grad if as_need_grad is None: return param if param.need_grad != as_need_grad: param = param.get_unlinked_variable(need_grad=as_need_grad) return param
0.002894
def defaults(self): """Return default metadata.""" return dict( access_right='open', description=self.description, license='other-open', publication_date=self.release['published_at'][:10], related_identifiers=list(self.related_identifiers), version=self.version, title=self.title, upload_type='software', )
0.004695
def _encode_request(self, request): """Encode a request object""" obj = request_to_dict(request, self.spider) return self.serializer.dumps(obj)
0.011976
def addFreetextAnnot(self, rect, text, fontsize=12, fontname=None, color=None, rotate=0): """Add a 'FreeText' annotation in rectangle 'rect'.""" CheckParent(self) val = _fitz.Page_addFreetextAnnot(self, rect, text, fontsize, fontname, color, rotate) if not val: return val.thisown = True val.parent = weakref.proxy(self) self._annot_refs[id(val)] = val return val
0.011628
def terminate(self): """Terminate all connections in the pool.""" if self._closed: return self._check_init() for ch in self._holders: ch.terminate() self._closed = True
0.008621
async def submit_request(pool_handle: int, request_json: str) -> str: """ Publishes request message to validator pool (no signing, unlike sign_and_submit_request). The request is sent to the validator pool as is. It's assumed that it's already prepared. :param pool_handle: pool handle (created by open_pool_ledger). :param request_json: Request data json. :return: Request result as json. """ logger = logging.getLogger(__name__) logger.debug("submit_request: >>> pool_handle: %r, request_json: %r", pool_handle, request_json) if not hasattr(submit_request, "cb"): logger.debug("submit_request: Creating callback") submit_request.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p)) c_pool_handle = c_int32(pool_handle) c_request_json = c_char_p(request_json.encode('utf-8')) request_result = await do_call('indy_submit_request', c_pool_handle, c_request_json, submit_request.cb) res = request_result.decode() logger.debug("submit_request: <<< res: %r", res) return res
0.00326
def multiply(self, other, out=None): """Return ``out = self * other``. If ``out`` is provided, the result is written to it. See Also -------- LinearSpace.multiply """ return self.space.multiply(self, other, out=out)
0.007326