Columns:
  text: string (lengths 78 to 104k)
  score: float64 (range 0 to 0.18)
def rebase(self, key, start=None, decimals=5):
    """Rebase a channel (key) on start.

    The step (between elements) need to be constant all through, else
    ValueError is raised. The exception to this is the border step between
    data loaded from two different files.

    key: int or str
        The key for the channel to rebase.
    start: int or float or None
        If specified - replace the first element in the first loaded data
        channel with start.
    decimals: int
        Diffs are rounded to this number of decimals before the step
        through arrays are checked. The diffs are otherwise likely never
        to be all equal.

    Typically this would be used to make a time channel continuous. Like,
    not start over from 0, when data is appended from multiple files. Or
    simply to rebase a channel on 'start'.

    If start is None, and the instance is loaded from one file only, this
    method has no effect.

    .. note:: The instance channel is modified on success.
    """
    diffs = []

    def diffsappend(d, sc):
        diff = np.around(np.diff(d), decimals)
        diffs.append((diff, diff[0], sc))

    if hasattr(self, 'metamulti'):
        for sc in self.metamulti['slices']:
            diffsappend(self(key)[sc], sc)
    else:
        diffsappend(self(key), slice(0, self.rec_cnt))

    for diff, d, sc in diffs:
        if not np.all(diff == d):
            raise ValueError('All diffs not equal within ' +
                             'indexes ' + str(sc))

    S = set([t[1] for t in diffs])
    if len(S) > 1:
        raise ValueError('Diffs not equal between appended data files: ' +
                         str(S))

    # Now modify:
    if start is None:
        start = self(key)[0]

    self.D[self._key(key)] = np.linspace(start, d * self.rec_cnt + start,
                                         num=self.rec_cnt, endpoint=False)
    assert len(self(key)) == self.rec_cnt, 'Semantic error'
0.000933
def create_local_copy(self, effects=None, store=None):
    """Creates a Local File Copy on Uploadcare Storage.

    Args:
        - effects:
            Adds CDN image effects. If ``self.default_effects`` property
            is set effects will be combined with default effects.
        - store:
            If ``store`` option is set to False the copy of your file will
            be deleted in 24 hour period after the upload.
            Works only if `autostore` is enabled in the project.
    """
    effects = self._build_effects(effects)
    store = store or ''
    data = {
        'source': self.cdn_path(effects)
    }
    if store:
        data['store'] = store

    return rest_request('POST', 'files/', data=data)
0.0025
def import_demo_experience(self, states, internals, actions, terminal, reward):
    """
    Stores demonstrations in the demo memory.
    """
    fetches = self.import_demo_experience_output

    feed_dict = self.get_feed_dict(
        states=states,
        internals=internals,
        actions=actions,
        terminal=terminal,
        reward=reward
    )

    self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)
0.004193
def LT(classical_reg1, classical_reg2, classical_reg3):
    """
    Produce an LT instruction.

    :param classical_reg1: Memory address to which to store the comparison result.
    :param classical_reg2: Left comparison operand.
    :param classical_reg3: Right comparison operand.
    :return: A ClassicalLessThan instance.
    """
    classical_reg1, classical_reg2, classical_reg3 = prepare_ternary_operands(classical_reg1,
                                                                              classical_reg2,
                                                                              classical_reg3)
    return ClassicalLessThan(classical_reg1, classical_reg2, classical_reg3)
0.007215
def train(train_dir=None, train_csv=None, epochs=30, batch_size=32): """Function responsible for running the training the model.""" if not train_dir or not os.path.exists(train_dir) or not train_csv: warnings.warn("No train directory could be found ") return # Make a dataset from the local folder containing Audio data print("\nMaking an Audio Dataset...\n") tick = time.time() aud_dataset = AudioFolderDataset(train_dir, train_csv=train_csv, file_format='.wav', skip_header=True) tock = time.time() print("Loading the dataset took ", (tock-tick), " seconds.") print("\n=======================================\n") print("Number of output classes = ", len(aud_dataset.synsets)) print("\nThe labels are : \n") print(aud_dataset.synsets) # Get the model to train net = model.get_net(len(aud_dataset.synsets)) print("\nNeural Network = \n") print(net) print("\nModel - Neural Network Generated!\n") print("=======================================\n") #Define the loss - Softmax CE Loss softmax_loss = gluon.loss.SoftmaxCELoss(from_logits=False, sparse_label=True) print("Loss function initialized!\n") print("=======================================\n") #Define the trainer with the optimizer trainer = gluon.Trainer(net.collect_params(), 'adadelta') print("Optimizer - Trainer function initialized!\n") print("=======================================\n") print("Loading the dataset to the Gluon's OOTB Dataloader...") #Getting the data loader out of the AudioDataset and passing the transform from transforms import MFCC aud_transform = MFCC() tick = time.time() audio_train_loader = gluon.data.DataLoader(aud_dataset.transform_first(aud_transform), batch_size=32, shuffle=True) tock = time.time() print("Time taken to load data and apply transform here is ", (tock-tick), " seconds.") print("=======================================\n") print("Starting the training....\n") # Training loop tick = time.time() batch_size = batch_size num_examples = len(aud_dataset) for epoch in range(epochs): cumulative_loss = 0 for data, label in audio_train_loader: with autograd.record(): output = net(data) loss = softmax_loss(output, label) loss.backward() trainer.step(batch_size) cumulative_loss += mx.nd.sum(loss).asscalar() if epoch%5 == 0: train_accuracy = evaluate_accuracy(audio_train_loader, net) print("Epoch {}. Loss: {} Train accuracy : {} ".format(epoch, cumulative_loss/num_examples, train_accuracy)) print("\n------------------------------\n") train_accuracy = evaluate_accuracy(audio_train_loader, net) tock = time.time() print("\nFinal training accuracy: ", train_accuracy) print("Training the sound classification for ", epochs, " epochs, MLP model took ", (tock-tick), " seconds") print("====================== END ======================\n") print("Trying to save the model parameters here...") net.save_parameters("./net.params") print("Saved the model parameters in current directory.")
0.003686
def _vis_calibrate(self, data):
    """Calibrate visible channels to reflectance."""
    solar_irradiance = self['esun']
    esd = self["earth_sun_distance_anomaly_in_AU"].astype(float)

    factor = np.pi * esd * esd / solar_irradiance

    res = data * factor
    res.attrs = data.attrs
    res.attrs['units'] = '1'
    res.attrs['standard_name'] = 'toa_bidirectional_reflectance'
    return res
0.004619
def integrate(self, outevent, inevent):
    """Propagate function time ratio along the function calls.

    Must be called after finding the cycles.

    See also:
    - http://citeseer.ist.psu.edu/graham82gprof.html
    """
    # Sanity checking
    assert outevent not in self
    for function in compat_itervalues(self.functions):
        assert outevent not in function
        assert inevent in function
        for call in compat_itervalues(function.calls):
            assert outevent not in call
            if call.callee_id != function.id:
                assert call.ratio is not None

    # Aggregate the input for each cycle
    for cycle in self.cycles:
        total = inevent.null()
        for function in compat_itervalues(self.functions):
            total = inevent.aggregate(total, function[inevent])
        self[inevent] = total

    # Integrate along the edges
    total = inevent.null()
    for function in compat_itervalues(self.functions):
        total = inevent.aggregate(total, function[inevent])
        self._integrate_function(function, outevent, inevent)
    self[outevent] = total
0.002457
def endSubscription(self, subscriber):
    """
    Unregister a live subscription.
    """
    self._reqId2Contract.pop(subscriber.reqId, None)
    self.reqId2Subscriber.pop(subscriber.reqId, None)
0.009217
async def open(self) -> 'Issuer':
    """
    Explicit entry. Perform ancestor opening operations,
    then synchronize revocation registry to tails tree content.

    :return: current object
    """
    LOGGER.debug('Issuer.open >>>')

    await super().open()
    for path_rr_id in Tails.links(self.dir_tails, self.did):
        await self._sync_revoc_for_issue(basename(path_rr_id))

    LOGGER.debug('Issuer.open <<<')
    return self
0.004141
def call(fun, **kwargs):
    '''
    Directly call a function inside a loader directory
    '''
    args = kwargs.get('args', [])
    dirs = kwargs.get('dirs', [])

    funcs = LazyLoader(
        [os.path.join(SALT_BASE_PATH, 'modules')] + dirs,
        None,
        tag='modules',
        virtual_enable=False,
    )
    return funcs[fun](*args)
0.002874
def create_machine(self, name=None, package=None, dataset=None, metadata=None, tags=None, boot_script=None, credentials=False, image=None, networks=None): """ :: POST /:login/machines Provision a machine in the current :py:class:`smartdc.datacenter.DataCenter`, returning an instantiated :py:class:`smartdc.machine.Machine` object. All of the parameter values are optional, as they are assigned default values by the datacenter's API itself. :param name: a human-readable label for the machine :type name: :py:class:`basestring` :param package: cluster of resource values identified by name :type package: :py:class:`basestring` or :py:class:`dict` :param image: an identifier for the base operating system image (formerly a ``dataset``) :type image: :py:class:`basestring` or :py:class:`dict` :param dataset: base operating system image identified by a globally unique ID or URN (deprecated) :type dataset: :py:class:`basestring` or :py:class:`dict` :param metadata: keys & values with arbitrary supplementary details for the machine, accessible from the machine itself :type metadata: :py:class:`dict` :param tags: keys & values with arbitrary supplementary identifying information for filtering when querying for machines :type tags: :py:class:`dict` :param networks: list of networks where this machine will belong to :type networks: :py:class:`list` :param boot_script: path to a file to upload for execution on boot :type boot_script: :py:class:`basestring` as file path :rtype: :py:class:`smartdc.machine.Machine` If `package`, `image`, or `dataset` are passed a :py:class:`dict` containing a `name` key (in the case of `package`) or an `id` key (in the case of `image` or `dataset`), it passes the corresponding value. The server API appears to resolve incomplete or ambiguous dataset URNs with the highest version number. """ params = {} if name: assert re.match(r'[a-zA-Z0-9]([a-zA-Z0-9\-\.]*[a-zA-Z0-9])?$', name), "Illegal name" params['name'] = name if package: if isinstance(package, dict): package = package['name'] params['package'] = package if image: if isinstance(image, dict): image = image['id'] params['image'] = image if dataset and not image: if isinstance(dataset, dict): dataset = dataset.get('id', dataset['urn']) params['dataset'] = dataset if metadata: for k, v in metadata.items(): params['metadata.' + str(k)] = v if tags: for k, v in tags.items(): params['tag.' + str(k)] = v if boot_script: with open(boot_script) as f: params['metadata.user-script'] = f.read() if networks: if isinstance(networks, list): params['networks'] = networks elif isinstance(networks, basestring): params['networks'] = [networks] j, r = self.request('POST', 'machines', data=params) if r.status_code >= 400: print(j, file=sys.stderr) r.raise_for_status() return Machine(datacenter=self, data=j)
0.007654
def Pack(cls, obj, version):
    """Pack the given object using AdWords-specific logic.

    Args:
      obj: an object to be packed for SOAP using AdWords-specific logic, if
          applicable.
      version: the version of the current API, e.g. 'v201809'

    Returns:
      The given object packed with AdWords-specific logic for SOAP, if
      applicable. Otherwise, returns the given object unmodified.
    """
    if isinstance(obj, ServiceQuery):
        return str(obj)
    return obj
0.004065
def enver(*args): """ %prog [<name>=[value]] To show all environment variables, call with no parameters: %prog To Add/Modify/Delete environment variable: %prog <name>=[value] If <name> is PATH or PATHEXT, %prog will by default append the value using a semicolon as a separator. Use -r to disable this behavior or -a to force it for variables other than PATH and PATHEXT. If append is prescribed, but the value doesn't exist, the value will be created. If there is no value, %prog will delete the <name> environment variable. i.e. "PATH=" To remove a specific value or values from a semicolon-separated multi-value variable (such as PATH), use --remove-value. e.g. enver --remove-value PATH=C:\\Unwanted\\Dir\\In\\Path Remove-value matches case-insensitive and also matches any substring so the following would also be sufficient to remove the aforementioned undesirable dir. enver --remove-value PATH=UNWANTED Note that %prog does not affect the current running environment, and can only affect subsequently spawned applications. """ from optparse import OptionParser parser = OptionParser(usage=trim(enver.__doc__)) parser.add_option( '-U', '--user-environment', action='store_const', const=UserRegisteredEnvironment, default=MachineRegisteredEnvironment, dest='class_', help="Use the current user's environment", ) parser.add_option( '-a', '--append', action='store_true', default=False, help="Append the value to any existing value (default for PATH and PATHEXT)", ) parser.add_option( '-r', '--replace', action='store_true', default=False, help="Replace any existing value (used to override default append " "for PATH and PATHEXT)", ) parser.add_option( '--remove-value', action='store_true', default=False, help="Remove any matching values from a semicolon-separated " "multi-value variable", ) parser.add_option( '-e', '--edit', action='store_true', default=False, help="Edit the value in a local editor", ) options, args = parser.parse_args(*args) try: param = args.pop() if args: parser.error("Too many parameters specified") raise SystemExit(1) if '=' not in param and not options.edit: parser.error("Expected <name>= or <name>=<value>") raise SystemExit(2) name, sep, value = param.partition('=') method_name = 'set' if options.remove_value: method_name = 'remove_values' if options.edit: method_name = 'edit' method = getattr(options.class_, method_name) method(name, value, options) except IndexError: options.class_.show()
0.028158
def report(zap_helper, output, output_format):
    """Generate XML, MD or HTML report."""
    if output_format == 'html':
        zap_helper.html_report(output)
    elif output_format == 'md':
        zap_helper.md_report(output)
    else:
        zap_helper.xml_report(output)

    console.info('Report saved to "{0}"'.format(output))
0.002985
def ErrorMessage(text, **kwargs):
    """Show an error message dialog to the user.

    This will raise a Zenity Error Dialog with a description of the error.

    text - A description of the error.
    kwargs - Optional command line parameters for Zenity such as height,
             width, etc."""
    args = ['--text=%s' % text]
    for generic_args in kwargs_helper(kwargs):
        args.append('--%s=%s' % generic_args)

    run_zenity('--error', *args).wait()
0.006329
def _auto_adjust_panel_spans(dashboard):
    '''Adjust panel spans to take up the available width.

    For each group of panels that would be laid out on the same level,
    scale up the unspecified panel spans to fill up the level.
    '''
    for row in dashboard.get('rows', []):
        levels = []
        current_level = []
        levels.append(current_level)
        for panel in row.get('panels', []):
            current_level_span = sum(panel.get('span', _DEFAULT_PANEL_SPAN)
                                     for panel in current_level)
            span = panel.get('span', _DEFAULT_PANEL_SPAN)
            if current_level_span + span > _FULL_LEVEL_SPAN:
                current_level = [panel]
                levels.append(current_level)
            else:
                current_level.append(panel)

        for level in levels:
            specified_panels = [panel for panel in level if 'span' in panel]
            unspecified_panels = [panel for panel in level
                                  if 'span' not in panel]
            if not unspecified_panels:
                continue

            specified_span = sum(panel['span'] for panel in specified_panels)
            available_span = _FULL_LEVEL_SPAN - specified_span
            auto_span = float(available_span) / len(unspecified_panels)
            for panel in unspecified_panels:
                panel['span'] = auto_span
0.000711
def verifyChainFromCAPath(self, capath, untrusted_file=None):
    """
    Does the same job as .verifyChainFromCAFile() but using the list
    of anchors in capath directory. The directory should (only) contain
    certificates files in PEM format. As for .verifyChainFromCAFile(),
    a list of untrusted certificates can be passed as a file
    (concatenation of the certificates in PEM format).
    """
    try:
        anchors = []
        for cafile in os.listdir(capath):
            anchors.append(Cert(open(os.path.join(capath, cafile), "rb").read()))  # noqa: E501
    except Exception:
        raise Exception("capath provided is not a valid cert path")

    untrusted = None
    if untrusted_file:
        try:
            f = open(untrusted_file, "rb")
            untrusted_certs = f.read()
            f.close()
        except Exception:
            raise Exception("Could not read from untrusted_file")
        untrusted = [Cert(c) for c in split_pem(untrusted_certs)]

    return self.verifyChain(anchors, untrusted)
0.001779
def get_file_list():
    """Return a list of strings corresponding to file names supplied by drag and drop or standard input."""
    if len(sys.argv) > 1:
        file_list = list(sys.argv[1:])  # make copy
    else:
        files_str = input('Select the files you want to process and drag and drop them onto this window, '
                          'or type their names separated by spaces. Paths containing spaces should be '
                          'surrounded by quotation marks.\nPress ENTER when you\'re done: ')
        if "win" in sys.platform:
            # the POSIX shlex.split uses backslashes for escape sequences, so Windows paths need to set posix=False
            file_list = shlex.split(files_str, posix=False)
            # the non-POSIX shlex.split does not automatically clean quotation marks from the final product
            file_list = [f.replace('"', '').replace("'", "") for f in file_list]
        else:
            file_list = shlex.split(files_str, posix=True)

    # substitute in shell variables and get absolute paths
    for i in range(len(file_list)):
        file_list[i] = os.path.abspath(
            os.path.expanduser(os.path.expandvars(file_list[i]))
        )

    return file_list
0.009106
def campaign_name(self, campaign_name):
    """
    Sets the campaign_name of this ServicePackageQuotaHistoryReservation.
    Textual campaign name for this reservation.

    :param campaign_name: The campaign_name of this ServicePackageQuotaHistoryReservation.
    :type: str
    """
    if campaign_name is None:
        raise ValueError("Invalid value for `campaign_name`, must not be `None`")
    if campaign_name is not None and len(campaign_name) > 250:
        raise ValueError("Invalid value for `campaign_name`, length must be less than or equal to `250`")
    if campaign_name is not None and len(campaign_name) < 1:
        raise ValueError("Invalid value for `campaign_name`, length must be greater than or equal to `1`")

    self._campaign_name = campaign_name
0.007264
def _setDriftList(self, drift_length):
    """ set drift length list of three elements

    :param drift_length: input drift_length in [m],
        single float, or list/tuple of float numbers
    """
    if isinstance(drift_length, tuple) or isinstance(drift_length, list):
        if len(drift_length) == 1:
            self.dflist = drift_length * 3
        elif len(drift_length) == 2:
            self.dflist = []
            self.dflist.extend(drift_length)
            self.dflist.append(drift_length[0])
        elif len(drift_length) >= 3:
            self.dflist = drift_length[0:3]
            if self.dflist[0] != self.dflist[-1]:
                print("warning: chicane is not symmetric.")
        else:
            print("drift_length is not a valid list or tuple.")
            self.mflag = False
    else:
        self.dflist = []
        self.dflist.extend([drift_length, drift_length, drift_length])
0.003033
def CreateConstMuskingumXFile(x_value, in_connectivity_file, out_x_file):
    """
    Create muskingum X file from value that is constant all the way through
    for each river segment.

    Parameters
    ----------
    x_value: float
        Value for the muskingum X parameter [0-0.5].
    in_connectivity_file: str
        The path to the RAPID connectivity file.
    out_x_file: str
        The path to the output x file.

    Example::

        from RAPIDpy.gis.muskingum import CreateConstMuskingumXFile

        CreateConstMuskingumXFile(
            x_value=0.3,
            in_connectivity_file='/path/to/rapid_connect.csv',
            out_x_file='/path/to/x.csv')
    """
    num_rivers = 0
    with open_csv(in_connectivity_file, "r") as csvfile:
        reader = csv_reader(csvfile)
        for _ in reader:
            num_rivers += 1

    with open_csv(out_x_file, 'w') as kfile:
        x_writer = csv_writer(kfile)
        for _ in xrange(num_rivers):
            x_writer.writerow([x_value])
0.000933
def get_importer(path_item):
    """Retrieve a PEP 302 "importer" for the given path item

    If there is no importer, this returns a wrapper around the builtin
    import machinery. The returned importer is only cached if it was
    created by a path hook.
    """
    try:
        importer = sys.path_importer_cache[path_item]
    except KeyError:
        for hook in sys.path_hooks:
            try:
                importer = hook(path_item)
            except ImportError:
                pass
            else:
                break
        else:
            importer = None
        sys.path_importer_cache.setdefault(path_item, importer)

    if importer is None:
        try:
            importer = ImpWrapper(path_item)
        except ImportError:
            pass
    return importer
0.002532
def _getInputImage (input,group=None): """ Factory function to return appropriate imageObject class instance""" # extract primary header and SCI,1 header from input image sci_ext = 'SCI' if group in [None,'']: exten = '[sci,1]' phdu = fits.getheader(input, memmap=False) else: # change to use fits more directly here? if group.find(',') > 0: grp = group.split(',') if grp[0].isalpha(): grp = (grp[0],int(grp[1])) else: grp = int(grp[0]) else: grp = int(group) phdu = fits.getheader(input, memmap=False) phdu.extend(fits.getheader(input, ext=grp, memmap=False)) # Extract the instrument name for the data that is being processed by Multidrizzle _instrument = phdu['INSTRUME'] # Determine the instrument detector in use. NICMOS is a special case because it does # not use the 'DETECTOR' keyword. It instead used 'CAMERA' to identify which of it's # 3 camera's is in use. All other instruments support the 'DETECTOR' keyword. if _instrument == 'NICMOS': _detector = phdu['CAMERA'] else: try: _detector = phdu['DETECTOR'] except KeyError: # using the phdu as set above (fits.getheader) is MUCH faster and # works for the majority of data; but fileutil handles waivered fits phdu = fileutil.getHeader(input+exten) _detector = phdu['DETECTOR'] # if this fails, let it throw del phdu # just to keep clean # Match up the instrument and detector with the right class # only importing the instrument modules as needed. try: if _instrument == 'ACS': from . import acsData if _detector == 'HRC': return acsData.HRCInputImage(input,group=group) if _detector == 'WFC': return acsData.WFCInputImage(input,group=group) if _detector == 'SBC': return acsData.SBCInputImage(input,group=group) if _instrument == 'NICMOS': from . import nicmosData if _detector == 1: return nicmosData.NIC1InputImage(input) if _detector == 2: return nicmosData.NIC2InputImage(input) if _detector == 3: return nicmosData.NIC3InputImage(input) if _instrument == 'WFPC2': from . import wfpc2Data return wfpc2Data.WFPC2InputImage(input,group=group) """ if _detector == 1: return wfpc2Data.PCInputImage(input) if _detector == 2: return wfpc2Data.WF2InputImage(input) if _detector == 3: return wfpc2Data.WF3InputImage(input) if _detector == 4: return wfpc2Data.WF4InputImage(input) """ if _instrument == 'STIS': from . import stisData if _detector == 'CCD': return stisData.CCDInputImage(input,group=group) if _detector == 'FUV-MAMA': return stisData.FUVInputImage(input,group=group) if _detector == 'NUV-MAMA': return stisData.NUVInputImage(input,group=group) if _instrument == 'WFC3': from . import wfc3Data if _detector == 'UVIS': return wfc3Data.WFC3UVISInputImage(input,group=group) if _detector == 'IR': return wfc3Data.WFC3IRInputImage(input,group=group) except ImportError: msg = 'No module implemented for '+str(_instrument)+'!' raise ValueError(msg) # If a supported instrument is not detected, print the following error message # and raise an exception. msg = 'Instrument: ' + str(_instrument) + '/' + str(_detector) + ' not yet supported!' raise ValueError(msg)
0.011729
def deephash(obj):
    """
    Given an object, return a hash using HashableJSON. This hash is not
    architecture, Python version or platform independent.
    """
    try:
        return hash(json.dumps(obj, cls=HashableJSON, sort_keys=True))
    except:
        return None
0.007246
def get_memory_map_xml(self):
    """! @brief Generate GDB memory map XML.
    """
    root = ElementTree.Element('memory-map')
    for r in self._context.core.memory_map:
        # Look up the region type name. Regions default to ram if gdb doesn't
        # have a concept of the region type.
        gdbType = GDB_TYPE_MAP.get(r.type, 'ram')

        start = hex(r.start).rstrip("L")
        length = hex(r.length).rstrip("L")
        mem = ElementTree.SubElement(root, 'memory', type=gdbType, start=start, length=length)
        if r.is_flash:
            prop = ElementTree.SubElement(mem, 'property', name='blocksize')
            prop.text = hex(r.blocksize).rstrip("L")
    return MAP_XML_HEADER + ElementTree.tostring(root)
0.008739
def volume(self):
    """
    Volume of the simplex.
    """
    return abs(np.linalg.det(self.T)) / math.factorial(self.space_dim)
0.013605
def _token_to_subwords(self, token):
    """Greedily split token into subwords."""
    subwords = []

    start = 0
    while start < len(token):
        subword = None
        for end in range(
                min(len(token), start + self._max_subword_len), start, -1):
            candidate = token[start:end]
            if (candidate in self._subword_to_id or
                    candidate == _UNDERSCORE_REPLACEMENT):
                subword = candidate
                subwords.append(subword)
                start = end
                break

        # No subword match found. Consume a single (unicode) character.
        if subword is None:
            subwords.append(token[start])
            start += 1

    return subwords
0.016345
def one(self):
    """Return exactly one record or raise an exception.

    :return:
        - Dictionary containing the only item in the response content

    :raise:
        - MultipleResults: If more than one records are present in the content
        - NoResults: If the result is empty
    """
    result, count = self._get_buffered_response()

    if count == 0:
        raise NoResults("No records found")
    elif count > 1:
        raise MultipleResults("Expected single-record result, got multiple")

    return result[0]
0.006849
def update_assessment_taken(self, assessment_taken_form): """Updates an existing assessment taken. arg: assessment_taken_form (osid.assessment.AssessmentTakenForm): the form containing the elements to be updated raise: IllegalState - ``assessment_taken_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``assessment_taken_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred raise: Unsupported - ``assessment_offered_form`` did not originate from ``get_assessment_taken_form_for_update()`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.update_resource_template collection = JSONClientValidated('assessment', collection='AssessmentTaken', runtime=self._runtime) if not isinstance(assessment_taken_form, ABCAssessmentTakenForm): raise errors.InvalidArgument('argument type is not an AssessmentTakenForm') if not assessment_taken_form.is_for_update(): raise errors.InvalidArgument('the AssessmentTakenForm is for update only, not create') try: if self._forms[assessment_taken_form.get_id().get_identifier()] == UPDATED: raise errors.IllegalState('assessment_taken_form already used in an update transaction') except KeyError: raise errors.Unsupported('assessment_taken_form did not originate from this session') if not assessment_taken_form.is_valid(): raise errors.InvalidArgument('one or more of the form elements is invalid') collection.save(assessment_taken_form._my_map) self._forms[assessment_taken_form.get_id().get_identifier()] = UPDATED # Note: this is out of spec. The OSIDs don't require an object to be returned: return objects.AssessmentTaken( osid_object_map=assessment_taken_form._my_map, runtime=self._runtime, proxy=self._proxy)
0.003838
def process(name):
    '''
    Return whether the specified signature is found in the process tree. This
    differs slightly from the services states, in that it may refer to a
    process that is not managed via the init system.
    '''
    # Monitoring state, no changes will be made so no test interface needed
    ret = {'name': name,
           'result': False,
           'comment': '',
           'changes': {},
           'data': {}}  # Data field for monitoring state

    data = __salt__['status.pid'](name)
    if not data:
        ret['result'] = False
        ret['comment'] += 'Process signature "{0}" not found '.format(
            name
        )
        return ret
    ret['data'] = data
    ret['comment'] += 'Process signature "{0}" was found '.format(
        name
    )
    ret['result'] = True
    return ret
0.001203
def annotate_video( self, input_uri=None, input_content=None, features=None, video_context=None, output_uri=None, location_id=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Performs asynchronous video annotation. Progress and results can be retrieved through the ``google.longrunning.Operations`` interface. ``Operation.metadata`` contains ``AnnotateVideoProgress`` (progress). ``Operation.response`` contains ``AnnotateVideoResponse`` (results). Example: >>> from google.cloud import videointelligence_v1beta2 >>> from google.cloud.videointelligence_v1beta2 import enums >>> >>> client = videointelligence_v1beta2.VideoIntelligenceServiceClient() >>> >>> input_uri = 'gs://demomaker/cat.mp4' >>> features_element = enums.Feature.LABEL_DETECTION >>> features = [features_element] >>> >>> response = client.annotate_video(input_uri=input_uri, features=features) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: input_uri (str): Input video location. Currently, only `Google Cloud Storage <https://cloud.google.com/storage/>`__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For more information, see `Request URIs <https://cloud.google.com/storage/docs/reference-uris>`__. A video URI may include wildcards in ``object-id``, and thus identify multiple videos. Supported wildcards: '\*' to match 0 or more characters; '?' to match 1 character. If unset, the input video should be embedded in the request as ``input_content``. If set, ``input_content`` should be unset. input_content (bytes): The video data bytes. If unset, the input video(s) should be specified via ``input_uri``. If set, ``input_uri`` should be unset. features (list[~google.cloud.videointelligence_v1beta2.types.Feature]): Requested video annotation features. video_context (Union[dict, ~google.cloud.videointelligence_v1beta2.types.VideoContext]): Additional video context and/or feature-specific parameters. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.videointelligence_v1beta2.types.VideoContext` output_uri (str): Optional location where the output (in JSON format) should be stored. Currently, only `Google Cloud Storage <https://cloud.google.com/storage/>`__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For more information, see `Request URIs <https://cloud.google.com/storage/docs/reference-uris>`__. location_id (str): Optional cloud region where annotation should take place. Supported cloud regions: ``us-east1``, ``us-west1``, ``europe-west1``, ``asia-east1``. If no region is specified, a region will be determined based on video file location. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. 
Returns: A :class:`~google.cloud.videointelligence_v1beta2.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "annotate_video" not in self._inner_api_calls: self._inner_api_calls[ "annotate_video" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.annotate_video, default_retry=self._method_configs["AnnotateVideo"].retry, default_timeout=self._method_configs["AnnotateVideo"].timeout, client_info=self._client_info, ) request = video_intelligence_pb2.AnnotateVideoRequest( input_uri=input_uri, input_content=input_content, features=features, video_context=video_context, output_uri=output_uri, location_id=location_id, ) operation = self._inner_api_calls["annotate_video"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, video_intelligence_pb2.AnnotateVideoResponse, metadata_type=video_intelligence_pb2.AnnotateVideoProgress, )
0.004461
def score_function(self, x, W):  # need refector
    '''
    Score function to calculate score
    '''
    score = self.sign * np.sign(x[self.feature_index] - self.theta)
    return score
0.013825
def configure(self, config):
    """
    Configures component by passing configuration parameters.

    :param config: configuration parameters to be set.
    """
    self._timeout = config.get_as_long_with_default("options.timeout", self._default_timeout)
    self._max_size = config.get_as_long_with_default("options.max_size", self._default_max_size)
0.01061
def update(self, **kwargs):
    """Change the configuration of the resource on the device.

    This method uses Http PUT alter the service state on the device.

    The attributes of the instance will be packaged as a dictionary. That
    dictionary will be updated with kwargs. It is then submitted as JSON to
    the device. Various edge cases are handled:

    * read-only attributes that are unchangeable are removed
    * ``agent`` attribute removed prior to PUT
    * ``post`` attribute removed prior to PUT
    * ``method`` attribute removed prior to PUT

    :param kwargs: keys and associated values to alter on the device
    """
    self.__dict__.pop('agent', '')
    self.__dict__.pop('post', '')
    self.__dict__.pop('method', '')
    super(Wmi, self).update(**kwargs)
0.002361
def records(account_id):
    """Fetch locks data
    """
    s = boto3.Session()
    table = s.resource('dynamodb').Table('Sphere11.Dev.ResourceLocks')

    results = table.scan()
    for r in results['Items']:
        if 'LockDate' in r:
            r['LockDate'] = datetime.fromtimestamp(r['LockDate'])
        if 'RevisionDate' in r:
            r['RevisionDate'] = datetime.fromtimestamp(r['RevisionDate'])

    print(tabulate.tabulate(
        results['Items'],
        headers="keys",
        tablefmt='fancy_grid'))
0.001916
def color_palette(name=None, n_colors=6, desat=None): """Return a list of colors defining a color palette. Availible seaborn palette names: deep, muted, bright, pastel, dark, colorblind Other options: hls, husl, any matplotlib palette Matplotlib paletes can be specified as reversed palettes by appending "_r" to the name or as dark palettes by appending "_d" to the name. This function can also be used in a ``with`` statement to temporarily set the color cycle for a plot or set of plots. Parameters ---------- name: None, string, or sequence Name of palette or None to return current palette. If a sequence, input colors are used but possibly cycled and desaturated. n_colors : int Number of colors in the palette. If larger than the number of colors in the palette, they will cycle. desat : float Value to desaturate each color by. Returns ------- palette : list of RGB tuples. Color palette. Examples -------- >>> p = color_palette("muted") >>> p = color_palette("Blues_d", 10) >>> p = color_palette("Set1", desat=.7) >>> import matplotlib.pyplot as plt >>> with color_palette("husl", 8): ... f, ax = plt.subplots() ... ax.plot(x, y) # doctest: +SKIP See Also -------- set_palette : set the default color cycle for all plots. axes_style : define parameters to set the style of plots plotting_context : define parameters to scale plot elements """ seaborn_palettes = dict( deep=["#4C72B0", "#55A868", "#C44E52", "#8172B2", "#CCB974", "#64B5CD"], muted=["#4878CF", "#6ACC65", "#D65F5F", "#B47CC7", "#C4AD66", "#77BEDB"], pastel=["#92C6FF", "#97F0AA", "#FF9F9A", "#D0BBFF", "#FFFEA3", "#B0E0E6"], bright=["#003FFF", "#03ED3A", "#E8000B", "#8A2BE2", "#FFC400", "#00D7FF"], dark=["#001C7F", "#017517", "#8C0900", "#7600A1", "#B8860B", "#006374"], colorblind=["#0072B2", "#009E73", "#D55E00", "#CC79A7", "#F0E442", "#56B4E9"], ) if name is None: palette = mpl.rcParams["axes.color_cycle"] elif not isinstance(name, string_types): palette = name elif name == "hls": palette = hls_palette(n_colors) elif name == "husl": palette = husl_palette(n_colors) elif name in seaborn_palettes: palette = seaborn_palettes[name] elif name in dir(mpl.cm): palette = mpl_palette(name, n_colors) elif name[:-2] in dir(mpl.cm): palette = mpl_palette(name, n_colors) else: raise ValueError("%s is not a valid palette name" % name) if desat is not None: palette = [desaturate(c, desat) for c in palette] # Always return as many colors as we asked for pal_cycle = cycle(palette) palette = [next(pal_cycle) for _ in range(n_colors)] # Always return in r, g, b tuple format try: palette = map(mpl.colors.colorConverter.to_rgb, palette) palette = _ColorPalette(palette) except ValueError: raise ValueError("Could not generate a palette for %s" % str(name)) return palette
0.000304
def forward_message(self, chat_id, from_chat_id, message_id, disable_notification=None):
    """
    Use this method to forward messages of any kind.

    :param disable_notification:
    :param chat_id: which chat to forward
    :param from_chat_id: which chat message from
    :param message_id: message id
    :return: API reply.
    """
    return types.Message.de_json(
        apihelper.forward_message(self.token, chat_id, from_chat_id, message_id, disable_notification))
0.007737
def _rgb_triangle(ax, r_label, g_label, b_label, loc): """ Draw an RGB triangle legend on the desired axis """ if not loc in range(1, 11): loc = 2 from mpl_toolkits.axes_grid1.inset_locator import inset_axes inset_ax = inset_axes(ax, width=1, height=1, loc=loc) mesh = 35 x = [] y = [] color = [] for r in range(0, mesh): for g in range(0, mesh): for b in range(0, mesh): if not (r == 0 and b == 0 and g == 0): r1 = r / (r + g + b) g1 = g / (r + g + b) b1 = b / (r + g + b) x.append(0.33 * (2. * g1 + r1) / (r1 + b1 + g1)) y.append(0.33 * np.sqrt(3) * r1 / (r1 + b1 + g1)) rc = math.sqrt(r ** 2 / (r ** 2 + g ** 2 + b ** 2)) gc = math.sqrt(g ** 2 / (r ** 2 + g ** 2 + b ** 2)) bc = math.sqrt(b ** 2 / (r ** 2 + g ** 2 + b ** 2)) color.append([rc, gc, bc]) # x = [n + 0.25 for n in x] # nudge x coordinates # y = [n + (max_y - 1) for n in y] # shift y coordinates to top # plot the triangle inset_ax.scatter(x, y, s=7, marker='.', edgecolor=color) inset_ax.set_xlim([-0.35, 1.00]) inset_ax.set_ylim([-0.35, 1.00]) # add the labels inset_ax.text(0.70, -0.2, g_label, fontsize=13, family='Times New Roman', color=(0, 0, 0), horizontalalignment='left') inset_ax.text(0.325, 0.70, r_label, fontsize=13, family='Times New Roman', color=(0, 0, 0), horizontalalignment='center') inset_ax.text(-0.05, -0.2, b_label, fontsize=13, family='Times New Roman', color=(0, 0, 0), horizontalalignment='right') inset_ax.get_xaxis().set_visible(False) inset_ax.get_yaxis().set_visible(False)
0.00145
def update_translations(request):
    """
    Update translations: delete orphan translations and creates empty
    translations for new objects in database.
    """
    FieldTranslation.delete_orphan_translations()
    num_translations = FieldTranslation.update_translations()
    return render_to_response('modeltranslation/admin/update_translations_ok.html',
                              {"num_translations": num_translations},
                              RequestContext(request))
0.027363
def namedb_get_name_preorder(db, preorder_hash, current_block):
    """
    Get a (singular) name preorder record outstanding at the given block, given the preorder hash.

    NOTE: returns expired preorders.

    Return the preorder record on success.
    Return None if not found.
    """
    select_query = "SELECT * FROM preorders WHERE preorder_hash = ? AND op = ? AND block_number < ?;"
    args = (preorder_hash, NAME_PREORDER, current_block + NAME_PREORDER_EXPIRE)

    cur = db.cursor()
    preorder_rows = namedb_query_execute(cur, select_query, args)
    preorder_row = preorder_rows.fetchone()
    if preorder_row is None:
        # no such preorder
        return None

    preorder_rec = {}
    preorder_rec.update(preorder_row)

    unexpired_query, unexpired_args = namedb_select_where_unexpired_names(current_block)

    # make sure that the name doesn't already exist
    select_query = "SELECT name_records.preorder_hash " + \
                   "FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \
                   "WHERE name_records.preorder_hash = ? AND " + \
                   unexpired_query + ";"

    args = (preorder_hash,) + unexpired_args

    cur = db.cursor()
    nm_rows = namedb_query_execute(cur, select_query, args)
    nm_row = nm_rows.fetchone()
    if nm_row is not None:
        # name with this preorder exists
        return None

    return preorder_rec
0.015636
def set_bucket_props(self, bucket, props):
    """
    Set the properties on the bucket object given
    """
    bucket_type = self._get_bucket_type(bucket.bucket_type)
    url = self.bucket_properties_path(bucket.name,
                                      bucket_type=bucket_type)
    headers = {'Content-Type': 'application/json'}
    content = json.dumps({'props': props})

    # Run the request...
    status, _, body = self._request('PUT', url, headers, content)

    if status == 401:
        raise SecurityError('Not authorized to set bucket properties.')
    elif status != 204:
        raise RiakError('Error setting bucket properties.')
    return True
0.002766
def _accumulateFrequencyCounts(values, freqCounts=None):
    """
    Accumulate a list of values 'values' into the frequency counts 'freqCounts',
    and return the updated frequency counts

    For example, if values contained the following: [1,1,3,5,1,3,5], and the
    initial freqCounts was None, then the return value would be:
    [0,3,0,2,0,2]
    which corresponds to how many of each value we saw in the input, i.e. there
    were 0 0's, 3 1's, 0 2's, 2 3's, 0 4's, and 2 5's.

    If freqCounts is not None, the values will be added to the existing counts and
    the length of the frequency Counts will be automatically extended as necessary

    Parameters:
    -----------------------------------------------
    values:         The values to accumulate into the frequency counts
    freqCounts:     Accumulated frequency counts so far, or none
    """
    # How big does our freqCounts vector need to be?
    values = numpy.array(values)
    numEntries = values.max() + 1
    if freqCounts is not None:
        numEntries = max(numEntries, freqCounts.size)

    # Where do we accumulate the results?
    if freqCounts is not None:
        if freqCounts.size != numEntries:
            newCounts = numpy.zeros(numEntries, dtype='int32')
            newCounts[0:freqCounts.size] = freqCounts
        else:
            newCounts = freqCounts
    else:
        newCounts = numpy.zeros(numEntries, dtype='int32')

    # Accumulate the new values
    for v in values:
        newCounts[v] += 1

    return newCounts
0.012535
def convert(args):
    """
    Download VM disks, and OVF from vCenter
    :param args:
    :return: created vmdk
    """
    exporter = OvfExporter(user=args.vcenter_user, password=args.vcenter_pass,
                           host=args.vcenter_host, port=args.vcenter_port,
                           vm_name=args.vm_name, dir=args.directory)
    return exporter.export_ovf()
0.008596
def minutes_for_session(self, session_label):
    """
    Given a session label, return the minutes for that session.

    Parameters
    ----------
    session_label: pd.Timestamp (midnight UTC)
        A session label whose session's minutes are desired.

    Returns
    -------
    pd.DateTimeIndex
        All the minutes for the given session.
    """
    return self.minutes_in_range(
        start_minute=self.schedule.at[session_label, 'market_open'],
        end_minute=self.schedule.at[session_label, 'market_close'],
    )
0.003367
def exists(self, vars_list: List[str]) -> 'TensorFluent':
    '''Returns the TensorFluent for the exists aggregation function.

    Args:
        vars_list: The list of variables to be aggregated over.

    Returns:
        A TensorFluent wrapping the exists aggregation function.
    '''
    return self._aggregation_op(tf.reduce_any, self, vars_list)
0.005263
def populate(self, priority, address, rtr, data):
    """
    data bytes (high + low)
    1 + 2 = current temp
    3 + 4 = min temp
    5 + 6 = max temp
    :return: None
    """
    assert isinstance(data, bytes)
    self.needs_no_rtr(rtr)
    self.needs_data(data, 6)
    self.set_attributes(priority, address, rtr)
    self.cur = (((data[0] << 8) | data[1]) / 32) * 0.0625
    self.min = (((data[2] << 8) | data[3]) / 32) * 0.0625
    self.max = (((data[4] << 8) | data[5]) / 32) * 0.0625
0.010582
def __gen_token_anno_file(self, top_level_layer): """ creates an etree representation of a <multiFeat> file that describes all the annotations that only span one token (e.g. POS, lemma etc.). Note: discoursegraphs will create one token annotation file for each top level layer (e.g. conano, tiger etc.). """ base_paula_id = '{0}.{1}.tok'.format(self.corpus_name, self.name) paula_id = '{0}.{1}.{2}.tok_multiFeat'.format(top_level_layer, self.corpus_name, self.name) E, tree = gen_paula_etree(paula_id) mflist = E('multiFeatList', {XMLBASE: base_paula_id+'.xml'}) for token_id in self.dg.tokens: mfeat = E('multiFeat', {XLINKHREF: '#{0}'.format(token_id)}) token_dict = self.dg.node[token_id] for feature in token_dict: # TODO: highly inefficient! refactor!1!! if feature not in IGNORED_TOKEN_ATTRIBS \ and feature.startswith(top_level_layer): mfeat.append(E('feat', {'name': feature, 'value': token_dict[feature]})) if self.human_readable: # adds token string as a <!-- comment --> mfeat.append(Comment(token_dict[self.dg.ns+':token'])) mflist.append(mfeat) tree.append(mflist) self.files[paula_id] = tree self.file2dtd[paula_id] = PaulaDTDs.multifeat return paula_id
0.00242
def get_updates(self, *args, **kwargs):
    """See :func:`get_updates`"""
    return get_updates(*args, **self._merge_overrides(**kwargs)).run()
0.013158
def modules(self):
    """
    :return: dictionary {index: object} of all modules.
    """
    if not self.get_objects_by_type('module'):
        self.inventory()
    return {int(c.index): c for c in self.get_objects_by_type('module')}
0.007692
def cooccurrences(self, domain):
    '''Get the cooccurrences of the given domain.

    For details, see https://investigate.umbrella.com/docs/api#co-occurrences
    '''
    uri = self._uris["cooccurrences"].format(domain)
    return self.get_parse(uri)
0.010989
def fulfill(self, method, *args, **kwargs):
    """ Fulfill an HTTP request to Keen's API. """
    return getattr(self.session, method)(*args, **kwargs)
0.012346
def log(self, _from=None, to=None):
    """Run git-log."""
    command = ["git", "log"]
    if _from:
        to = "HEAD" if not to else to
        revision_range = f"{_from}..{to}"
        command.append(revision_range)
    git_log_text = _run_command(command)
    commit_text_lst = _extract_commit_texts(git_log_text)

    return [Commit(commit_text) for commit_text in commit_text_lst]
0.004684
def toposort_flatten(data, sort=True):
    """Returns a single list of dependencies. For any set returned by
    toposort(), those items are sorted and appended to the result (just to
    make the results deterministic)."""
    result = []
    for d in toposort(data):
        try:
            result.extend((sorted if sort else list)(d))
        except TypeError as e:
            result.extend(list(d))
    return result
0.00241
def get_instance(self, payload):
    """
    Build an instance of DefaultsInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.autopilot.v1.assistant.defaults.DefaultsInstance
    :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsInstance
    """
    return DefaultsInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], )
0.007075
def genericCameraMatrix(shape, angularField=60):
    '''
    Return a generic camera matrix
    [[fx, 0, cx],
     [ 0, fy, cy],
     [ 0,  0,  1]]
    for a given image shape
    '''
    # http://nghiaho.com/?page_id=576
    # assume that the optical centre is in the middle:
    cy = int(shape[0] / 2)
    cx = int(shape[1] / 2)

    # assume that the FOV is 60 DEG (webcam)
    fx = fy = cx / np.tan(angularField / 2 * np.pi / 180)  # camera focal length
    # see
    # http://docs.opencv.org/doc/tutorials/calib3d/camera_calibration/camera_calibration.html
    return np.array([[fx, 0, cx],
                     [0, fy, cy],
                     [0, 0, 1]
                     ], dtype=np.float32)
0.001339
def set_hosts_file_entry_for_role(self, role_name, network_name='user-net', fqdn=None, domain_name=None):
    """Adds an entry to the hosts file for a scenario host given
    the role name and network name

    :param role_name: (str) role name of the host to add
    :param network_name: (str) Name of the network to add to the hosts file
    :param fqdn: (str) Fully qualified domain name to use in the hosts file entry (trumps domain name)
    :param domain_name: (str) Domain name to include in the hosts file entries if provided
    :return:
    """
    log = logging.getLogger(self.cls_logger + '.set_hosts_file_entry_for_role')

    # Determine the host file entry portion
    if fqdn:
        host_file_entry = fqdn
    else:
        if domain_name:
            host_file_entry = '{r}.{d} {r}'.format(r=role_name, d=domain_name)
        else:
            host_file_entry = role_name
    log.info('Using hosts file entry: {e}'.format(e=host_file_entry))

    log.info('Scanning scenario hosts for role name [{r}] and network: {n}'.format(r=role_name, n=network_name))
    for scenario_host in self.scenario_network_info:
        if scenario_host['scenario_role_name'] == role_name:
            for host_network_info in scenario_host['network_info']:
                if host_network_info['network_name'] == network_name:
                    self.update_hosts_file(ip=host_network_info['internal_ip'], entry=host_file_entry)
0.005906
def user_cache_dir(self):
    """Return ``user_cache_dir``."""
    directory = appdirs.user_cache_dir(self.appname, self.appauthor,
                                       version=self.version)
    if self.create:
        self._ensure_directory_exists(directory)
    return directory
0.010239
def ln(label):
    """Draw a 70-char-wide divider, with label in the middle.

    >>> ln('hello there')
    '---------------------------- hello there -----------------------------'
    """
    label_len = len(label) + 2
    chunk = (70 - label_len) // 2
    out = '%s %s %s' % ('-' * chunk, label, '-' * chunk)
    pad = 70 - len(out)
    if pad > 0:
        out = out + ('-' * pad)
    return out
0.002519
def parse_request(cls, request_string):
    """JSONRPC allows for **batch** requests to be communicated as array of
    dicts. This method parses out each individual element in the batch and
    returns a list of tuples, each tuple a result of parsing of each item in
    the batch.

    :Returns:
        | tuple of (results, is_batch_mode_flag)
        | where:
        |  - results is a tuple describing the request
        |  - Is_batch_mode_flag is a Bool indicating if the
        |    request came in in batch mode (as array of requests) or not.

    :Raises: RPCParseError, RPCInvalidRequest
    """
    try:
        batch = cls.json_loads(request_string)
    except ValueError as err:
        raise errors.RPCParseError("No valid JSON. (%s)" % str(err))

    if isinstance(batch, (list, tuple)) and batch:
        # batch is true batch.
        # list of parsed request objects, is_batch_mode_flag
        return [cls._parse_single_request_trap_errors(request)
                for request in batch], True
    elif isinstance(batch, dict):
        # `batch` is actually single request object
        return [cls._parse_single_request_trap_errors(batch)], False

    raise errors.RPCInvalidRequest(
        "Neither a batch array nor a single request object found in the request.")
0.003621
def pluralize(word, pos=NOUN, custom=None, classical=True): """ Returns the plural of a given word, e.g., child => children. Handles nouns and adjectives, using classical inflection by default (i.e., where "matrix" pluralizes to "matrices" and not "matrixes"). The custom dictionary is for user-defined replacements. """ if custom and word in custom: return custom[word] # Recurse genitives. # Remove the apostrophe and any trailing -s, # form the plural of the resultant noun, and then append an apostrophe (dog's => dogs'). if word.endswith(("'", "'s")): w = word.rstrip("'s") w = pluralize(w, pos, custom, classical) if w.endswith("s"): return w + "'" else: return w + "'s" # Recurse compound words # (e.g., Postmasters General, mothers-in-law, Roman deities). w = word.replace("-", " ").split(" ") if len(w) > 1: if w[1] == "general" or \ w[1] == "General" and \ w[0] not in plural_categories["general-generals"]: return word.replace(w[0], pluralize(w[0], pos, custom, classical)) elif w[1] in plural_prepositions: return word.replace(w[0], pluralize(w[0], pos, custom, classical)) else: return word.replace(w[-1], pluralize(w[-1], pos, custom, classical)) # Only a very few number of adjectives inflect. n = range(len(plural_rules)) if pos.startswith(ADJECTIVE): n = [0, 1] # Apply pluralization rules. for i in n: for suffix, inflection, category, classic in plural_rules[i]: # A general rule, or a classic rule in classical mode. if category is None: if not classic or (classic and classical): if suffix.search(word) is not None: return suffix.sub(inflection, word) # A rule pertaining to a specific category of words. if category is not None: if word in plural_categories[category] and (not classic or (classic and classical)): if suffix.search(word) is not None: return suffix.sub(inflection, word) return word
0.001785
def _make_agent(self, entrez_id, text_id):
    """Make an Agent object, appropriately grounded.

    Parameters
    ----------
    entrez_id : str
        Entrez id number
    text_id : str
        A plain text systematic name, or None if not listed.

    Returns
    -------
    agent : indra.statements.Agent
        A grounded agent object.
    """
    hgnc_name, db_refs = self._make_db_refs(entrez_id, text_id)
    if hgnc_name is not None:
        name = hgnc_name
    elif text_id is not None:
        name = text_id
    # Handle case where the name is None
    else:
        return None

    return Agent(name, db_refs=db_refs)
0.002782
def movMF( X, n_clusters, posterior_type="soft", force_weights=None, n_init=10, n_jobs=1, max_iter=300, verbose=False, init="random-class", random_state=None, tol=1e-6, copy_x=True, ): """Wrapper for parallelization of _movMF and running n_init times. """ if n_init <= 0: raise ValueError( "Invalid number of initializations." " n_init=%d must be bigger than zero." % n_init ) random_state = check_random_state(random_state) if max_iter <= 0: raise ValueError( "Number of iterations should be a positive number," " got %d instead" % max_iter ) best_inertia = np.infty X = as_float_array(X, copy=copy_x) tol = _tolerance(X, tol) if hasattr(init, "__array__"): init = check_array(init, dtype=X.dtype.type, copy=True) _validate_center_shape(X, n_clusters, init) if n_init != 1: warnings.warn( "Explicit initial center position passed: " "performing only one init in k-means instead of n_init=%d" % n_init, RuntimeWarning, stacklevel=2, ) n_init = 1 # defaults best_centers = None best_labels = None best_weights = None best_concentrations = None best_posterior = None best_inertia = None if n_jobs == 1: # For a single thread, less memory is needed if we just store one set # of the best results (as opposed to one set per run per thread). for it in range(n_init): # cluster on the sphere (centers, weights, concentrations, posterior, labels, inertia) = _movMF( X, n_clusters, posterior_type=posterior_type, force_weights=force_weights, max_iter=max_iter, verbose=verbose, init=init, random_state=random_state, tol=tol, ) # determine if these results are the best so far if best_inertia is None or inertia < best_inertia: best_centers = centers.copy() best_labels = labels.copy() best_weights = weights.copy() best_concentrations = concentrations.copy() best_posterior = posterior.copy() best_inertia = inertia else: # parallelisation of movMF runs seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) results = Parallel(n_jobs=n_jobs, verbose=0)( delayed(_movMF)( X, n_clusters, posterior_type=posterior_type, force_weights=force_weights, max_iter=max_iter, verbose=verbose, init=init, random_state=random_state, tol=tol, ) for seed in seeds ) # Get results with the lowest inertia centers, weights, concentrations, posteriors, labels, inertia = zip(*results) best = np.argmin(inertia) best_labels = labels[best] best_inertia = inertia[best] best_centers = centers[best] best_concentrations = concentrations[best] best_posterior = posteriors[best] best_weights = weights[best] return ( best_centers, best_labels, best_inertia, best_weights, best_concentrations, best_posterior, )
0.001115
def mousePressEvent(self, event):
    """
    Changes the current date to the clicked on date.

    :param      event | <QMousePressEvent>
    """
    XPopupWidget.hideToolTip()

    # update the current date
    self.setCurrentDate(self.dateAt(event.scenePos()))

    super(XCalendarScene, self).mousePressEvent(event)
0.018041
def analyze_string_content(self, string, line_num, filename):
    """Searches string for custom pattern, and captures all high entropy strings that
    match self.regex, with a limit defined as self.entropy_limit.
    """
    output = {}

    for result in self.secret_generator(string):
        if self._is_sequential_string(result):
            continue
        secret = PotentialSecret(self.secret_type, filename, result, line_num)
        output[secret] = secret

    return output
0.007619
def check_error(self, response, status, err_cd):
    " Check an error in the response."
    if 'status' not in response:
        return False
    if response['status'] != status:
        return False
    if 'msgs' not in response:
        return False
    if not isinstance(response['msgs'], list):
        return False
    for msg in response['msgs']:
        if 'LVL' in msg and msg['LVL'] != 'ERROR':
            continue
        if 'ERR_CD' in msg and msg['ERR_CD'] == err_cd:
            return True
    return False
0.003396
def convert_type(self, type): """Convert type to BigQuery """ # Mapping mapping = { 'any': 'STRING', 'array': None, 'boolean': 'BOOLEAN', 'date': 'DATE', 'datetime': 'DATETIME', 'duration': None, 'geojson': None, 'geopoint': None, 'integer': 'INTEGER', 'number': 'FLOAT', 'object': None, 'string': 'STRING', 'time': 'TIME', 'year': 'INTEGER', 'yearmonth': None, } # Not supported type if type not in mapping: message = 'Type %s is not supported' % type raise tableschema.exceptions.StorageError(message) return mapping[type]
0.002516
def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcode, body, decompressor, result_metadata): """ Decodes a native protocol message body :param protocol_version: version to use decoding contents :param user_type_map: map[keyspace name] = map[type name] = custom type to instantiate when deserializing this type :param stream_id: native protocol stream id from the frame header :param flags: native protocol flags bitmap from the header :param opcode: native protocol opcode from the header :param body: frame body :param decompressor: optional decompression function to inflate the body :return: a message decoded from the body and frame attributes """ if flags & COMPRESSED_FLAG: if decompressor is None: raise RuntimeError("No de-compressor available for compressed frame!") body = decompressor(body) flags ^= COMPRESSED_FLAG body = io.BytesIO(body) if flags & TRACING_FLAG: trace_id = UUID(bytes=body.read(16)) flags ^= TRACING_FLAG else: trace_id = None if flags & WARNING_FLAG: warnings = read_stringlist(body) flags ^= WARNING_FLAG else: warnings = None if flags & CUSTOM_PAYLOAD_FLAG: custom_payload = read_bytesmap(body) flags ^= CUSTOM_PAYLOAD_FLAG else: custom_payload = None flags &= USE_BETA_MASK # will only be set if we asserted it in connection estabishment if flags: log.warning("Unknown protocol flags set: %02x. May cause problems.", flags) msg_class = cls.message_types_by_opcode[opcode] msg = msg_class.recv_body(body, protocol_version, user_type_map, result_metadata) msg.stream_id = stream_id msg.trace_id = trace_id msg.custom_payload = custom_payload msg.warnings = warnings if msg.warnings: for w in msg.warnings: log.warning("Server warning: %s", w) return msg
0.005053
def supprime(cls, table, **kwargs):
        """ Remove entries matching the given condition.
        kwargs is a dict of column name : value, with length ONE.
        """
        assert len(kwargs) == 1
        field, value = kwargs.popitem()
        req = f"""DELETE FROM {table} WHERE {field} = """ + cls.mark_style
        args = (value,)
        return MonoExecutant((req, args))
0.007937
def ensure_yx_order(func):
    """Wrap a function to ensure all array arguments are y, x ordered, based on kwarg."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Check what order we're given
        dim_order = kwargs.pop('dim_order', None)
        x_first = _is_x_first_dim(dim_order)

        # If x is the first dimension, flip (transpose) every array within the function args.
        if x_first:
            args = tuple(_check_and_flip(arr) for arr in args)
            for k, v in kwargs.items():
                kwargs[k] = _check_and_flip(v)

        ret = func(*args, **kwargs)

        # If we flipped on the way in, need to flip on the way out so that output array(s)
        # match the dimension order of the original input.
        if x_first:
            return _check_and_flip(ret)
        else:
            return ret

    # Inject a docstring for the dim_order argument into the function's docstring.
    dim_order_doc = """
    dim_order : str or ``None``, optional
        The ordering of dimensions in passed in arrays. Can be one of ``None``, ``'xy'``,
        or ``'yx'``. ``'xy'`` indicates that the dimension corresponding to x is the leading
        dimension, followed by y. ``'yx'`` indicates that x is the last dimension, preceded
        by y. ``None`` indicates that the default ordering should be assumed,
        which is 'yx'. Can only be passed as a keyword argument, i.e.
        func(..., dim_order='xy')."""

    # Find the first blank line after the start of the parameters section
    params = wrapper.__doc__.find('Parameters')
    blank = wrapper.__doc__.find('\n\n', params)
    wrapper.__doc__ = wrapper.__doc__[:blank] + dim_order_doc + wrapper.__doc__[blank:]

    return wrapper
0.005161
def load(self, fileobj): '''Load the dict from the file object''' # try formats from most restrictive to least restrictive for loader in (pickle.load, json.load, csv.reader): fileobj.seek(0) try: return self.initial_update(loader(fileobj)) except Exception as e: pass raise ValueError('File not in a supported format')
0.004785
def check(text): """Suggest the preferred forms. source: Garner's Modern American Usage source_url: http://bit.ly/1T4alrY """ err = "terms.denizen_labels.garner" msg = "'{}' is the preferred denizen label." preferences = [ ["Afrikaner", ["Afrikaaner"]], ["Afrikaner", ["Afrikander"]], ["Alabamian", ["Alabaman"]], ["Albuquerquean", ["Albuquerquian"]], ["Anchorageite", ["Anchoragite"]], ["Angeleno", ["Los Angelean"]], ["Arizonan", ["Arizonian"]], ["Arkansan", ["Arkansawyer"]], ["Belarusian", ["Belarusan"]], ["Caymanian", ["Cayman Islander"]], ["Coloradan", ["Coloradoan"]], ["Fairbanksan", ["Fairbanksian"]], ["Fort Worthian", ["Fort Worther"]], ["Grenadan", ["Grenadian"]], ["Hong Konger", ["Hong Kongite", "Hong Kongian"]], ["Hoosier", ["Indianan", "Indianian"]], ["Illinoisan", ["Illinoisian"]], ["Iowan", ["Iowegian"]], ["Louisianian", ["Louisianan"]], ["Michigander", ["Michiganite", "Michiganian"]], ["Missourian", ["Missouran"]], ["Monegasque", ["Monacan"]], ["Neapolitan", ["Neopolitan"]], ["New Hampshirite", ["New Hampshireite", "New Hampshireman"]], ["New Jerseyan", ["New Jerseyite"]], ["New Orleanian", ["New Orleansian"]], ["Nutmegger", ["Connecticuter"]], ["Oklahoma Cityan", ["Oklahoma Citian"]], ["Oklahoman", ["Oklahomian"]], ["Seattleite", ["Seattlite"]], ["Surinamese", ["Surinamer"]], ["Tallahasseean", ["Tallahassean"]], ["Tennessean", ["Tennesseean"]], ["Tusconan", ["Tusconian", "Tusconite"]], ["Utahn", ["Utahan"]], ["Saudi", ["Saudi Arabian"]], ] return preferred_forms_check(text, preferences, err, msg)
0.000471
def write_numeric_array(fd, header, array): """Write the numeric array""" # make a memory file for writing array data bd = BytesIO() # write matrix header to memory file write_var_header(bd, header) if not isinstance(array, basestring) and header['dims'][0] > 1: # list array data in column major order array = list(chain.from_iterable(izip(*array))) # write matrix data to memory file write_elements(bd, header['mtp'], array) # write the variable to disk file data = bd.getvalue() bd.close() write_var_data(fd, data)
0.001709
def VFSMultiOpen(pathspecs, progress_callback=None): """Opens multiple files specified by given path-specs. See documentation for `VFSOpen` for more information. Args: pathspecs: A list of pathspec instances of files to open. progress_callback: A callback function to call to notify about progress Returns: A context manager yielding file-like objects. """ precondition.AssertIterableType(pathspecs, rdf_paths.PathSpec) vfs_open = functools.partial(VFSOpen, progress_callback=progress_callback) return context.MultiContext(map(vfs_open, pathspecs))
0.008621
def scored_to_phenotype(self, phenotypes):
        """ Convert binary phenotypes to mutually exclusive phenotypes. If none of the phenotypes are set, then phenotype_label becomes nan.

            If more than one of the phenotypes is set, a fatal error is raised.

        Args:
            phenotypes (list): a list of scored_names to convert to phenotypes

        Returns:
            CellDataFrame
        """
        def _apply_score(scored_calls, phenotypes):
            present = sorted(list(set(phenotypes)&set(scored_calls.keys())))
            total = sum([scored_calls[x] for x in present])
            if total > 1: raise ValueError("You can't extract phenotypes from scores if they are not mutually exclusive")
            if total == 0: return np.nan
            for label in present:
                if scored_calls[label] == 1: return label
            raise ValueError("Should have hit an exit criteria already")
        output = self.copy()
        output['phenotype_label'] = output.apply(lambda x: _apply_score(x['scored_calls'], phenotypes), 1)
        # now update the phenotypes with these
        output['phenotype_calls'] = output.apply(lambda x:
            dict([(y, 1 if x['phenotype_label']==y else 0) for y in phenotypes])
        , 1)
        return output
0.015209
def _add_dependency(self, key, dependlist, i, isSubroutine, anexec): """Determines whether the item in the dependency list is a valid function call by excluding local variables and members.""" #First determine if the reference is to a derived type variable lkey = key.lower() if "%" in key: #Find the type of the base variable and then perform a tree #search at the module level to determine if the final reference #is a valid executable base = key.split("%")[0] ftype = None if base in anexec.members and anexec.members[base].is_custom: ftype = anexec.members[base].kind elif base in anexec.parameters and anexec.parameters[base].is_custom: ftype = anexec.parameters[base].kind if ftype is not None: end = anexec.module.type_search(ftype, key) if end is not None and isinstance(end, TypeExecutable): #We have to overwrite the key to include the actual name of the type #that is being referenced instead of the local name of its variable. tname = "{}%{}".format(ftype, '%'.join(key.split('%')[1:])) d = Dependency(tname, dependlist[i + 1], isSubroutine, anexec) anexec.add_dependency(d) elif lkey not in ["for", "forall", "do"]: #This is a straight forward function/subroutine call, make sure that #the symbol is not a local variable or parameter, then add it if not lkey in anexec.members and not lkey in anexec.parameters \ and not lkey in self._intrinsic: #One issue with the logic until now is that some one-line statements #like "if (cond) call subroutine" don't trigger the subroutine flag #of the dependency. if dependlist[i-1] == "call": isSubroutine = True d = Dependency(dependlist[i], dependlist[i + 1], isSubroutine, anexec) anexec.add_dependency(d)
0.0115
def on_remove_row(self, event, row_num=-1): """ Remove specified grid row. If no row number is given, remove the last row. """ if row_num == -1: default = (255, 255, 255, 255) # unhighlight any selected rows: for row in self.selected_rows: attr = wx.grid.GridCellAttr() attr.SetBackgroundColour(default) self.grid.SetRowAttr(row, attr) row_num = self.grid.GetNumberRows() - 1 self.deleteRowButton.Disable() self.selected_rows = {row_num} function_mapping = {'specimen': self.er_magic.delete_specimen, 'sample': self.er_magic.delete_sample, 'site': self.er_magic.delete_site, 'location': self.er_magic.delete_location, 'result': self.er_magic.delete_result} names = [self.grid.GetCellValue(row, 0) for row in self.selected_rows] orphans = [] for name in names: if name: try: row = self.grid.row_labels.index(name) function_mapping[self.grid_type](name) orphans.extend([name]) # if user entered a name, then deletes the row before saving, # there will be a ValueError except ValueError: pass self.grid.remove_row(row) self.selected_rows = set() self.deleteRowButton.Disable() self.grid.Refresh() self.main_sizer.Fit(self)
0.001231
async def get_historic_data(self, n_data):
        """Get historic data."""
        query = gql(
            """
        {
          viewer {
            home(id: "%s") {
              consumption(resolution: HOURLY, last: %s) {
                nodes {
                  from
                  totalCost
                  consumption
                }
              }
            }
          }
        }
          """
            % (self.home_id, n_data)
        )
        data = await self._tibber_control.execute(query)
        if not data:
            _LOGGER.error("Could not find the current data.")
            return
        data = data["viewer"]["home"]["consumption"]
        if data is None:
            self._data = []
            return
        self._data = data["nodes"]
0.002255
def build_url(api_key, spot_id, fields=None, unit=None, start=None, end=None): """ This function builds the request url API details https://magicseaweed.com/developer/forecast-api key: Magic Seaweed API key spot_id: The ID of a location, available from the URL when visiting the corresponding spot on the Magic Seaweed website. IE '616' in http://magicseaweed.com/Pipeline-Backdoor-Surf-Report/616/ fields: Comma separated list of fields to include in the request URL. Defaults to none, which returns all information. Specifying fields may reduce response time. Example: fields=timestamp,wind.*,condition.temperature units: A string of the preferred unit of measurement. Defaults to unit at location of spot_id. eu, uk, us are available start: Local timestamp for the start of a desired forecast range end: Local timestamp for the end of the desired forecast range """ params = {'spot_id': spot_id} if fields: _validate_field_types(fields) params['fields'] = ','.join(fields) if unit: _validate_unit_types(unit) params['units'] = unit if start and end: params['start'] = start params['end'] = end baseURL = requests.Request(HTTP_GET, MSW_URL.format(api_key), params=params).prepare().url return baseURL
0.004614
def _joint_sample_n(self, n, seed=None): """Draw a joint sample from the prior over latents and observations.""" with tf.name_scope("sample_n_joint"): stream = seed_stream.SeedStream( seed, salt="LinearGaussianStateSpaceModel_sample_n_joint") sample_and_batch_shape = distribution_util.prefer_static_value( tf.concat([[n], self.batch_shape_tensor()], axis=0)) # Sample the initial timestep from the prior. Since we want # this sample to have full batch shape (not just the batch shape # of the self.initial_state_prior object which might in general be # smaller), we augment the sample shape to include whatever # extra batch dimensions are required. with tf.control_dependencies(self.runtime_assertions): initial_latent = self.initial_state_prior.sample( sample_shape=_augment_sample_shape( self.initial_state_prior, sample_and_batch_shape, self.validate_args), seed=stream()) # Add a dummy dimension so that matmul() does matrix-vector # multiplication. initial_latent = initial_latent[..., tf.newaxis] initial_observation_matrix = ( self.get_observation_matrix_for_timestep(self.initial_step)) initial_observation_noise = ( self.get_observation_noise_for_timestep(self.initial_step)) initial_observation_pred = initial_observation_matrix.matmul( initial_latent) initial_observation = (initial_observation_pred + initial_observation_noise.sample( sample_shape=_augment_sample_shape( initial_observation_noise, sample_and_batch_shape, self.validate_args), seed=stream())[..., tf.newaxis]) sample_step = build_kalman_sample_step( self.get_transition_matrix_for_timestep, self.get_transition_noise_for_timestep, self.get_observation_matrix_for_timestep, self.get_observation_noise_for_timestep, full_sample_and_batch_shape=sample_and_batch_shape, stream=stream, validate_args=self.validate_args) # Scan over all timesteps to sample latents and observations. (latents, observations) = tf.scan( sample_step, elems=tf.range(self.initial_step+1, self.final_step), initializer=(initial_latent, initial_observation)) # Combine the initial sampled timestep with the remaining timesteps. latents = tf.concat([initial_latent[tf.newaxis, ...], latents], axis=0) observations = tf.concat([initial_observation[tf.newaxis, ...], observations], axis=0) # Put dimensions back in order. The samples we've computed are # ordered by timestep, with shape `[num_timesteps, num_samples, # batch_shape, size, 1]` where `size` represents `latent_size` # or `observation_size` respectively. But timesteps are really # part of each probabilistic event, so we need to return a Tensor # of shape `[num_samples, batch_shape, num_timesteps, size]`. latents = tf.squeeze(latents, -1) latents = distribution_util.move_dimension(latents, 0, -2) observations = tf.squeeze(observations, -1) observations = distribution_util.move_dimension(observations, 0, -2) return latents, observations
0.008092
def probabilities(items, params): """Compute the comparison outcome probabilities given a subset of items. This function computes, for each item in ``items``, the probability that it would win (i.e., be chosen) in a comparison involving the items, given model parameters. Parameters ---------- items : list Subset of items to compare. params : array_like Model parameters. Returns ------- probs : numpy.ndarray A probability distribution over ``items``. """ params = np.asarray(params) return softmax(params.take(items))
0.001658
def _check_delete_fw(self, tenant_id, drvr_name):
        """Deletes the firewall, if all conditions are met.

        After modifying the DB with the delete operation status, this
        function calls the routine to remove the fabric cfg from the DB
        and unconfigure the device.
        """
        fw_dict = self.fwid_attr[tenant_id].get_fw_dict()
        ret = False
        try:
            with self.fwid_attr[tenant_id].mutex_lock:
                self.update_fw_db_final_result(fw_dict.get('fw_id'), (
                    fw_constants.RESULT_FW_DELETE_INIT))
                ret = self._delete_fw_fab_dev(tenant_id, drvr_name, fw_dict)
        except Exception as exc:
            LOG.error("Exception raised in delete fw %s", str(exc))
        return ret
0.002621
def add_record(self, record): """Add a record to the OAISet. :param record: Record to be added. :type record: `invenio_records.api.Record` or derivative. """ record.setdefault('_oai', {}).setdefault('sets', []) assert not self.has_record(record) record['_oai']['sets'].append(self.spec)
0.005797
def _get_port_range(range_str): """ Given a string with a port or port range: '80', '80-120' Returns tuple with range start and end ports: (80, 80), (80, 120) """ if range_str == '*': return PortsRangeHelper.PortsRange(start=0, end=65535) s = range_str.split('-') if len(s) == 2: return PortsRangeHelper.PortsRange(start=int(s[0]), end=int(s[1])) return PortsRangeHelper.PortsRange(start=int(s[0]), end=int(s[0]))
0.004008
def write_chapter(self): """ Create a chapter that contains a random number of paragraphs """ self.paragraphs = [] self.paragraphs.append('\n') for x in range(randint(0, 50)): p = Paragraph(self.model) self.paragraphs.append(p.get_paragraph()) self.paragraphs.append('\n') return self.paragraphs
0.004963
def run_algorithm(start, end, initialize, capital_base, handle_data=None, before_trading_start=None, analyze=None, data_frequency='daily', bundle='quantopian-quandl', bundle_timestamp=None, trading_calendar=None, metrics_set='default', benchmark_returns=None, default_extension=True, extensions=(), strict_extensions=True, environ=os.environ, blotter='default'): """ Run a trading algorithm. Parameters ---------- start : datetime The start date of the backtest. end : datetime The end date of the backtest.. initialize : callable[context -> None] The initialize function to use for the algorithm. This is called once at the very begining of the backtest and should be used to set up any state needed by the algorithm. capital_base : float The starting capital for the backtest. handle_data : callable[(context, BarData) -> None], optional The handle_data function to use for the algorithm. This is called every minute when ``data_frequency == 'minute'`` or every day when ``data_frequency == 'daily'``. before_trading_start : callable[(context, BarData) -> None], optional The before_trading_start function for the algorithm. This is called once before each trading day (after initialize on the first day). analyze : callable[(context, pd.DataFrame) -> None], optional The analyze function to use for the algorithm. This function is called once at the end of the backtest and is passed the context and the performance data. data_frequency : {'daily', 'minute'}, optional The data frequency to run the algorithm at. bundle : str, optional The name of the data bundle to use to load the data to run the backtest with. This defaults to 'quantopian-quandl'. bundle_timestamp : datetime, optional The datetime to lookup the bundle data for. This defaults to the current time. trading_calendar : TradingCalendar, optional The trading calendar to use for your backtest. metrics_set : iterable[Metric] or str, optional The set of metrics to compute in the simulation. If a string is passed, resolve the set with :func:`zipline.finance.metrics.load`. default_extension : bool, optional Should the default zipline extension be loaded. This is found at ``$ZIPLINE_ROOT/extension.py`` extensions : iterable[str], optional The names of any other extensions to load. Each element may either be a dotted module path like ``a.b.c`` or a path to a python file ending in ``.py`` like ``a/b/c.py``. strict_extensions : bool, optional Should the run fail if any extensions fail to load. If this is false, a warning will be raised instead. environ : mapping[str -> str], optional The os environment to use. Many extensions use this to get parameters. This defaults to ``os.environ``. blotter : str or zipline.finance.blotter.Blotter, optional Blotter to use with this algorithm. If passed as a string, we look for a blotter construction function registered with ``zipline.extensions.register`` and call it with no parameters. Default is a :class:`zipline.finance.blotter.SimulationBlotter` that never cancels orders. Returns ------- perf : pd.DataFrame The daily performance of the algorithm. See Also -------- zipline.data.bundles.bundles : The available data bundles. 
""" load_extensions(default_extension, extensions, strict_extensions, environ) return _run( handle_data=handle_data, initialize=initialize, before_trading_start=before_trading_start, analyze=analyze, algofile=None, algotext=None, defines=(), data_frequency=data_frequency, capital_base=capital_base, bundle=bundle, bundle_timestamp=bundle_timestamp, start=start, end=end, output=os.devnull, trading_calendar=trading_calendar, print_algo=False, metrics_set=metrics_set, local_namespace=False, environ=environ, blotter=blotter, benchmark_returns=benchmark_returns, )
0.000217
def _get_targets_by_declared_platform_with_placeholders(self, targets_by_platform):
    """
    Aggregates a dict that maps a platform string to a list of targets that specify the platform.
    If no targets have platform arguments, return a dict containing platforms inherited from
    the PythonSetup object.

    :param targets_by_platform: a dict mapping a platform string to a list of targets that
                                specify the platform; may be empty.
    :returns: a dict mapping a platform string to a list of targets that specify the platform.
    """
    if not targets_by_platform:
      for platform in self._python_setup.platforms:
        targets_by_platform[platform] = ['(No target) Platform inherited from either the '
                                         '--platforms option or a pants.ini file.']
    return targets_by_platform
0.011858
def Get(self, name, default=utils.NotAValue, context=None): """Get the value contained by the named parameter. This method applies interpolation/escaping of the named parameter and retrieves the interpolated value. Args: name: The name of the parameter to retrieve. This should be in the format of "Section.name" default: If retrieving the value results in an error, return this default. context: A list of context strings to resolve the configuration. This is a set of roles the caller is current executing with. For example (client, windows). If not specified we take the context from the current thread's TLS stack. Returns: The value of the parameter. Raises: ConfigFormatError: if verify=True and the config doesn't validate. RuntimeError: if a value is retrieved before the config is initialized. ValueError: if a bad context is passed. """ if not self.initialized: if name not in self.constants: raise RuntimeError("Error while retrieving %s: " "Configuration hasn't been initialized yet." % name) if context: # Make sure it's not just a string and is iterable. if (isinstance(context, string_types) or not isinstance(context, collections.Iterable)): raise ValueError("context should be a list, got %r" % context) calc_context = context # Only use the cache if possible. cache_key = (name, tuple(context or ())) if default is utils.NotAValue and cache_key in self.cache: return self.cache[cache_key] # Use a default global context if context is not provided. if context is None: calc_context = self.context type_info_obj = self.FindTypeInfo(name) _, return_value = self._GetValue( name, context=calc_context, default=default) # If we returned the specified default, we just return it here. if return_value is default: return default try: return_value = self.InterpolateValue( return_value, default_section=name.split(".")[0], type_info_obj=type_info_obj, context=calc_context) except (lexer.ParseError, ValueError) as e: # We failed to parse the value, but a default was specified, so we just # return that. if default is not utils.NotAValue: return default raise ConfigFormatError("While parsing %s: %s" % (name, e)) try: new_value = type_info_obj.Validate(return_value) if new_value is not None: # Update the stored value with the valid data. return_value = new_value except ValueError: if default is not utils.NotAValue: return default raise # Cache the value for next time. if default is utils.NotAValue: self.cache[cache_key] = return_value return return_value
0.007256
def chebyshev(point1, point2): """Computes distance between 2D points using chebyshev metric :param point1: 1st point :type point1: list :param point2: 2nd point :type point2: list :returns: Distance between point1 and point2 :rtype: float """ return max(abs(point1[0] - point2[0]), abs(point1[1] - point2[1]))
0.002874
def BC_Rigidity(self): """ Utility function to help implement boundary conditions by specifying them for and applying them to the elastic thickness grid """ ######################################### # FLEXURAL RIGIDITY BOUNDARY CONDITIONS # ######################################### # West if self.BC_W == 'Periodic': self.BC_Rigidity_W = 'periodic' elif (self.BC_W == np.array(['0Displacement0Slope', '0Moment0Shear', '0Slope0Shear'])).any(): self.BC_Rigidity_W = '0 curvature' elif self.BC_W == 'Mirror': self.BC_Rigidity_W = 'mirror symmetry' else: sys.exit("Invalid Te B.C. case") # East if self.BC_E == 'Periodic': self.BC_Rigidity_E = 'periodic' elif (self.BC_E == np.array(['0Displacement0Slope', '0Moment0Shear', '0Slope0Shear'])).any(): self.BC_Rigidity_E = '0 curvature' elif self.BC_E == 'Mirror': self.BC_Rigidity_E = 'mirror symmetry' else: sys.exit("Invalid Te B.C. case") # North if self.BC_N == 'Periodic': self.BC_Rigidity_N = 'periodic' elif (self.BC_N == np.array(['0Displacement0Slope', '0Moment0Shear', '0Slope0Shear'])).any(): self.BC_Rigidity_N = '0 curvature' elif self.BC_N == 'Mirror': self.BC_Rigidity_N = 'mirror symmetry' else: sys.exit("Invalid Te B.C. case") # South if self.BC_S == 'Periodic': self.BC_Rigidity_S = 'periodic' elif (self.BC_S == np.array(['0Displacement0Slope', '0Moment0Shear', '0Slope0Shear'])).any(): self.BC_Rigidity_S = '0 curvature' elif self.BC_S == 'Mirror': self.BC_Rigidity_S = 'mirror symmetry' else: sys.exit("Invalid Te B.C. case") ############# # PAD ARRAY # ############# if np.isscalar(self.Te): self.D *= np.ones(self.qs.shape) # And leave Te as a scalar for checks else: self.Te_unpadded = self.Te.copy() self.Te = np.hstack(( np.nan*np.zeros((self.Te.shape[0], 1)), self.Te, np.nan*np.zeros((self.Te.shape[0], 1)) )) self.Te = np.vstack(( np.nan*np.zeros(self.Te.shape[1]), self.Te, np.nan*np.zeros(self.Te.shape[1]) )) self.D = np.hstack(( np.nan*np.zeros((self.D.shape[0], 1)), self.D, np.nan*np.zeros((self.D.shape[0], 1)) )) self.D = np.vstack(( np.nan*np.zeros(self.D.shape[1]), self.D, np.nan*np.zeros(self.D.shape[1]) )) ############################################################### # APPLY FLEXURAL RIGIDITY BOUNDARY CONDITIONS TO PADDED ARRAY # ############################################################### if self.BC_Rigidity_W == "0 curvature": self.D[:,0] = 2*self.D[:,1] - self.D[:,2] if self.BC_Rigidity_E == "0 curvature": self.D[:,-1] = 2*self.D[:,-2] - self.D[:,-3] if self.BC_Rigidity_N == "0 curvature": self.D[0,:] = 2*self.D[1,:] - self.D[2,:] if self.BC_Rigidity_S == "0 curvature": self.D[-1,:] = 2*self.D[-2,:] - self.D[-3,:] if self.BC_Rigidity_W == "mirror symmetry": self.D[:,0] = self.D[:,2] if self.BC_Rigidity_E == "mirror symmetry": self.D[:,-1] = self.D[:,-3] if self.BC_Rigidity_N == "mirror symmetry": self.D[0,:] = self.D[2,:] # Yes, will work on corners -- double-reflection if self.BC_Rigidity_S == "mirror symmetry": self.D[-1,:] = self.D[-3,:] if self.BC_Rigidity_W == "periodic": self.D[:,0] = self.D[:,-2] if self.BC_Rigidity_E == "periodic": self.D[:,-1] = self.D[:,-3] if self.BC_Rigidity_N == "periodic": self.D[0,:] = self.D[-2,:] if self.BC_Rigidity_S == "periodic": self.D[-1,:] = self.D[-3,:]
0.0235
def MatrixDiagPart(a): """ Batched diag op that returns only the diagonal elements. """ r = np.zeros(a.shape[:-2] + (min(a.shape[-2:]),)) for coord in np.ndindex(a.shape[:-2]): pos = coord + (Ellipsis,) r[pos] = np.diagonal(a[pos]) return r,
0.003559
def remove_nesting(dom, tag_name): """ Unwrap items in the node list that have ancestors with the same tag. """ for node in dom.getElementsByTagName(tag_name): for ancestor in ancestors(node): if ancestor is node: continue if ancestor is dom.documentElement: break if ancestor.tagName == tag_name: unwrap(node) break
0.002273
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by its name.

    virtual_machine
        vim.VirtualMachine object to power on/off

    action
        Operation option to power on/off the machine
    '''
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
0.001529
def manual_update_license(self, fd, filename='cdrouter.lic'): """Update the license on your CDRouter system manually by uploading a .lic license from the CDRouter Support Lounge. :param fd: File-like object to upload. :param filename: (optional) Filename to use for license as string. :return: :class:`system.Upgrade <system.Upgrade>` object :rtype: system.Upgrade """ schema = UpgradeSchema() resp = self.service.post(self.base+'license/', files={'file': (filename, fd)}) return self.service.decode(schema, resp)
0.00319
def Bahadori_gas(T, MW): r'''Estimates the thermal conductivity of hydrocarbons gases at low P. Fits their data well, and is useful as only MW is required. Y is the Molecular weight, and X the temperature. .. math:: K = a + bY + CY^2 + dY^3 a = A_1 + B_1 X + C_1 X^2 + D_1 X^3 b = A_2 + B_2 X + C_2 X^2 + D_2 X^3 c = A_3 + B_3 X + C_3 X^2 + D_3 X^3 d = A_4 + B_4 X + C_4 X^2 + D_4 X^3 Parameters ---------- T : float Temperature of the gas [K] MW : float Molecular weight of the gas [g/mol] Returns ------- kg : float Estimated gas thermal conductivity [W/m/k] Notes ----- The accuracy of this equation has not been reviewed. Examples -------- >>> Bahadori_gas(40+273.15, 20) # Point from article 0.031968165337873326 References ---------- .. [1] Bahadori, Alireza, and Saeid Mokhatab. "Estimating Thermal Conductivity of Hydrocarbons." Chemical Engineering 115, no. 13 (December 2008): 52-54 ''' A = [4.3931323468E-1, -3.88001122207E-2, 9.28616040136E-4, -6.57828995724E-6] B = [-2.9624238519E-3, 2.67956145820E-4, -6.40171884139E-6, 4.48579040207E-8] C = [7.54249790107E-6, -6.46636219509E-7, 1.5124510261E-8, -1.0376480449E-10] D = [-6.0988433456E-9, 5.20752132076E-10, -1.19425545729E-11, 8.0136464085E-14] X, Y = T, MW a = A[0] + B[0]*X + C[0]*X**2 + D[0]*X**3 b = A[1] + B[1]*X + C[1]*X**2 + D[1]*X**3 c = A[2] + B[2]*X + C[2]*X**2 + D[2]*X**3 d = A[3] + B[3]*X + C[3]*X**2 + D[3]*X**3 return a + b*Y + c*Y**2 + d*Y**3
0.003049
def add_dict_to_hash(a_hash, a_dict): """Adds `a_dict` to `a_hash` Args: a_hash (`Hash`): the secure hash, e.g created by hashlib.md5 a_dict (dict[string, [string]]): the dictionary to add to the hash """ if a_dict is None: return for k, v in a_dict.items(): a_hash.update(b'\x00' + k.encode('utf-8') + b'\x00' + v.encode('utf-8'))
0.005222
def _set_mirror(self, v, load=False): """ Setter method for mirror, mapped from YANG variable /openflow_global/openflow/mirror (container) If this variable is read-only (config: false) in the source YANG file, then _set_mirror is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mirror() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=mirror.mirror, is_container='container', presence=False, yang_name="mirror", rest_name="mirror", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Openflow Mirror interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """mirror must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=mirror.mirror, is_container='container', presence=False, yang_name="mirror", rest_name="mirror", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Openflow Mirror interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)""", }) self.__mirror = t if hasattr(self, '_set'): self._set()
0.006046
def _pack(cls, tensors): """Pack a list of `Tensor`s into a single, flattened, rank-1 `Tensor`.""" if not tensors: return None elif len(tensors) == 1: return array_ops.reshape(tensors[0], [-1]) else: flattened = [array_ops.reshape(tensor, [-1]) for tensor in tensors] return array_ops.concat(flattened, 0)
0.014493
def build_time(start_time): """ Calculate build time per package """ diff_time = round(time.time() - start_time, 2) if diff_time <= 59.99: sum_time = str(diff_time) + " Sec" elif diff_time > 59.99 and diff_time <= 3599.99: sum_time = round(diff_time / 60, 2) sum_time_list = re.findall(r"\d+", str(sum_time)) sum_time = ("{0} Min {1} Sec".format(sum_time_list[0], sum_time_list[1])) elif diff_time > 3599.99: sum_time = round(diff_time / 3600, 2) sum_time_list = re.findall(r"\d+", str(sum_time)) sum_time = ("{0} Hours {1} Min".format(sum_time_list[0], sum_time_list[1])) return sum_time
0.001302
def replace_in_file(workspace, src_file_path, from_str, to_str): """Replace from_str with to_str in the name and content of the given file. If any edits were necessary, returns the new filename (which may be the same as the old filename). """ from_bytes = from_str.encode('ascii') to_bytes = to_str.encode('ascii') data = read_file(os.path.join(workspace, src_file_path), binary_mode=True) if from_bytes not in data and from_str not in src_file_path: return None dst_file_path = src_file_path.replace(from_str, to_str) safe_file_dump(os.path.join(workspace, dst_file_path), data.replace(from_bytes, to_bytes), mode='wb') if src_file_path != dst_file_path: os.unlink(os.path.join(workspace, src_file_path)) return dst_file_path
0.013889
def checkout(repo, ref):
    """Check out a repo."""
    # Delete local branch if it exists, remote branch will be tracked
    # automatically. This prevents stale local branches from causing problems.
    # It also avoids problems with appending origin/ to refs as that doesn't
    # work with tags, SHAs, and upstreams not called origin.
    if ref in repo.branches:
        # eg delete master but leave origin/master
        log.warn("Removing local branch {b} for repo {r}".format(b=ref, r=repo))
        # Can't delete currently checked out branch, so make sure head is
        # detached before deleting.
        repo.head.reset(index=True, working_tree=True)
        repo.git.checkout(repo.head.commit.hexsha)
        repo.delete_head(ref, '--force')

    log.info("Checking out repo {repo} to ref {ref}".format(repo=repo, ref=ref))
    repo.head.reset(index=True, working_tree=True)
    repo.git.checkout(ref)
    repo.head.reset(index=True, working_tree=True)

    sha = repo.head.commit.hexsha
    log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
0.000826