Columns:
    text  - string (lengths 78 to 104k characters)
    score - float64 (range 0 to 0.18)
def extract_file_name(content_dispo):
    """Extract file name from the input request body"""
    # print type(content_dispo)
    # print repr(content_dispo)
    # conversion of escaped string (str type) from server to a unicode object
    content_dispo = content_dispo.decode('unicode-escape').strip('"')
    file_name = ""
    for key_val in content_dispo.split(';'):
        param = key_val.strip().split('=')
        if param[0] == "filename":
            file_name = param[1].strip('"')
            break
    return file_name
0.001876
def _discard_config(self):
    """Set candidate_cfg to current running-config. Erase the merge_cfg file."""
    discard_candidate = "copy running-config {}".format(
        self._gen_full_path(self.candidate_cfg)
    )
    discard_merge = "copy null: {}".format(self._gen_full_path(self.merge_cfg))
    self.device.send_command_expect(discard_candidate)
    self.device.send_command_expect(discard_merge)
0.009259
def get_owner_and_repo(repourl):
    """
    Takes a git repository URL from Bitbucket and tries to determine the owner and
    repository name.

    :param repourl: Bitbucket git repo in the form of
        [email protected]:OWNER/REPONAME.git
        https://bitbucket.com/OWNER/REPONAME.git
        ssh://[email protected]/OWNER/REPONAME.git

    :return: owner, repo: The owner of the repository and the repository name
    """
    parsed = urlparse(repourl)

    if parsed.scheme:
        path = parsed.path[1:]
    else:
        # we assume git@host:owner/repo.git here
        path = parsed.path.split(':', 1)[-1]

    if path.endswith('.git'):
        path = path[:-4]
    while path.endswith('/'):
        path = path[:-1]

    parts = path.split('/')
    assert len(parts) == 2, 'OWNER/REPONAME is expected'

    return parts
0.004269
def split(self, decode=True):
    """
    For bit alignment (re-aligning the hex and ASCII views).

    :rtype: list of array.array
    """
    start = 0
    result = []
    message = self.decoded_bits if decode else self.plain_bits
    bit_alignments = set()
    if self.align_labels:
        for l in self.message_type:
            bit_alignments.add(l.start)
            bit_alignments.add(l.end)

    self.__bit_alignments = sorted(bit_alignments)

    for pos in self.__bit_alignments:
        result.append(message[start:pos])
        start = pos

    result.append(message[start:])

    return result
0.004412
def _getspnam(name, root=None):
    '''
    Alternative implementation for getspnam that uses only /etc/shadow
    '''
    root = '/' if not root else root
    passwd = os.path.join(root, 'etc/shadow')

    with salt.utils.files.fopen(passwd) as fp_:
        for line in fp_:
            line = salt.utils.stringutils.to_unicode(line)
            comps = line.strip().split(':')
            if comps[0] == name:
                # Generate a getspnam compatible output
                for i in range(2, 9):
                    comps[i] = int(comps[i]) if comps[i] else -1
                return spwd.struct_spwd(comps)
    raise KeyError
0.001575
def get_assessment_part_item_design_session_for_bank(self, bank_id, proxy): """Gets the ``OsidSession`` associated with the assessment part item design service for the given bank. arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank`` return: (osid.assessment.authoring.AssessmentPartItemDesignSession) - an ``AssessmentPartItemDesignSession`` raise: NotFound - no ``Bank`` found by the given ``Id`` raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_part_item_design()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_part_item_design()`` and ``supports_visible_federation()`` are ``true``.* """ if not self.supports_assessment_part_lookup(): # This is kludgy, but only until Tom fixes spec raise errors.Unimplemented() # Also include check to see if the catalog Id is found otherwise raise errors.NotFound # pylint: disable=no-member return sessions.AssessmentPartItemDesignSession(bank_id, proxy=proxy, runtime=self._runtime)
0.004691
def as_task(self, logger=None, **fields):
    """
    Start a new L{eliot.Action} of this type as a task (i.e. top-level
    action) with the given start fields.

    See L{ActionType.__call__} for example of usage.

    @param logger: A L{eliot.ILogger} provider to which the action's
        messages will be written, or C{None} to use the default one.

    @param fields: Extra fields to add to the message.

    @rtype: L{eliot.Action}
    """
    return self._startTask(logger, self.action_type, self._serializers,
                           **fields)
0.003436
def sign(self, hash160_lookup, tx_in_idx_set=None, hash_type=None, **kwargs): """ Sign a standard transaction. hash160_lookup: A dictionary (or another object with .get) where keys are hash160 and values are tuples (secret exponent, public_pair, is_compressed) or None (in which case the script will obviously not be signed). """ checker = self.SolutionChecker(self.tx) if tx_in_idx_set is None: tx_in_idx_set = range(len(self.tx.txs_in)) self.tx.check_unspents() for tx_in_idx in sorted(tx_in_idx_set): tx_context = checker.tx_context_for_idx(tx_in_idx) try: checker.check_solution(tx_context, flags=None) continue except ScriptError: pass try: r = self.solve(hash160_lookup, tx_in_idx, hash_type=hash_type, **kwargs) if isinstance(r, bytes): self.tx.txs_in[tx_in_idx].script = r else: self.tx.txs_in[tx_in_idx].script = r[0] self.tx.set_witness(tx_in_idx, r[1]) except (SolvingError, ValueError): pass return self
0.003946
def from_text_file(file_path):
    """Load MonsoonData objects from a text file generated by
    MonsoonData.save_to_text_file.

    Args:
        file_path: The full path of the file to load from, including the
            file name.

    Returns:
        A list of MonsoonData objects.
    """
    results = []
    with io.open(file_path, 'r', encoding='utf-8') as f:
        data_strs = f.read().split(MonsoonData.delimiter)
        for data_str in data_strs:
            results.append(MonsoonData.from_string(data_str))
    return results
0.003356
def create(self, name, regexes, tag_ids, logs=None): """ Create a hook :param name: The hook's name (should be the same as the tag) :type name: str :param regexes: The list of regular expressions that Logentries expects. Ex: `['user_agent = /curl\/[\d.]*/']` Would match where the user-agent is curl. :type regexes: list of str :param tag_id: The ids of the tags to associate the hook with. (The 'id' key of the create tag response) :type tag_id: list of str :param logs: The logs to add the hook to. Comes from the 'key' key in the log dict. :type logs: list of str :returns: The response of your post :rtype: dict :raises: This will raise a :class:`ServerException<logentries_api.exceptions.ServerException>` if there is an error from Logentries """ data = { 'name': name, 'triggers': regexes, 'sources': logs or [], 'groups': [], 'actions': tag_ids } return self._post( request=ApiActions.CREATE.value, uri=ApiUri.HOOKS.value, params=data )
0.003965
def check_accesspoints(sess): """ check the status of all connected access points """ ap_names = walk_data(sess, name_ap_oid, helper)[0] ap_operationals = walk_data(sess, operational_ap_oid, helper)[0] ap_availabilitys = walk_data(sess, availability_ap_oid, helper)[0] ap_alarms = walk_data(sess, alarm_ap_oid, helper)[0] #ap_ip = walk_data(sess, ip_ap_oid, helper) # no result helper.add_summary("Access Points Status") for x in range(len(ap_names)): ap_name = ap_names[x] ap_operational = ap_operationals[x] ap_availability = ap_availabilitys[x] ap_alarm = ap_alarms[x] # Add all states to the long output helper.add_long_output("%s - Operational: %s - Availabilty: %s - Alarm: %s" % (ap_name, operational_states[int(ap_operational)], availability_states[int(ap_availability)], alarm_states[int(ap_alarm)])) # Operational State if ap_operational != "1" and ap_operational != "4": helper.status(critical) helper.add_summary("%s Operational State: %s" % (ap_name, operational_states[int(ap_operational)])) # Avaiability State if ap_availability != "3": helper.status(critical) helper.add_summary("%s Availability State: %s" % (ap_name, availability_states[int(ap_availability)])) # Alarm State if ap_alarm == "2": helper.status(warning) helper.add_summary("%s Controller Alarm State: %s" % (ap_name, alarm_states[int(ap_alarm)])) if ap_alarm == "3" or ap_alarm == "4": helper.status(critical) helper.add_summary("%s Controller Alarm State: %s" % (ap_name, alarm_states[int(ap_alarm)]))
0.009751
def _catch_errors(self, json_response): """ Changed: removed check on number of elements: - totalResultsCount not sytematically returned (e.g in hierarchy) - done in base.py """ status = json_response.get('status') if status: message = status.get('message') value = status.get('value') custom_messages = { 10: 'Invalid credentials', 18: 'Do not use the demo account for your application', } self.error = custom_messages.get(value, message) LOGGER.error("Error %s from JSON %s", self.error, json_response) return self.error
0.002894
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile): """ Orographic Gage File Read from File Method """ # Set file extension property self.fileExtension = extension # Open file and parse into HmetRecords with open(path, 'r') as orthoFile: for line in orthoFile: sline = line.strip().split() # Cases if sline[0].lower() == 'num_sites:': self.numSites = sline[1] elif sline[0].lower() == 'elev_base': self.elevBase = sline[1] elif sline[0].lower() == 'elev_2': self.elev2 = sline[1] elif sline[0].lower() == 'year': """DO NOTHING""" else: # Create datetime object dateTime = datetime(year=int(sline[0]), month=int(sline[1]), day=int(sline[2]), hour=int(sline[3])) # Create GSSHAPY OrthoMeasurement object measurement = OrographicMeasurement(dateTime=dateTime, temp2=sline[4]) # Associate OrthoMeasurement with OrthographicGageFile self.orographicMeasurements.append(measurement)
0.002
def _get_namespace2go2term(go2terms):
    """Group GO IDs by namespace."""
    namespace2go2term = cx.defaultdict(dict)
    for goid, goterm in go2terms.items():
        namespace2go2term[goterm.namespace][goid] = goterm
    return namespace2go2term
0.007435
def do_upgrade(self):
    """Implement your upgrades here."""
    sql = text('delete from upgrade where upgrade = :upgrade')
    for upgrade in self.legacy_upgrades:
        db.engine.execute(sql, upgrade=upgrade)
0.008734
def get_angle(v1, v2, units="degrees"):
    """
    Calculates the angle between two vectors.

    Args:
        v1: Vector 1
        v2: Vector 2
        units: "degrees" or "radians". Defaults to "degrees".

    Returns:
        Angle between them in the requested units.
    """
    d = np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)
    d = min(d, 1)
    d = max(d, -1)
    angle = math.acos(d)
    if units == "degrees":
        return math.degrees(angle)
    elif units == "radians":
        return angle
    else:
        raise ValueError("Invalid units {}".format(units))
0.001733
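A minimal usage sketch for the get_angle snippet above (assuming the function and its numpy/math imports are in scope; not part of the original sample):

# Orthogonal unit vectors: expect 90 degrees, or pi/2 when asking for radians.
print(get_angle([1, 0, 0], [0, 1, 0]))             # 90.0
print(get_angle([1, 0, 0], [0, 1, 0], "radians"))  # ~1.5707963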
def finite_diff(f, axis, dx=1.0, method='forward', out=None, **kwargs): """Calculate the partial derivative of ``f`` along a given ``axis``. In the interior of the domain of f, the partial derivative is computed using first-order accurate forward or backward difference or second-order accurate central differences. With padding the same method and thus accuracy is used on endpoints as in the interior i.e. forward and backward differences use first-order accuracy on edges while central differences use second-order accuracy at edges. Without padding one-sided forward or backward differences are used at the boundaries. The accuracy at the endpoints can then also be triggered by the edge order. The returned array has the same shape as the input array ``f``. Per default forward difference with dx=1 and no padding is used. Parameters ---------- f : `array-like` An N-dimensional array. axis : int The axis along which the partial derivative is evaluated. dx : float, optional Scalar specifying the distance between sampling points along ``axis``. method : {'central', 'forward', 'backward'}, optional Finite difference method which is used in the interior of the domain of ``f``. out : `numpy.ndarray`, optional An N-dimensional array to which the output is written. Has to have the same shape as the input array ``f``. pad_mode : string, optional The padding mode to use outside the domain. ``'constant'``: Fill with ``pad_const``. ``'symmetric'``: Reflect at the boundaries, not doubling the outmost values. ``'periodic'``: Fill in values from the other side, keeping the order. ``'order0'``: Extend constantly with the outmost values (ensures continuity). ``'order1'``: Extend with constant slope (ensures continuity of the first derivative). This requires at least 2 values along each axis where padding is applied. ``'order2'``: Extend with second order accuracy (ensures continuity of the second derivative). This requires at least 3 values along each axis where padding is applied. pad_const : float, optional For ``pad_mode == 'constant'``, ``f`` assumes ``pad_const`` for indices outside the domain of ``f`` Returns ------- out : `numpy.ndarray` N-dimensional array of the same shape as ``f``. If ``out`` was provided, the returned object is a reference to it. Examples -------- >>> f = np.array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) >>> finite_diff(f, axis=0) array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., -9.]) Without arguments the above defaults to: >>> finite_diff(f, axis=0, dx=1.0, method='forward', pad_mode='constant') array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., -9.]) Parameters can be changed one by one: >>> finite_diff(f, axis=0, dx=0.5) array([ 2., 2., 2., 2., 2., 2., 2., 2., 2., -18.]) >>> finite_diff(f, axis=0, pad_mode='order1') array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) Central differences and different edge orders: >>> finite_diff(0.5 * f ** 2, axis=0, method='central', pad_mode='order1') array([ 0.5, 1. , 2. , 3. , 4. , 5. , 6. , 7. , 8. , 8.5]) >>> finite_diff(0.5 * f ** 2, axis=0, method='central', pad_mode='order2') array([-0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) In-place evaluation: >>> out = f.copy() >>> out is finite_diff(f, axis=0, out=out) True """ f_arr = np.asarray(f) ndim = f_arr.ndim if f_arr.shape[axis] < 2: raise ValueError('in axis {}: at least two elements required, got {}' ''.format(axis, f_arr.shape[axis])) if axis < 0: axis += ndim if not (0 <= axis < ndim): raise IndexError('`axis` {} outside the valid range 0 ... 
{}' ''.format(axis, ndim - 1)) dx, dx_in = float(dx), dx if dx <= 0 or not np.isfinite(dx): raise ValueError("`dx` must be positive, got {}".format(dx_in)) method, method_in = str(method).lower(), method if method not in _SUPPORTED_DIFF_METHODS: raise ValueError('`method` {} was not understood'.format(method_in)) pad_mode = kwargs.pop('pad_mode', 'constant') if pad_mode not in _SUPPORTED_PAD_MODES: raise ValueError('`pad_mode` {} not understood' ''.format(pad_mode)) pad_const = kwargs.pop('pad_const', 0) pad_const = f.dtype.type(pad_const) if out is None: out = np.empty_like(f_arr) else: if out.shape != f.shape: raise ValueError('expected output shape {}, got {}' ''.format(f.shape, out.shape)) if f_arr.shape[axis] < 2 and pad_mode == 'order1': raise ValueError("size of array to small to use 'order1', needs at " "least 2 elements along axis {}.".format(axis)) if f_arr.shape[axis] < 3 and pad_mode == 'order2': raise ValueError("size of array to small to use 'order2', needs at " "least 3 elements along axis {}.".format(axis)) if kwargs: raise ValueError('unkown keyword argument(s): {}'.format(kwargs)) # create slice objects: initially all are [:, :, ..., :] # Swap axes so that the axis of interest is first. This is a O(1) # operation and is done to simplify the code below. out, out_in = np.swapaxes(out, 0, axis), out f_arr = np.swapaxes(f_arr, 0, axis) # Interior of the domain of f if method == 'central': # 1D equivalent: out[1:-1] = (f[2:] - f[:-2])/2.0 np.subtract(f_arr[2:], f_arr[:-2], out=out[1:-1]) out[1:-1] /= 2.0 elif method == 'forward': # 1D equivalent: out[1:-1] = (f[2:] - f[1:-1]) np.subtract(f_arr[2:], f_arr[1:-1], out=out[1:-1]) elif method == 'backward': # 1D equivalent: out[1:-1] = (f[1:-1] - f[:-2]) np.subtract(f_arr[1:-1], f_arr[:-2], out=out[1:-1]) # Boundaries if pad_mode == 'constant': # Assume constant value c for indices outside the domain of ``f`` # With padding the method used on endpoints is the same as in the # interior of the domain of f if method == 'central': out[0] = (f_arr[1] - pad_const) / 2.0 out[-1] = (pad_const - f_arr[-2]) / 2.0 elif method == 'forward': out[0] = f_arr[1] - f_arr[0] out[-1] = pad_const - f_arr[-1] elif method == 'backward': out[0] = f_arr[0] - pad_const out[-1] = f_arr[-1] - f_arr[-2] elif pad_mode == 'symmetric': # Values of f for indices outside the domain of f are replicates of # the edge values # With padding the method used on endpoints is the same as in the # interior of the domain of f if method == 'central': out[0] = (f_arr[1] - f_arr[0]) / 2.0 out[-1] = (f_arr[-1] - f_arr[-2]) / 2.0 elif method == 'forward': out[0] = f_arr[1] - f_arr[0] out[-1] = 0 elif method == 'backward': out[0] = 0 out[-1] = f_arr[-1] - f_arr[-2] elif pad_mode == 'symmetric_adjoint': # The adjoint case of symmetric if method == 'central': out[0] = (f_arr[1] + f_arr[0]) / 2.0 out[-1] = (-f_arr[-1] - f_arr[-2]) / 2.0 elif method == 'forward': out[0] = f_arr[1] out[-1] = -f_arr[-1] elif method == 'backward': out[0] = f_arr[0] out[-1] = -f_arr[-2] elif pad_mode == 'periodic': # Values of f for indices outside the domain of f are replicates of # the edge values on the other side if method == 'central': out[0] = (f_arr[1] - f_arr[-1]) / 2.0 out[-1] = (f_arr[0] - f_arr[-2]) / 2.0 elif method == 'forward': out[0] = f_arr[1] - f_arr[0] out[-1] = f_arr[0] - f_arr[-1] elif method == 'backward': out[0] = f_arr[0] - f_arr[-1] out[-1] = f_arr[-1] - f_arr[-2] elif pad_mode == 'order0': # Values of f for indices outside the domain of f are replicates of # the edge value. 
if method == 'central': out[0] = (f_arr[1] - f_arr[0]) / 2.0 out[-1] = (f_arr[-1] - f_arr[-2]) / 2.0 elif method == 'forward': out[0] = f_arr[1] - f_arr[0] out[-1] = 0 elif method == 'backward': out[0] = 0 out[-1] = f_arr[-1] - f_arr[-2] elif pad_mode == 'order0_adjoint': # Values of f for indices outside the domain of f are replicates of # the edge value. if method == 'central': out[0] = (f_arr[0] + f_arr[1]) / 2.0 out[-1] = -(f_arr[-1] + f_arr[-2]) / 2.0 elif method == 'forward': out[0] = f_arr[1] out[-1] = -f_arr[-1] elif method == 'backward': out[0] = f_arr[0] out[-1] = -f_arr[-2] elif pad_mode == 'order1': # Values of f for indices outside the domain of f are linearly # extrapolated from the inside. # independent of ``method`` out[0] = f_arr[1] - f_arr[0] out[-1] = f_arr[-1] - f_arr[-2] elif pad_mode == 'order1_adjoint': # Values of f for indices outside the domain of f are linearly # extrapolated from the inside. if method == 'central': out[0] = f_arr[0] + f_arr[1] / 2.0 out[-1] = -f_arr[-1] - f_arr[-2] / 2.0 # Increment in case array is very short and we get aliasing out[1] -= f_arr[0] / 2.0 out[-2] += f_arr[-1] / 2.0 elif method == 'forward': out[0] = f_arr[0] + f_arr[1] out[-1] = -f_arr[-1] # Increment in case array is very short and we get aliasing out[1] -= f_arr[0] elif method == 'backward': out[0] = f_arr[0] out[-1] = -f_arr[-1] - f_arr[-2] # Increment in case array is very short and we get aliasing out[-2] += f_arr[-1] elif pad_mode == 'order2': # 2nd order edges out[0] = -(3.0 * f_arr[0] - 4.0 * f_arr[1] + f_arr[2]) / 2.0 out[-1] = (3.0 * f_arr[-1] - 4.0 * f_arr[-2] + f_arr[-3]) / 2.0 elif pad_mode == 'order2_adjoint': # Values of f for indices outside the domain of f are quadratically # extrapolated from the inside. if method == 'central': out[0] = 1.5 * f_arr[0] + 0.5 * f_arr[1] out[-1] = -1.5 * f_arr[-1] - 0.5 * f_arr[-2] # Increment in case array is very short and we get aliasing out[1] -= 1.5 * f_arr[0] out[2] += 0.5 * f_arr[0] out[-3] -= 0.5 * f_arr[-1] out[-2] += 1.5 * f_arr[-1] elif method == 'forward': out[0] = 1.5 * f_arr[0] + 1.0 * f_arr[1] out[-1] = -1.5 * f_arr[-1] # Increment in case array is very short and we get aliasing out[1] -= 2.0 * f_arr[0] out[2] += 0.5 * f_arr[0] out[-3] -= 0.5 * f_arr[-1] out[-2] += 1.0 * f_arr[-1] elif method == 'backward': out[0] = 1.5 * f_arr[0] out[-1] = -1.0 * f_arr[-2] - 1.5 * f_arr[-1] # Increment in case array is very short and we get aliasing out[1] -= 1.0 * f_arr[0] out[2] += 0.5 * f_arr[0] out[-3] -= 0.5 * f_arr[-1] out[-2] += 2.0 * f_arr[-1] else: raise NotImplementedError('unknown pad_mode') # divide by step size out /= dx return out_in
0.000084
def remove(self, msg, callback):
    """Remove a callback from the callback list.

    msg: Message template
    callback: Callback method to remove. If callback is None, all
              callbacks for the message template are removed.
    """
    if callback is None:
        self._dict.pop(msg, None)
    else:
        cb = self._dict.get(msg, [])
        try:
            cb.remove(callback)
        except ValueError:
            pass
        if cb:
            _LOGGER.debug('%d callbacks for message: %s', len(cb), msg)
            self.add(msg, cb, True)
        else:
            self._dict.pop(msg, None)
            _LOGGER.debug('Removed all callbacks for message: %s', msg)
0.002628
def fuzzy_date_parser(self, text):
    """Thin wrapper around the ``parsedatetime`` and ``dateutil`` modules.

    Since there's no upstream support for multiple locales, this
    wrapper exists.

    :param str text: Text to parse.
    :returns: A parsed date/time object. Raises exception on failure.
    :rtype: datetime
    """
    try:
        parsed = dateparser.parse(text, dayfirst=True)
        return parsed
    except (ValueError, TypeError):
        locales = parsedatetime._locales[:]

        # Loop through all the locales and try to parse our string
        # successfully
        for locale in locales:
            const = parsedatetime.Constants(locale)
            const.re_option += re.UNICODE

            parser = parsedatetime.Calendar(const)
            parsed, ok = parser.parse(text)

            if ok:
                return datetime(*parsed[:6])
0.002105
def add(self, child):
    """
    Adds a typed child object to the component.

    @param child: Child object to be added.
    """
    if isinstance(child, Component):
        self.add_child(child)
    else:
        raise ModelError('Unsupported child element')
0.006803
def derive_identity_rmf(name, rmf): """Create an "identity" RMF that does not mix energies. *name* The name of the RMF object to be created; passed to Sherpa. *rmf* An existing RMF object on which to base this one. Returns: A new RMF1D object that has a response matrix that is as close to diagonal as we can get in energy space, and that has a constant sensitivity as a function of detector channel. In many X-ray observations, the relevant background signal does not behave like an astrophysical source that is filtered through the telescope's response functions. However, I have been unable to get current Sherpa (version 4.9) to behave how I want when working with backround models that are *not* filtered through these response functions. This function constructs an "identity" RMF response matrix that provides the best possible approximation of a passthrough "instrumental response": it mixes energies as little as possible and has a uniform sensitivity as a function of detector channel. """ from sherpa.astro.data import DataRMF from sherpa.astro.instrument import RMF1D # The "x" axis of the desired matrix -- the columnar direction; axis 1 -- # is "channels". There are n_chan of them and each maps to a notional # energy range specified by "e_min" and "e_max". # # The "y" axis of the desired matrix -- the row direction; axis 1 -- is # honest-to-goodness energy. There are tot_n_energy energy bins, each # occupying a range specified by "energ_lo" and "energ_hi". # # We want every channel that maps to a valid output energy to have a # nonzero entry in the matrix. The relative sizes of n_energy and n_cell # can vary, as can the bounds of which regions of each axis can be validly # mapped to each other. So this problem is basically equivalent to that of # drawing an arbitrary pixelated line on bitmap, without anti-aliasing. # # The output matrix is represented in a row-based sparse format. # # - There is a integer vector "n_grp" of size "n_energy". It gives the # number of "groups" needed to fill in each row of the matrix. Let # "tot_groups = sum(n_grp)". For a given row, "n_grp[row_index]" may # be zero, indicating that the row is all zeros. # - There are integer vectors "f_chan" and "n_chan", each of size # "tot_groups", that define each group. "f_chan" gives the index of # the first channel column populated by the group; "n_chan" gives the # number of columns populated by the group. Note that there can # be multiple groups for a single row, so successive group records # may fill in different pieces of the same row. # - Let "tot_cells = sum(n_chan)". # - There is a vector "matrix" of size "tot_cells" that stores the actual # matrix data. This is just a concatenation of all the data corresponding # to each group. # - Unpopulated matrix entries are zero. # # See expand_rmf_matrix() for a sloppy implementation of how to unpack # this sparse format. 
n_chan = rmf.e_min.size n_energy = rmf.energ_lo.size c_lo_offset = rmf.e_min[0] c_lo_slope = (rmf.e_min[-1] - c_lo_offset) / (n_chan - 1) c_hi_offset = rmf.e_max[0] c_hi_slope = (rmf.e_max[-1] - c_hi_offset) / (n_chan - 1) e_lo_offset = rmf.energ_lo[0] e_lo_slope = (rmf.energ_lo[-1] - e_lo_offset) / (n_energy - 1) e_hi_offset = rmf.energ_hi[0] e_hi_slope = (rmf.energ_hi[-1] - e_hi_offset) / (n_energy - 1) all_e_indices = np.arange(n_energy) all_e_los = e_lo_slope * all_e_indices + e_lo_offset start_chans = np.floor((all_e_los - c_lo_offset) / c_lo_slope).astype(np.int) all_e_his = e_hi_slope * all_e_indices + e_hi_offset stop_chans = np.ceil((all_e_his - c_hi_offset) / c_hi_slope).astype(np.int) first_e_index_on_channel_grid = 0 while stop_chans[first_e_index_on_channel_grid] < 0: first_e_index_on_channel_grid += 1 last_e_index_on_channel_grid = n_energy - 1 while start_chans[last_e_index_on_channel_grid] >= n_chan: last_e_index_on_channel_grid -= 1 n_nonzero_rows = last_e_index_on_channel_grid + 1 - first_e_index_on_channel_grid e_slice = slice(first_e_index_on_channel_grid, last_e_index_on_channel_grid + 1) n_grp = np.zeros(n_energy, dtype=np.int) n_grp[e_slice] = 1 start_chans = np.maximum(start_chans[e_slice], 0) stop_chans = np.minimum(stop_chans[e_slice], n_chan - 1) # We now have a first cut at a row-oriented expression of our "identity" # RMF. However, it's conservative. Trim down to eliminate overlaps between # sequences. for i in range(n_nonzero_rows - 1): my_end = stop_chans[i] next_start = start_chans[i+1] if next_start <= my_end: stop_chans[i] = max(start_chans[i], next_start - 1) # Results are funky unless the sums along the vertical axis are constant. # Ideally the sum along the *horizontal* axis would add up to 1 (since, # ideally, each row is a probability distribution), but it is not # generally possible to fulfill both of these constraints simultaneously. # The latter constraint does not seem to matter in practice so we ignore it. # Due to the funky encoding of the matrix, we need to build a helper table # to meet the vertical-sum constraint. counts = np.zeros(n_chan, dtype=np.int) for i in range(n_nonzero_rows): counts[start_chans[i]:stop_chans[i]+1] += 1 counts[:start_chans.min()] = 1 counts[stop_chans.max()+1:] = 1 assert (counts > 0).all() # We can now build the matrix. f_chan = start_chans rmfnchan = stop_chans + 1 - f_chan assert (rmfnchan > 0).all() matrix = np.zeros(rmfnchan.sum()) amounts = 1. / counts ofs = 0 for i in range(n_nonzero_rows): f = f_chan[i] n = rmfnchan[i] matrix[ofs:ofs+n] = amounts[f:f+n] ofs += n # All that's left to do is create the Python objects. drmf = DataRMF( name, rmf.detchans, rmf.energ_lo, rmf.energ_hi, n_grp, f_chan, rmfnchan, matrix, offset = 0, e_min = rmf.e_min, e_max = rmf.e_max, header = None ) return RMF1D(drmf, pha=rmf._pha)
0.002031
def best_unique_genomes(self, n):
    """Returns the n most fit genomes, with no duplication."""
    best_unique = {}
    for g in self.most_fit_genomes:
        best_unique[g.key] = g
    best_unique_list = list(best_unique.values())

    def key(genome):
        return genome.fitness

    return sorted(best_unique_list, key=key, reverse=True)[:n]
0.005236
def tomof(self, maxline=MAX_MOF_LINE): """ Return a MOF string with the declaration of this CIM qualifier type. The returned MOF string conforms to the ``qualifierDeclaration`` ABNF rule defined in :term:`DSP0004`. Qualifier flavors are included in the returned MOF string only when the information is available (i.e. the value of the corresponding attribute is not `None`). Because :term:`DSP0004` does not support instance qualifiers, and thus does not define a flavor keyword for the :attr:`~pywbem.CIMQualifierDeclaration.toinstance` attribute, that flavor is not included in the returned MOF string. Returns: :term:`unicode string`: MOF string. """ mof = [] mof.append(u'Qualifier ') mof.append(self.name) mof.append(u' : ') mof.append(self.type) if self.is_array: mof.append(u'[') if self.array_size is not None: mof.append(six.text_type(self.array_size)) mof.append(u']') if self.value is not None: mof.append(u' = ') if isinstance(self.value, list): mof.append(u'{ ') mof_str = u''.join(mof) line_pos = len(mof_str) - mof_str.rfind('\n') - 1 val_str, line_pos = _value_tomof( self.value, self.type, MOF_INDENT, maxline, line_pos, 3, False) mof.append(val_str) if isinstance(self.value, list): mof.append(u' }') mof.append(u',\n') mof.append(_indent_str(MOF_INDENT + 1)) mof.append(u'Scope(') mof_scopes = [] for scope in self._ordered_scopes: if self.scopes.get(scope, False): mof_scopes.append(scope.lower()) mof.append(u', '.join(mof_scopes)) mof.append(u')') # toinstance flavor not included here because not part of DSP0004 mof_flavors = [] if self.overridable is True: mof_flavors.append('EnableOverride') elif self.overridable is False: mof_flavors.append('DisableOverride') if self.tosubclass is True: mof_flavors.append('ToSubclass') elif self.tosubclass is False: mof_flavors.append('Restricted') if self.translatable: mof_flavors.append('Translatable') if mof_flavors: mof.append(u',\n') mof.append(_indent_str(MOF_INDENT + 1)) mof.append(u'Flavor(') mof.append(u', '.join(mof_flavors)) mof.append(u')') mof.append(u';\n') return u''.join(mof)
0.000735
def put(self, key, value, cache=None, options={}): """Query the server to set the key specified to the value specified in the specified cache. Keyword arguments: key -- the name of the key to be set. Required. value -- the value to set key to. Must be a string or JSON serialisable. Required. cache -- the cache to store the item in. Defaults to None, which uses self.name. If no name is set, raises a ValueError. options -- a dict of arguments to send with the request. See http://dev.iron.io/cache/reference/api/#put_item for more information on defaults and possible values. """ if cache is None: cache = self.name if cache is None: raise ValueError("Cache name must be set") if not isinstance(value, str_type) and not isinstance(value, int_types): value = json.dumps(value) options["value"] = value body = json.dumps(options) cache = quote_plus(cache) key = quote_plus(key) result = self.client.put("caches/%s/items/%s" % (cache, key), body, {"Content-Type": "application/json"}) return Item(cache=cache, key=key, value=value)
0.003858
def remove_diskgroup(cache_disk_id, data_accessibility=True, service_instance=None): ''' Remove the diskgroup with the specified cache disk. cache_disk_id The canonical name of the cache disk. data_accessibility Specifies whether to ensure data accessibility. Default value is True. service_instance Service instance (vim.ServiceInstance) of the vCenter/ESXi host. Default is None. .. code-block:: bash salt '*' vsphere.remove_diskgroup cache_disk_id='naa.000000000000001' ''' log.trace('Validating diskgroup input') host_ref = _get_proxy_target(service_instance) hostname = __proxy__['esxi.get_details']()['esxi_host'] diskgroups = \ salt.utils.vmware.get_diskgroups(host_ref, cache_disk_ids=[cache_disk_id]) if not diskgroups: raise VMwareObjectRetrievalError( 'No diskgroup with cache disk id \'{0}\' was found in ESXi ' 'host \'{1}\''.format(cache_disk_id, hostname)) log.trace('data accessibility = %s', data_accessibility) salt.utils.vsan.remove_diskgroup( service_instance, host_ref, diskgroups[0], data_accessibility=data_accessibility) return True
0.001554
def receive(sources, timeout=None): """Get all outstanding signals from sources. A source can be either (1) an object ID returned by the task (we want to receive signals from), or (2) an actor handle. When invoked by the same entity E (where E can be an actor, task or driver), for each source S in sources, this function returns all signals generated by S since the last receive() was invoked by E on S. If this is the first call on S, this function returns all past signals generated by S so far. Note that different actors, tasks or drivers that call receive() on the same source S will get independent copies of the signals generated by S. Args: sources: List of sources from which the caller waits for signals. A source is either an object ID returned by a task (in this case the object ID is used to identify that task), or an actor handle. If the user passes the IDs of multiple objects returned by the same task, this function returns a copy of the signals generated by that task for each object ID. timeout: Maximum time (in seconds) this function waits to get a signal from a source in sources. If None, the timeout is infinite. Returns: A list of pairs (S, sig), where S is a source in the sources argument, and sig is a signal generated by S since the last time receive() was called on S. Thus, for each S in sources, the return list can contain zero or multiple entries. """ # If None, initialize the timeout to a huge value (i.e., over 30,000 years # in this case) to "approximate" infinity. if timeout is None: timeout = 10**12 if timeout < 0: raise ValueError("The 'timeout' argument cannot be less than 0.") if not hasattr(ray.worker.global_worker, "signal_counters"): ray.worker.global_worker.signal_counters = defaultdict(lambda: b"0") signal_counters = ray.worker.global_worker.signal_counters # Map the ID of each source task to the source itself. task_id_to_sources = defaultdict(lambda: []) for s in sources: task_id_to_sources[_get_task_id(s).hex()].append(s) # Construct the redis query. query = "XREAD BLOCK " # Multiply by 1000x since timeout is in sec and redis expects ms. query += str(1000 * timeout) query += " STREAMS " query += " ".join([task_id for task_id in task_id_to_sources]) query += " " query += " ".join([ ray.utils.decode(signal_counters[ray.utils.hex_to_binary(task_id)]) for task_id in task_id_to_sources ]) answers = ray.worker.global_worker.redis_client.execute_command(query) if not answers: return [] results = [] # Decoding is a little bit involved. Iterate through all the answers: for i, answer in enumerate(answers): # Make sure the answer corresponds to a source, s, in sources. task_id = ray.utils.decode(answer[0]) task_source_list = task_id_to_sources[task_id] # The list of results for source s is stored in answer[1] for r in answer[1]: for s in task_source_list: if r[1][1].decode("ascii") == ACTOR_DIED_STR: results.append((s, ActorDiedSignal())) else: # Now it gets tricky: r[0] is the redis internal sequence # id signal_counters[ray.utils.hex_to_binary(task_id)] = r[0] # r[1] contains a list with elements (key, value), in our # case we only have one key "signal" and the value is the # signal. signal = cloudpickle.loads( ray.utils.hex_to_binary(r[1][1])) results.append((s, signal)) return results
0.000256
def GetValueByPath(self, path_segments): """Retrieves a plist value by path. Args: path_segments (list[str]): path segment strings relative to the root of the plist. Returns: object: The value of the key specified by the path or None. """ key = self.root_key for path_segment in path_segments: if isinstance(key, dict): try: key = key[path_segment] except KeyError: return None elif isinstance(key, list): try: list_index = int(path_segment, 10) except ValueError: return None key = key[list_index] else: return None if not key: return None return key
0.012363
def round(self, x):
    """Round the given value.

    @param x: to round
    @type x: numeric
    """
    fraction, scaled_x, scale = self._get_fraction(x)
    if (fraction < self.minimum_stochastic_distance
            or 1 - fraction < self.minimum_stochastic_distance):
        result = round(x, self.precision)
    else:
        rounddown = fraction < self.random_generator.random()
        if rounddown:
            result = math.floor(scaled_x) / scale
        else:
            result = math.ceil(scaled_x) / scale
    self._record_roundoff_error(x, result)
    return result
0.010972
def delegate(attribute_name, method_names):
    """Pass the call to the attribute called attribute_name for every
    method listed in method_names."""
    # hack for python 2.7 as nonlocal is not available
    info = {
        'attribute': attribute_name,
        'methods': method_names
    }

    def decorator(cls):
        """Decorate class."""
        attribute = info['attribute']
        if attribute.startswith("__"):
            attribute = "_" + cls.__name__ + attribute
        for name in info['methods']:
            setattr(cls, name,
                    eval("lambda self, *a, **kw: "
                         "self.{0}.{1}(*a, **kw)".format(attribute, name)))
        return cls

    return decorator
0.004267
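A small, hedged illustration of how the delegate decorator above could be used; the Engine/Car classes here are made up for the example and are not part of the original sample:

class Engine:
    def start(self):
        return "engine started"

@delegate("engine", ["start"])
class Car:
    def __init__(self):
        # the delegated calls are forwarded to this attribute
        self.engine = Engine()

print(Car().start())  # "engine started"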
def design(n, spacing, shift, fI, fC=False, r=None, r_def=(1, 1, 2), reim=None, cvar='amp', error=0.01, name=None, full_output=False, finish=False, save=True, path='filters', verb=2, plot=1): r"""Digital linear filter (DLF) design This routine can be used to design digital linear filters for the Hankel or Fourier transform, or for any linear transform ([Ghos70]_). For this included or provided theoretical transform pairs can be used. Alternatively, one can use the EM modeller empymod to use the responses to an arbitrary 1D model as numerical transform pair. This filter designing tool uses the direct matrix inversion method as described in [Kong07]_ and is based on scripts by [Key12]_. The tool is an add-on to the electromagnetic modeller empymod [Wert17]_. Fruitful discussions with Evert Slob and Kerry Key improved the add-on substantially. Example notebooks of its usage can be found in the repo `github.com/empymod/empymod-examples <https://github.com/empymod/empymod-examples>`_. Parameters ---------- n : int Filter length. spacing: float or tuple (start, stop, num) Spacing between filter points. If tuple, it corresponds to the input for np.linspace with endpoint=True. shift: float or tuple (start, stop, num) Shift of base from zero. If tuple, it corresponds to the input for np.linspace with endpoint=True. fI, fC : transform pairs Theoretical or numerical transform pair(s) for the inversion (I) and for the check of goodness (fC). fC is optional. If not provided, fI is used for both fI and fC. r : array, optional Right-hand side evaluation points for the check of goodness (fC). Defaults to r = np.logspace(0, 5, 1000), which are a lot of evaluation points, and depending on the transform pair way too long r's. r_def : tuple (add_left, add_right, factor), optional Definition of the right-hand side evaluation points r of the inversion. r is derived from the base values, default is (1, 1, 2). - rmin = log10(1/max(base)) - add_left - rmax = log10(1/min(base)) + add_right - r = logspace(rmin, rmax, factor*n) reim : np.real or np.imag, optional Which part of complex transform pairs is used for the inversion. Defaults to np.real. cvar : string {'amp', 'r'}, optional If 'amp', the inversion minimizes the amplitude. If 'r', the inversion maximizes the right-hand side evaluation point r. Defaults is 'amp'. error : float, optional Up to which relative error the transformation is considered good in the evaluation of the goodness. Default is 0.01 (1 %). name : str, optional Name of the filter. Defaults to dlf_+str(n). full_output : bool, optional If True, returns best filter and output from scipy.optimize.brute; else only filter. Default is False. finish : None, True, or callable, optional If callable, it is passed through to scipy.optimize.brute: minimization function to find minimize best result from brute-force approach. Default is None. You can simply provide True in order to use scipy.optimize.fmin_powell(). Set this to None if you are only interested in the actually provided spacing/shift-values. save : bool, optional If True, best filter is saved to plain text files in ./filters/. Can be loaded with fdesign.load_filter(name). If full, the inversion output is stored too. You can add '.gz' to `name`, which will then save the full inversion output in a compressed file instead of plain text. path : string, optional Absolute or relative path where output will be saved if `save=True`. Default is 'filters'. verb : {0, 1, 2}, optional Level of verbosity, default is 2: - 0: Print nothing. 
- 1: Print warnings. - 2: Print additional time, progress, and result plot : {0, 1, 2, 3}, optional Level of plot-verbosity, default is 1: - 0: Plot nothing. - 1: Plot brute-force result - 2: Plot additional theoretical transform pairs, and best inv. - 3: Plot additional inversion result (can result in lots of plots depending on spacing and shift) If you are using a notebook, use %matplotlib notebook to have all inversion results appear in the same plot. Returns ------- filter : empymod.filter.DigitalFilter instance Best filter for the input parameters. full : tuple Output from scipy.optimize.brute with full_output=True. (Returned when ``full_output`` is True.) """ # === 1. LET'S START ============ t0 = printstartfinish(verb) # Check plot with matplotlib (soft dependency) if plot > 0 and not plt: plot = 0 if verb > 0: print(plt_msg) # Ensure fI, fC are lists def check_f(f): if hasattr(f, 'name'): # put into list if single tp f = [f, ] else: # ensure list (works for lists, tuples, arrays) f = list(f) return f if not fC: # copy fI if fC not provided fC = dc(fI) fI = check_f(fI) if fI[0].name == 'j2': print("* ERROR :: j2 (jointly j0 and j1) is only implemented for " + "fC, not for fI!") raise ValueError('j2') fC = check_f(fC) # Check default input values if finish and not callable(finish): finish = fmin_powell if name is None: name = 'dlf_'+str(n) if r is None: r = np.logspace(0, 5, 1000) if reim not in [np.real, np.imag]: reim = np.real # Get spacing and shift slices, cast r ispacing = _ls2ar(spacing, 'spacing') ishift = _ls2ar(shift, 'shift') r = np.atleast_1d(r) # Initialize log-dict to keep track in brute-force minimization-function. log = {'cnt1': -1, # Counter 'cnt2': -1, # %-counter; v Total number of iterations v 'totnr': np.arange(*ispacing).size*np.arange(*ishift).size, 'time': t0, # Timer 'warn-r': 0} # Warning for short r # === 2. THEORETICAL MODEL rhs ============ # Calculate rhs for i, f in enumerate(fC): fC[i].rhs = f.rhs(r) # Plot if plot > 1: _call_qc_transform_pairs(n, ispacing, ishift, fI, fC, r, r_def, reim) # === 3. RUN BRUTE FORCE OVER THE GRID ============ full = brute(_get_min_val, (ispacing, ishift), full_output=True, args=(n, fI, fC, r, r_def, error, reim, cvar, verb, plot, log), finish=finish) # Add cvar-information to full: 0 for 'amp', 1 for 'r' if cvar == 'r': full += (1, ) else: full += (0, ) # Finish output from brute/fmin; depending if finish or not if verb > 1: print('') if callable(finish): print('') # Get best filter (full[0] contains spacing/shift of the best result). dlf = _calculate_filter(n, full[0][0], full[0][1], fI, r_def, reim, name) # If verbose, print result if verb > 1: print_result(dlf, full) # === 4. FINISHED ============ printstartfinish(verb, t0) # If plot, show result if plot > 0: print('* QC: Overview of brute-force inversion:') plot_result(dlf, full, False) if plot > 1: print('* QC: Inversion result of best filter (minimum amplitude):') _get_min_val(full[0], n, fI, fC, r, r_def, error, reim, cvar, 0, plot+1, log) # Save if desired if save: if full_output: save_filter(name, dlf, full, path=path) else: save_filter(name, dlf, path=path) # Output, depending on full_output if full_output: return dlf, full else: return dlf
0.000124
def get_imports(self, volume=None, state=None, offset=None, limit=None):
    """
    Fetches imports for this project.

    :param volume: Optional volume identifier.
    :param state: Optional state.
    :param offset: Pagination offset.
    :param limit: Pagination limit.
    :return: Collection object.
    """
    return self._api.imports.query(project=self.id, volume=volume,
                                   state=state, offset=offset, limit=limit)
0.004032
def _handle_commit_error(self, failure, retry_delay, attempt): """ Retry the commit request, depending on failure type Depending on the type of the failure, we retry the commit request with the latest processed offset, or callback/errback self._commit_ds """ # Check if we are stopping and the request was cancelled if self._stopping and failure.check(CancelledError): # Not really an error return self._deliver_commit_result(self._last_committed_offset) # Check that the failure type is a Kafka error...this could maybe be # a tighter check to determine whether a retry will succeed... if not failure.check(KafkaError): log.error("Unhandleable failure during commit attempt: %r\n\t%r", failure, failure.getBriefTraceback()) return self._deliver_commit_result(failure) # Do we need to abort? if (self.request_retry_max_attempts != 0 and attempt >= self.request_retry_max_attempts): log.debug("%r: Exhausted attempts: %d to commit offset: %r", self, self.request_retry_max_attempts, failure) return self._deliver_commit_result(failure) # Check the retry_delay to see if we should log at the higher level # Using attempts % 2 gets us 1-warn/minute with defaults timings if retry_delay < self.retry_max_delay or 0 == (attempt % 2): log.debug("%r: Failure committing offset to kafka: %r", self, failure) else: # We've retried until we hit the max delay, log alternately at warn log.warning("%r: Still failing committing offset to kafka: %r", self, failure) # Schedule a delayed call to retry the commit retry_delay = min(retry_delay * REQUEST_RETRY_FACTOR, self.retry_max_delay) self._commit_call = self.client.reactor.callLater( retry_delay, self._send_commit_request, retry_delay, attempt + 1)
0.000956
def pendulum():
    """Configuration for the pendulum classic control task."""
    locals().update(default())
    # Environment
    env = 'Pendulum-v0'
    max_length = 200
    steps = 1e6  # 1M
    # Optimization
    batch_size = 20
    chunk_length = 50
    return locals()
0.043137
def relaxng(cls, includechildren=True,extraattribs = None, extraelements=None, origclass = None): """Returns a RelaxNG definition for this element (as an XML element (lxml.etree) rather than a string)""" E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace",'a':"http://relaxng.org/ns/annotation/0.9" }) if not extraattribs: extraattribs = [] extraattribs.append(E.optional(E.attribute(E.text(), name='set')) ) return AbstractElement.relaxng(includechildren, extraattribs, extraelements, cls)
0.027536
def load_external_data_for_model(model, base_dir):  # type: (ModelProto, Text) -> None
    """
    Loads external tensors into model

    @params
    model: ModelProto to load external data to
    base_dir: directory that contains external data
    """
    for tensor in _get_all_tensors(model):
        if uses_external_data(tensor):
            load_external_data_for_tensor(tensor, base_dir)
0.005076
def make(cls, vol):
    """ Convert uuid to Volume, if necessary. """
    if isinstance(vol, cls):
        return vol
    elif vol is None:
        return None
    else:
        return cls(vol, None)
0.008811
def clean(self, value):
    """Cleans and returns the given value, or raises a
    ParameterNotValidError exception"""
    if isinstance(value, numpy.ndarray):
        return value
    elif isinstance(value, (list, tuple)):
        return numpy.array(value)
    raise ParameterNotValidError
0.009615
def change_logger_levels(logger=None, level=logging.DEBUG):
    """
    Go through the logger and handlers and update their levels to the
    one specified.

    :param logger: logging name or object to modify, defaults to root logger
    :param level: logging level to set at (10=Debug, 20=Info, 30=Warn, 40=Error)
    """
    if not isinstance(logger, logging.Logger):
        logger = logging.getLogger(logger)

    logger.setLevel(level)
    for handler in logger.handlers:
        handler.level = level
0.003945
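An illustrative call of the change_logger_levels helper above, assuming it is in scope; the logger name "myapp" is hypothetical:

import logging

logging.basicConfig(level=logging.DEBUG)
change_logger_levels("myapp", logging.WARNING)  # quiet a chatty named logger
change_logger_levels(level=logging.ERROR)       # or adjust the root logger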
def _convert_oauth2_credentials(credentials):
    """Converts to :class:`google.oauth2.credentials.Credentials`.

    Args:
        credentials (Union[oauth2client.client.OAuth2Credentials,
            oauth2client.client.GoogleCredentials]): The credentials to
            convert.

    Returns:
        google.oauth2.credentials.Credentials: The converted credentials.
    """
    new_credentials = google.oauth2.credentials.Credentials(
        token=credentials.access_token,
        refresh_token=credentials.refresh_token,
        token_uri=credentials.token_uri,
        client_id=credentials.client_id,
        client_secret=credentials.client_secret,
        scopes=credentials.scopes)

    new_credentials._expires = credentials.token_expiry

    return new_credentials
0.001284
def standardize_polygons_str(data_str): """Given a POLYGON string, standardize the coordinates to a 1x1 grid. Input : data_str (taken from above) Output: tuple of polygon objects """ # find all of the polygons in the letter (for instance an A # needs to be constructed from 2 polygons) path_strs = re.findall("\(\(([^\)]+?)\)\)", data_str.strip()) # convert the data into a numpy array polygons_data = [] for path_str in path_strs: data = np.array([ tuple(map(float, x.split())) for x in path_str.strip().split(",")]) polygons_data.append(data) # standardize the coordinates min_coords = np.vstack(data.min(0) for data in polygons_data).min(0) max_coords = np.vstack(data.max(0) for data in polygons_data).max(0) for data in polygons_data: data[:, ] -= min_coords data[:, ] /= (max_coords - min_coords) polygons = [] for data in polygons_data: polygons.append(load_wkt( "POLYGON((%s))" % ",".join(" ".join(map(str, x)) for x in data))) return tuple(polygons)
0.005484
def run(self):
    """print .new configuration files
    """
    self.find_new()
    for n in self.news:
        print("{0}".format(n))
    print("")
    self.msg.template(78)
    print("| Installed {0} new configuration files:".format(len(self.news)))
    self.msg.template(78)
    self.choices()
0.005714
def list_images(self):
    """
    Return the list of available images for this node type

    :returns: Array of hash
    """
    try:
        return list_images(self._NODE_TYPE)
    except OSError as e:
        raise aiohttp.web.HTTPConflict(text="Can not list images {}".format(e))
0.009464
def tile_x_size(self, zoom):
    """
    Width of a tile in SRID units at zoom level.

    - zoom: zoom level
    """
    warnings.warn(DeprecationWarning("tile_x_size is deprecated"))
    validate_zoom(zoom)
    return round(self.x_size / self.matrix_width(zoom), ROUND)
0.006689
def share(self, group_id, group_access, expires_at=None, **kwargs):
    """Share the project with a group.

    Args:
        group_id (int): ID of the group.
        group_access (int): Access level for the group.
        expires_at: Expiration date of the share (optional).
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabCreateError: If the server failed to perform the request
    """
    path = '/projects/%s/share' % self.get_id()
    data = {'group_id': group_id,
            'group_access': group_access,
            'expires_at': expires_at}
    self.manager.gitlab.http_post(path, post_data=data, **kwargs)
0.002762
def check_ranges(cls, ranges, length):
    """Removes errored ranges"""
    result = []
    for start, end in ranges:
        if isinstance(start, int) or isinstance(end, int):
            if isinstance(start, int) and not (0 <= start < length):
                continue
            elif isinstance(start, int) and isinstance(end, int) and not (start <= end):
                continue
            elif start is None and end == 0:
                continue
            result.append((start, end))
    return result
0.010695
def plot_obsseries(self, **kwargs: Any) -> None:
    """Plot the |IOSequence.series| of the |Obs| sequence object.

    See method |Node.plot_allseries| for further information.
    """
    self.__plot_series([self.sequences.obs], kwargs)
0.007874
def set_trace(host=None, port=None, patch_stdstreams=False):
    """
    Opens a remote PDB on first available port.
    """
    if host is None:
        host = os.environ.get('REMOTE_PDB_HOST', '127.0.0.1')
    if port is None:
        port = int(os.environ.get('REMOTE_PDB_PORT', '0'))
    rdb = RemotePdb(host=host, port=port, patch_stdstreams=patch_stdstreams)
    rdb.set_trace(frame=sys._getframe().f_back)
0.002427
def _populate_trie(self, values: List[str]) -> CharTrie:
    """Takes a list and inserts its elements into a new trie and returns it"""
    if self._default_tokenizer:
        return reduce(self._populate_trie_reducer, iter(values), CharTrie())
    return reduce(self._populate_trie_reducer_regex, iter(values), CharTrie())
0.014749
def pan_delta(self, dx_px, dy_px): """ This causes the scene to appear to translate right and up (i.e., what really happens is the camera is translated left and down). This is also called "panning" in some software packages. Passing in negative delta values causes the opposite motion. """ direction = self.target - self.position distance_from_target = direction.length() direction = direction.normalized() speed_per_radius = self.get_translation_speed(distance_from_target) px_per_unit = self.vport_radius_px / speed_per_radius right = direction ^ self.up translation = (right * (-dx_px / px_per_unit) + self.up * (-dy_px / px_per_unit)) self.position = self.position + translation self.target = self.target + translation
0.002301
def irange(self, minimum=None, maximum=None, inclusive=(True, True), reverse=False): """ Create an iterator of values between `minimum` and `maximum`. `inclusive` is a pair of booleans that indicates whether the minimum and maximum ought to be included in the range, respectively. The default is (True, True) such that the range is inclusive of both minimum and maximum. Both `minimum` and `maximum` default to `None` which is automatically inclusive of the start and end of the list, respectively. When `reverse` is `True` the values are yielded from the iterator in reverse order; `reverse` defaults to `False`. """ minimum = self._key(minimum) if minimum is not None else None maximum = self._key(maximum) if maximum is not None else None return self.irange_key( min_key=minimum, max_key=maximum, inclusive=inclusive, reverse=reverse, )
0.002997
def _draw(self, txt, final=False):
    """Print the rendered string to stdout."""
    if not self._file_mode:
        # If the user presses Ctrl+C this ensures we still start writing
        # from the beginning of the line
        sys.stdout.write("\r")
    sys.stdout.write(txt)
    if final and not isinstance(self._widget, _HiddenWidget):
        sys.stdout.write("\n")
    else:
        if not self._file_mode:
            sys.stdout.write("\r")
    sys.stdout.flush()
0.005825
def get_zone_info(cls, area_str, match_type='EXACT', result_type='LIST'): """ 输入包含省份、城市、地区信息的内容,返回地区编号; :param: * area_str: (string) 要查询的区域,省份、城市、地区信息,比如 北京市 * match_type: (string) 查询匹配模式,默认值 'EXACT',表示精确匹配,可选 'FUZZY',表示模糊查询 * result_type: (string) 返回结果数量类型,默认值 'LIST',表示返回列表,可选 'SINGLE_STR',返回结果的第一个地区编号字符串 :returns: * 返回类型 根据 resule_type 决定返回类型是列表或者单一字符串,列表中包含元组 比如:[('110000', '北京市')],元组中的第一个元素是地区码, 第二个元素是对应的区域内容 结果最多返回 20 个。 举例如下:: from fishbase.fish_data import * print('--- fish_data get_zone_info demo ---') result = IdCard.get_zone_info(area_str='北京市') print(result) # 模糊查询 result = IdCard.get_zone_info(area_str='西安市', match_type='FUZZY') print(result) result0 = [] for i in result: result0.append(i[0]) print('---西安市---') print(len(result0)) print(result0) # 模糊查询, 结果返回设定 single_str result = IdCard.get_zone_info(area_str='西安市', match_type='FUZZY', result_type='SINGLE_STR') print(result) # 模糊查询, 结果返回设定 single_str,西安市 和 西安 的差别 result = IdCard.get_zone_info(area_str='西安', match_type='FUZZY', result_type='SINGLE_STR') print(result) print('---') 输出结果:: --- fish_data get_zone_info demo --- [('110000', '北京市')] 130522198407316471 True ---西安市--- 11 ['610100', '610101', '610102', '610103', '610104', '610111', '610112', '610113', '610114', '610115', '610116'] 610100 220403 --- """ values = [] if match_type == 'EXACT': values = sqlite_query('fish_data.sqlite', 'select zone, areanote from cn_idcard where areanote = :area', {"area": area_str}) if match_type == 'FUZZY': values = sqlite_query('fish_data.sqlite', 'select zone, areanote from cn_idcard where areanote like :area', {"area": '%' + area_str + '%'}) # result_type 结果数量判断处理 if result_type == 'LIST': # 如果返回记录多,大于 20 项,只返回前面 20 个结果 if len(values) > 20: values = values[0:20] return values if result_type == 'SINGLE_STR': if len(values) == 0: return '' if len(values) > 0: value_str = values[0][0] return value_str
0.003357
def check_for_partition(self, schema, table, partition):
    """
    Checks whether a partition exists

    :param schema: Name of hive schema (database) @table belongs to
    :type schema: str
    :param table: Name of hive table @partition belongs to
    :type table: str
    :param partition: Expression that matches the partitions to check for
        (eg `a = 'b' AND c = 'd'`)
    :type partition: str
    :rtype: bool

    >>> hh = HiveMetastoreHook()
    >>> t = 'static_babynames_partitioned'
    >>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
    True
    """
    with self.metastore as client:
        partitions = client.get_partitions_by_filter(
            schema, table, partition, 1)

    if partitions:
        return True
    else:
        return False
0.002317
def str_2_obj(obj_str, tg_type=None): """Converts a string into an object according to the given tango type :param obj_str: the string to be converted :type obj_str: :py:obj:`str` :param tg_type: tango type :type tg_type: :class:`tango.CmdArgType` :return: an object calculated from the given string :rtype: :py:obj:`object` """ if tg_type is None: return obj_str f = str if is_scalar_type(tg_type): if is_numerical_type(tg_type): if obj_str in __NO_STR_VALUE: return None if is_int_type(tg_type): f = int elif is_float_type(tg_type): f = float elif is_bool_type(tg_type): f = bool_ return f(obj_str)
0.001258
def get_package_counts(package_descriptors, targets, repos_data): """ Get the number of packages per target and repository. :return: a dict indexed by targets containing a list of integer values (one for each repo) """ counts = {} for target in targets: counts[target] = [0] * len(repos_data) for package_descriptor in package_descriptors.values(): debian_pkg_name = package_descriptor.debian_pkg_name for target in targets: for i, repo_data in enumerate(repos_data): version = repo_data.get(target, {}).get(debian_pkg_name, None) if version: counts[target][i] += 1 return counts
0.001414
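# A hypothetical call to get_package_counts above, showing the counting
# behaviour; Desc is a stand-in for the real descriptor type and only models
# the one attribute the function reads.
from collections import namedtuple

Desc = namedtuple('Desc', ['debian_pkg_name'])

package_descriptors = {
    'foo': Desc('ros-foo'),
    'bar': Desc('ros-bar'),
}
targets = ['bionic']
repos_data = [
    {'bionic': {'ros-foo': '1.0.0'}},                        # only foo
    {'bionic': {'ros-foo': '1.0.1', 'ros-bar': '2.0.0'}},    # foo and bar
]

print(get_package_counts(package_descriptors, targets, repos_data))
# {'bionic': [1, 2]}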
def _get_project(msg, key='project'): ''' Return the project as `foo` or `user/foo` if the project is a fork. ''' project = msg[key]['name'] ns = msg[key].get('namespace') if ns: project = '/'.join([ns, project]) if msg[key]['parent']: user = msg[key]['user']['name'] project = '/'.join(['fork', user, project]) return project
0.002618
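# An illustrative message for _get_project above; the dict shape is inferred
# from how the function reads it and is not taken from any real payload.
msg = {
    'project': {
        'name': 'pagure',
        'namespace': 'infra',
        'parent': {'name': 'pagure'},   # truthy parent -> treated as a fork
        'user': {'name': 'alice'},
    }
}
print(_get_project(msg))   # 'fork/alice/infra/pagure'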
def _extract_packages(self):
        """
        Extract a package in a new directory.
        """
        if not hasattr(self, "retrieved_packages_unpacked"):
            self.retrieved_packages_unpacked = [self.package_name]
        for path in self.retrieved_packages_unpacked:
            package_name = basename(path)
            self.path_unpacked = join(CFG_UNPACKED_FILES,
                                      package_name.split('.')[0])
            self.logger.debug("Extracting package: %s"
                              % (path.split("/")[-1],))
            try:
                if "_archival_pdf" in self.path_unpacked:
                    # Strip the "_archival_pdf" suffix explicitly; str.rstrip
                    # removes a *character set*, not a suffix, and could eat
                    # trailing letters of the package name.
                    if self.path_unpacked.endswith("_archival_pdf"):
                        self.path_unpacked = \
                            self.path_unpacked[:-len("_archival_pdf")]
                    ZipFile(path).extractall(join(self.path_unpacked,
                                                  "archival_pdfs"))
                else:
                    ZipFile(path).extractall(self.path_unpacked)
                #TarFile.open(path).extractall(self.path_unpacked)
            except Exception:
                register_exception(alert_admin=True,
                                   prefix="OUP error extracting package.")
                self.logger.error("Error extracting package file: %s"
                                  % (path,))
        if hasattr(self, "path_unpacked"):
            return self.path_unpacked
0.002131
def v_magnitude(v): """ Simple vector helper function returning the length of a vector. ``v`` may be any vector, with any number of dimensions """ return math.sqrt(sum(v[i]*v[i] for i in range(len(v))))
0.008811
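# Quick checks for v_magnitude above.
print(v_magnitude([3, 4]))       # 5.0, the classic 3-4-5 triangle
print(v_magnitude((1, 2, 2)))    # 3.0; any number of dimensions works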
def get_breadcrumbs(url, request=None): """ Given a url returns a list of breadcrumbs, which are each a tuple of (name, url). """ from wave.reverse import preserve_builtin_query_params from wave.settings import api_settings from wave.views import APIView view_name_func = api_settings.VIEW_NAME_FUNCTION def breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen): """ Add tuples of (name, url) to the breadcrumbs list, progressively chomping off parts of the url. """ try: (view, unused_args, unused_kwargs) = resolve(url) except Exception: pass else: # Check if this is a REST framework view, # and if so add it to the breadcrumbs cls = getattr(view, 'cls', None) if cls is not None and issubclass(cls, APIView): # Don't list the same view twice in a row. # Probably an optional trailing slash. if not seen or seen[-1] != view: suffix = getattr(view, 'suffix', None) name = view_name_func(cls, suffix) insert_url = preserve_builtin_query_params(prefix + url, request) breadcrumbs_list.insert(0, (name, insert_url)) seen.append(view) if url == '': # All done return breadcrumbs_list elif url.endswith('/'): # Drop trailing slash off the end and continue to try to # resolve more breadcrumbs url = url.rstrip('/') return breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen) # Drop trailing non-slash off the end and continue to try to # resolve more breadcrumbs url = url[:url.rfind('/') + 1] return breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen) prefix = get_script_prefix().rstrip('/') url = url[len(prefix):] return breadcrumbs_recursive(url, [], prefix, [])
0.000984
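# The URL-chomping recursion in get_breadcrumbs is easier to see in isolation;
# this dependency-free sketch reproduces only that part (no Django resolving).
def chomp(url):
    """Yield successively shorter prefixes of a URL path."""
    while True:
        yield url
        if url == '':
            return
        if url.endswith('/'):
            url = url.rstrip('/')            # drop the trailing slash
        else:
            url = url[:url.rfind('/') + 1]   # drop the last path segment

print(list(chomp('/api/users/1/')))
# ['/api/users/1/', '/api/users/1', '/api/users/', '/api/users',
#  '/api/', '/api', '/', '']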
def fetch_ticker(self) -> Ticker: """Fetch the market ticker.""" return self._fetch('ticker', self.market.code)(self._ticker)()
0.013986
def get_hosting_device_driver(self, context, id): """Returns device driver for hosting device template with <id>.""" if id is None: return try: return self._hosting_device_drivers[id] except KeyError: try: template = self._get_hosting_device_template(context, id) self._hosting_device_drivers[id] = importutils.import_object( template['device_driver']) except (ImportError, TypeError, n_exc.NeutronException): LOG.exception("Error loading hosting device driver for " "hosting device template %s", id) return self._hosting_device_drivers.get(id)
0.002725
def list_records(self, file_const=None): """Iterate through the file records""" for r in self._dataset.files: if file_const and r.minor_type != file_const: continue yield self.instance_from_name(r.path)
0.007752
async def release(data): """ Release a session :param data: Information obtained from a POST request. The content type is application/json. The correct packet form should be as follows: { 'token': UUID token from current session start 'command': 'release' } """ global session if not feature_flags.use_protocol_api_v2(): session.adapter.remove_instrument('left') session.adapter.remove_instrument('right') else: session.adapter.cache_instruments() session = None return web.json_response({"message": "calibration session released"})
0.00161
def bottleneck_layer(inputs, hparams, name="discrete_bottleneck"): """Computes latents given inputs (typically, compressed targets).""" [ latents_dense, latents_discrete, extra_loss, embed_fn, _, ] = hparams.bottleneck(inputs=inputs, filter_size=hparams.compress_filter_size, name=name, mode=hparams.mode) if DO_SUMMARIES: tf.summary.histogram("discrete_latents", tf.reshape(latents_discrete, [-1])) return latents_dense, latents_discrete, extra_loss, embed_fn
0.00774
def from_hertz(self, hertz, standard_pitch=440): """Set the Note name and pitch, calculated from the hertz value. The standard_pitch argument can be used to set the pitch of A-4, from which the rest is calculated. """ value = ((log((float(hertz) * 1024) / standard_pitch, 2) + 1.0 / 24) * 12 + 9) # notes.note_to_int("A") self.name = notes.int_to_note(int(value) % 12) self.octave = int(value / 12) - 6 return self
0.006098
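# Working the from_hertz formula through for the reference pitch itself,
# assuming the surrounding `notes` helpers behave as their names suggest.
from math import log

hertz, standard_pitch = 440, 440
value = (log((float(hertz) * 1024) / standard_pitch, 2) + 1.0 / 24) * 12 + 9
# log2(1024) = 10, so value = (10 + 1/24) * 12 + 9 = 129.5
print(int(value) % 12)       # 9 -> notes.int_to_note(9) == 'A'
print(int(value / 12) - 6)   # 4 -> octave 4, i.e. A-4 as expected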
def ShowWindow(handle: int, cmdShow: int) -> bool:
    """
    ShowWindow from Win32.
    handle: int, the handle of a native window.
    cmdShow: int, a value in class `SW`.
    Return bool, True if it succeeds, otherwise False.
    """
    return ctypes.windll.user32.ShowWindow(ctypes.c_void_p(handle), cmdShow)
0.003247
def PushAttributeContainer(self, serialized_data): """Pushes a serialized attribute container onto the list. Args: serialized_data (bytes): serialized attribute container data. """ self._list.append(serialized_data) self.data_size += len(serialized_data) self.next_sequence_number += 1
0.003165
def get_dataset(self, dataset): """ Checks to see if the dataset is present. If not, it downloads and unzips it. """ # If the dataset is present, no need to download anything. success = True dataset_path = self.base_dataset_path + dataset if not isdir(dataset_path): # Try 5 times to download. The download page is unreliable, so we need a few tries. was_error = False for iteration in range(5): # Guard against trying again if successful if iteration == 0 or was_error is True: zip_path = dataset_path + ".zip" # Download zip files if they're not there if not isfile(zip_path): try: with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset) as pbar: urlretrieve(self.datasets[dataset]["url"], zip_path, pbar.hook) except Exception as ex: print("Error downloading %s: %s" % (dataset, ex)) was_error = True # Unzip the data files if not isdir(dataset_path): try: with zipfile.ZipFile(zip_path) as zip_archive: zip_archive.extractall(path=dataset_path) zip_archive.close() except Exception as ex: print("Error unzipping %s: %s" % (zip_path, ex)) # Usually the error is caused by a bad zip file. # Delete it so the program will try to download it again. try: remove(zip_path) except FileNotFoundError: pass was_error = True if was_error: print("\nThis recognizer is trained by the CASIA handwriting database.") print("If the download doesn't work, you can get the files at %s" % self.datasets[dataset]["url"]) print("If you have download problems, " "wget may be effective at downloading because of download resuming.") success = False return success
0.004115
def unicode_is_ascii(u_string): """Determine if unicode string only contains ASCII characters. :param str u_string: unicode string to check. Must be unicode and not Python 2 `str`. :rtype: bool """ assert isinstance(u_string, str) try: u_string.encode('ascii') return True except UnicodeEncodeError: return False
0.002681
def get_extra_path(name):
    """
    :param name: name in the format ``helper.path_name``, e.g. ``sip.default_sip_dir``
    """
    # Paths are cached in path_cache
    helper_name, _, key = name.partition(".")
    helper = path_helpers.get(helper_name)
    if not helper:
        raise ValueError("Helper '{0}' not found.".format(helper_name))
    if name not in path_cache:
        extra_paths = helper.extra_paths()
        path_cache.update(extra_paths)
    extra_path = path_cache.get(name)
    if not extra_path:
        raise ValueError("Helper '{0}' has no path called {1}".format(helper_name, name))
    return extra_path
0.003263
def run_once(self):
        '''consume queues and feed tasks to fetcher, once'''

        self._update_projects()
        self._check_task_done()
        self._check_request()
        while self._check_cronjob():
            pass
        self._check_select()
        self._check_delete()
        self._try_dump_cnt()
0.006329
def create_volume(DryRun=None, Size=None, SnapshotId=None, AvailabilityZone=None, VolumeType=None, Iops=None, Encrypted=None, KmsKeyId=None, TagSpecifications=None): """ Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information see Regions and Endpoints . You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume. You can create encrypted volumes with the Encrypted parameter. Encrypted volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide . You can tag your volumes during creation. For more information, see Tagging Your Amazon EC2 Resources . For more information, see Creating an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide . See also: AWS API Documentation Examples This example creates an 80 GiB General Purpose (SSD) volume in the Availability Zone us-east-1a. Expected Output: This example creates a new Provisioned IOPS (SSD) volume with 1000 provisioned IOPS from a snapshot in the Availability Zone us-east-1a. Expected Output: :example: response = client.create_volume( DryRun=True|False, Size=123, SnapshotId='string', AvailabilityZone='string', VolumeType='standard'|'io1'|'gp2'|'sc1'|'st1', Iops=123, Encrypted=True|False, KmsKeyId='string', TagSpecifications=[ { 'ResourceType': 'customer-gateway'|'dhcp-options'|'image'|'instance'|'internet-gateway'|'network-acl'|'network-interface'|'reserved-instances'|'route-table'|'snapshot'|'spot-instances-request'|'subnet'|'security-group'|'volume'|'vpc'|'vpn-connection'|'vpn-gateway', 'Tags': [ { 'Key': 'string', 'Value': 'string' }, ] }, ] ) :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation . Otherwise, it is UnauthorizedOperation . :type Size: integer :param Size: The size of the volume, in GiBs. Constraints: 1-16384 for gp2 , 4-16384 for io1 , 500-16384 for st1 , 500-16384 for sc1 , and 1-1024 for standard . If you specify a snapshot, the volume size must be equal to or larger than the snapshot size. Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size. :type SnapshotId: string :param SnapshotId: The snapshot from which to create the volume. :type AvailabilityZone: string :param AvailabilityZone: [REQUIRED] The Availability Zone in which to create the volume. Use DescribeAvailabilityZones to list the Availability Zones that are currently available to you. :type VolumeType: string :param VolumeType: The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes. Default: standard :type Iops: integer :param Iops: Only valid for Provisioned IOPS SSD volumes. The number of I/O operations per second (IOPS) to provision for the volume, with a maximum ratio of 50 IOPS/GiB. Constraint: Range is 100 to 20000 for Provisioned IOPS SSD volumes :type Encrypted: boolean :param Encrypted: Specifies whether the volume should be encrypted. 
Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are automatically encrypted. There is no way to create an encrypted volume from an unencrypted snapshot or vice versa. If your AMI uses encrypted volumes, you can only launch it on supported instance types. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide . :type KmsKeyId: string :param KmsKeyId: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1 :012345678910 :key/abcd1234-a123-456a-a12b-a123b4cd56ef . If a KmsKeyId is specified, the Encrypted flag must also be set. :type TagSpecifications: list :param TagSpecifications: The tags to apply to the volume during creation. (dict) --The tags to apply to a resource when the resource is being created. ResourceType (string) --The type of resource to tag. Currently, the resource types that support tagging on creation are instance and volume . Tags (list) --The tags to apply to the resource. (dict) --Describes a tag. Key (string) --The key of the tag. Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with aws: Value (string) --The value of the tag. Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters. :rtype: dict :return: { 'VolumeId': 'string', 'Size': 123, 'SnapshotId': 'string', 'AvailabilityZone': 'string', 'State': 'creating'|'available'|'in-use'|'deleting'|'deleted'|'error', 'CreateTime': datetime(2015, 1, 1), 'Attachments': [ { 'VolumeId': 'string', 'InstanceId': 'string', 'Device': 'string', 'State': 'attaching'|'attached'|'detaching'|'detached', 'AttachTime': datetime(2015, 1, 1), 'DeleteOnTermination': True|False }, ], 'Tags': [ { 'Key': 'string', 'Value': 'string' }, ], 'VolumeType': 'standard'|'io1'|'gp2'|'sc1'|'st1', 'Iops': 123, 'Encrypted': True|False, 'KmsKeyId': 'string' } """ pass
0.005098
def check_args(args): """Checks the arguments and options.""" # Checking that only VCF can have a - (stdout) as output if args.output_format not in _streamable_format and args.output == "-": logger.error("{} format cannot be streamed to standard output" "".format(args.output_format)) sys.exit(1) # Checking the file extensions if args.output_format == "vcf" and args.output != "-": if not args.output.endswith(".vcf"): args.output += ".vcf" elif args.output_format == "plink": if args.output.endswith(".bed"): args.output = args.output[:-4]
0.001553
def get_release_definitions(self, project, search_text=None, expand=None, artifact_type=None, artifact_source_id=None, top=None, continuation_token=None, query_order=None, path=None, is_exact_name_match=None, tag_filter=None, property_filters=None, definition_id_filter=None, is_deleted=None, search_text_contains_folder_name=None): """GetReleaseDefinitions. [Preview API] Get a list of release definitions. :param str project: Project ID or project name :param str search_text: Get release definitions with names containing searchText. :param str expand: The properties that should be expanded in the list of Release definitions. :param str artifact_type: Release definitions with given artifactType will be returned. Values can be Build, Jenkins, GitHub, Nuget, Team Build (external), ExternalTFSBuild, Git, TFVC, ExternalTfsXamlBuild. :param str artifact_source_id: Release definitions with given artifactSourceId will be returned. e.g. For build it would be {projectGuid}:{BuildDefinitionId}, for Jenkins it would be {JenkinsConnectionId}:{JenkinsDefinitionId}, for TfsOnPrem it would be {TfsOnPremConnectionId}:{ProjectName}:{TfsOnPremDefinitionId}. For third-party artifacts e.g. TeamCity, BitBucket you may refer 'uniqueSourceIdentifier' inside vss-extension.json at https://github.com/Microsoft/vsts-rm-extensions/blob/master/Extensions. :param int top: Number of release definitions to get. :param str continuation_token: Gets the release definitions after the continuation token provided. :param str query_order: Gets the results in the defined order. Default is 'IdAscending'. :param str path: Gets the release definitions under the specified path. :param bool is_exact_name_match: 'true'to gets the release definitions with exact match as specified in searchText. Default is 'false'. :param [str] tag_filter: A comma-delimited list of tags. Only release definitions with these tags will be returned. :param [str] property_filters: A comma-delimited list of extended properties to be retrieved. If set, the returned Release Definitions will contain values for the specified property Ids (if they exist). If not set, properties will not be included. Note that this will not filter out any Release Definition from results irrespective of whether it has property set or not. :param [str] definition_id_filter: A comma-delimited list of release definitions to retrieve. :param bool is_deleted: 'true' to get release definitions that has been deleted. Default is 'false' :param bool search_text_contains_folder_name: 'true' to get the release definitions under the folder with name as specified in searchText. Default is 'false'. 
:rtype: [ReleaseDefinition] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if search_text is not None: query_parameters['searchText'] = self._serialize.query('search_text', search_text, 'str') if expand is not None: query_parameters['$expand'] = self._serialize.query('expand', expand, 'str') if artifact_type is not None: query_parameters['artifactType'] = self._serialize.query('artifact_type', artifact_type, 'str') if artifact_source_id is not None: query_parameters['artifactSourceId'] = self._serialize.query('artifact_source_id', artifact_source_id, 'str') if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str') if query_order is not None: query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str') if path is not None: query_parameters['path'] = self._serialize.query('path', path, 'str') if is_exact_name_match is not None: query_parameters['isExactNameMatch'] = self._serialize.query('is_exact_name_match', is_exact_name_match, 'bool') if tag_filter is not None: tag_filter = ",".join(tag_filter) query_parameters['tagFilter'] = self._serialize.query('tag_filter', tag_filter, 'str') if property_filters is not None: property_filters = ",".join(property_filters) query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str') if definition_id_filter is not None: definition_id_filter = ",".join(definition_id_filter) query_parameters['definitionIdFilter'] = self._serialize.query('definition_id_filter', definition_id_filter, 'str') if is_deleted is not None: query_parameters['isDeleted'] = self._serialize.query('is_deleted', is_deleted, 'bool') if search_text_contains_folder_name is not None: query_parameters['searchTextContainsFolderName'] = self._serialize.query('search_text_contains_folder_name', search_text_contains_folder_name, 'bool') response = self._send(http_method='GET', location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665', version='5.1-preview.3', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[ReleaseDefinition]', self._unwrap_collection(response))
0.005495
def get_parent_dir(name): """Get the parent directory of a filename.""" parent_dir = os.path.dirname(os.path.dirname(name)) if parent_dir: return parent_dir return os.path.abspath('.')
0.004808
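# Expected behaviour of get_parent_dir above.
print(get_parent_dir('/srv/app/config.yml'))  # '/srv'
print(get_parent_dir('config.yml'))           # no parent dir -> absolute cwd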
def finditem(self, item, threshold=None): """Return most similar item to the provided one, or None if nothing exceeds the threshold. >>> from ngram import NGram >>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")], ... key=lambda x:x[1].lower()) >>> n.finditem((3, 'Hom')) (1, 'Ham') >>> n.finditem((4, "Oggsy")) (2, 'Eggsy') >>> n.finditem((4, "Oggsy"), 0.8) """ results = self.searchitem(item, threshold) if results: return results[0][0] else: return None
0.003257
def failed(self, reason=None):
        """Mark the order as failed (it was not successfully created on the broker side).

        Arguments:
            reason {str} -- the reason for the failure
        """
        # Order creation failed (e.g. invalid order, off-exchange rejection,
        # price above the limit-up price, price below the limit-down price,
        # or a communication failure)
        self._status = ORDER_STATUS.FAILED
        self.reason = str(reason)
0.008
def handle_delete_user(self, req): """Handles the DELETE v2/<account>/<user> call for deleting a user from an account. Can only be called by an account .admin. :param req: The swob.Request to process. :returns: swob.Response, 2xx on success. """ # Validate path info account = req.path_info_pop() user = req.path_info_pop() if req.path_info or not account or account[0] == '.' or not user or \ user[0] == '.': return HTTPBadRequest(request=req) # if user to be deleted is reseller_admin, then requesting # user must be the super_admin is_reseller_admin = self.is_user_reseller_admin(req, account, user) if not is_reseller_admin and not req.credentials_valid: # if user to be deleted can't be found, return 404 return HTTPNotFound(request=req) elif is_reseller_admin and not self.is_super_admin(req): return HTTPForbidden(request=req) if not self.is_account_admin(req, account): return self.denied_response(req) # Delete the user's existing token, if any. path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user)) resp = self.make_pre_authed_request( req.environ, 'HEAD', path).get_response(self.app) if resp.status_int == 404: return HTTPNotFound(request=req) elif resp.status_int // 100 != 2: raise Exception('Could not obtain user details: %s %s' % (path, resp.status)) candidate_token = resp.headers.get('x-object-meta-auth-token') if candidate_token: object_name = self._get_concealed_token(candidate_token) path = quote('/v1/%s/.token_%s/%s' % (self.auth_account, object_name[-1], object_name)) resp = self.make_pre_authed_request( req.environ, 'DELETE', path).get_response(self.app) if resp.status_int // 100 != 2 and resp.status_int != 404: raise Exception('Could not delete possibly existing token: ' '%s %s' % (path, resp.status)) # Delete the user entry itself. path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user)) resp = self.make_pre_authed_request( req.environ, 'DELETE', path).get_response(self.app) if resp.status_int // 100 != 2 and resp.status_int != 404: raise Exception('Could not delete the user object: %s %s' % (path, resp.status)) return HTTPNoContent(request=req)
0.001505
def class_get_trait_help(cls, trait, inst=None):
        """Get the help string for a single trait.

        If `inst` is given, its current trait values will be used in place of
        the class default.
        """
        assert inst is None or isinstance(inst, cls)
        lines = []
        header = "--%s.%s=<%s>" % (cls.__name__, trait.name, trait.__class__.__name__)
        lines.append(header)
        if inst is not None:
            lines.append(indent('Current: %r' % getattr(inst, trait.name), 4))
        else:
            try:
                dvr = repr(trait.get_default_value())
            except Exception:
                dvr = None # ignore defaults we can't construct
            if dvr is not None:
                if len(dvr) > 64:
                    dvr = dvr[:61]+'...'
                lines.append(indent('Default: %s' % dvr, 4))
        if 'Enum' in trait.__class__.__name__:
            # include Enum choices
            lines.append(indent('Choices: %r' % (trait.values,)))
        help = trait.get_metadata('help')
        if help is not None:
            help = '\n'.join(wrap_paragraphs(help, 76))
            lines.append(indent(help, 4))
        return '\n'.join(lines)
0.004102
def _minutes_to_exclude(self): """ Calculate the minutes which should be excluded when a window occurs on days which had an early close, i.e. days where the close based on the regular period of minutes per day and the market close do not match. Returns ------- List of DatetimeIndex representing the minutes to exclude because of early closes. """ market_opens = self._market_opens.values.astype('datetime64[m]') market_closes = self._market_closes.values.astype('datetime64[m]') minutes_per_day = (market_closes - market_opens).astype(np.int64) early_indices = np.where( minutes_per_day != self._minutes_per_day - 1)[0] early_opens = self._market_opens[early_indices] early_closes = self._market_closes[early_indices] minutes = [(market_open, early_close) for market_open, early_close in zip(early_opens, early_closes)] return minutes
0.001942
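# The early-close detection in _minutes_to_exclude boils down to datetime64
# minute arithmetic; a standalone illustration with made-up timestamps.
import numpy as np

opens = np.array(['2015-12-24T14:31', '2015-12-28T14:31'], dtype='datetime64[m]')
closes = np.array(['2015-12-24T18:00', '2015-12-28T21:00'], dtype='datetime64[m]')

minutes_per_day = (closes - opens).astype(np.int64)
print(minutes_per_day)                       # [209 389]
# A regular 390-minute session stores close - open = 389, so any smaller
# value flags an early close (index 0 here, the half day).
print(np.where(minutes_per_day != 389)[0])   # [0]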
def error(self): """Returns the error for this barrier and all work items, if any.""" # Copy the error from any failed item to be the error for the whole # barrier. The first error seen "wins". Also handles the case where # the WorkItems passed into the barrier have already completed and # been marked with errors. for item in self: if isinstance(item, WorkItem) and item.error: return item.error return None
0.004057
def set_subservice(self, obj): """Add a sub-service object. :param obj: stackinabox.services.StackInABoxService instance :raises: RouteAlreadyRegisteredError if the route is already registered :returns: n/a """ # ensure there is not already a sub-service if self.obj is not None: raise RouteAlreadyRegisteredError( 'Service Router ({0} - {1}): Route {2} already has a ' 'sub-service handler' .format(id(self), self.service_name, self.uri)) # warn if any methods are already registered if len(self.methods): logger.debug( 'WARNING: Service Router ({0} - {1}): Methods detected ' 'on Route {2}. Sub-Service {3} may be hidden.' .format(id(self), self.service_name, self.uri, obj.name)) # Ensure we do not have any circular references assert(obj != self.parent_obj) # if no errors, save the object and update the URI self.obj = obj self.obj.base_url = '{0}/{1}'.format(self.uri, self.service_name)
0.001771
def angle(array_of_xyzs):
    """
    Calculates the angle, in radians, formed at the second (middle) point by
    three coordinate points (I could not find a package that does this but if
    one exists that would probably be better). Used for Angle constraints.
    """
    ab = array_of_xyzs[0] - array_of_xyzs[1]
    cb = array_of_xyzs[2] - array_of_xyzs[1]
    return np.arccos((np.dot(ab,cb)) / (np.sqrt(ab[0]**2 + ab[1]**2 \
        + ab[2]**2) * np.sqrt(cb[0]**2 + cb[1]**2 + cb[2]**2)))
0.01573
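# A sanity check for angle above: two perpendicular unit vectors meeting at
# the origin should give pi/2.
import numpy as np

points = np.array([[1.0, 0.0, 0.0],    # a
                   [0.0, 0.0, 0.0],    # b, the vertex
                   [0.0, 1.0, 0.0]])   # c
print(angle(points))               # ~1.5708 (pi/2, in radians)
print(np.degrees(angle(points)))   # ~90.0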
def key(self, att=None): """Returns the Redis key where the values are stored.""" if att is not None: return self._key[self.id][att] else: return self._key[self.id]
0.009434
def flip_strand(self): """Flips the strand of the alleles.""" self.reference = complement_alleles(self.reference) self.coded = complement_alleles(self.coded) self.variant.complement_alleles()
0.008969
def _atexit__register(self, func, *targs, **kwargs): """ Intercept :func:`atexit.register` calls, diverting any to :func:`shutil.rmtree` into a private list. """ if func == shutil.rmtree: self.deferred.append((func, targs, kwargs)) return self.original['register'](func, *targs, **kwargs)
0.00554
def do_escape_nl(self, arg): """ Escape newlines in any responses """ if arg.lower() == 'off': self.escape_nl = False else: self.escape_nl = True
0.009569
def load(cls, query_name):
        """Load a pre-made query.

        These queries are distributed with lsstprojectmeta. See
        :file:`lsstprojectmeta/data/githubv4/README.rst` inside the
        package repository for details on available queries.

        Parameters
        ----------
        query_name : `str`
            Name of the query, such as ``'technote_repo'``.

        Returns
        -------
        github_query : `GitHubQuery`
            A GitHub query or mutation object that you can pass to
            `github_request` to execute the request itself.
        """
        template_path = os.path.join(
            os.path.dirname(__file__),
            '../data/githubv4',
            query_name + '.graphql')
        with open(template_path) as f:
            query_data = f.read()

        return cls(query_data, name=query_name)
0.002336
def call_hook(message,
              attachment=None,
              color='good',
              short=False,
              identifier=None,
              channel=None,
              username=None,
              icon_emoji=None):
    '''
    Send message to Slack incoming webhook.

    :param message: The topic of the message.
    :param attachment: The message to send to the Slack WebHook.
    :param color: The color of the left-side border of the attachment.
    :param short: An optional flag indicating whether the value is short
                  enough to be displayed side-by-side with other values.
    :param identifier: The identifier of WebHook.
    :param channel: The channel to use instead of the WebHook default.
    :param username: Username to use instead of WebHook default.
    :param icon_emoji: Icon to use instead of WebHook default.
    :return: Boolean if message was sent successfully.

    CLI Example:

    .. code-block:: bash

        salt '*' slack.call_hook message='Hello, from SaltStack'

    '''
    base_url = 'https://hooks.slack.com/services/'
    if not identifier:
        identifier = _get_hook_id()

    url = _urljoin(base_url, identifier)

    if not message:
        log.error('message is required option')

    if attachment:
        payload = {
            'attachments': [
                {
                    'fallback': message,
                    'color': color,
                    'pretext': message,
                    'fields': [
                        {
                            "value": attachment,
                            "short": short,
                        }
                    ]
                }
            ]
        }
    else:
        payload = {
            'text': message,
        }

    if channel:
        payload['channel'] = channel

    if username:
        payload['username'] = username

    if icon_emoji:
        payload['icon_emoji'] = icon_emoji

    data = _urlencode(
        {
            'payload': salt.utils.json.dumps(payload)
        }
    )

    result = salt.utils.http.query(url, method='POST', data=data, status=True)

    if result['status'] <= 201:
        return True
    else:
        return {
            'res': False,
            'message': result.get('body', result['status'])
        }
0.000431
def _do_scale(image, size): """Rescale the image by scaling the smaller spatial dimension to `size`.""" shape = tf.cast(tf.shape(image), tf.float32) w_greater = tf.greater(shape[0], shape[1]) shape = tf.cond(w_greater, lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32), lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32)) return tf.image.resize_bicubic([image], shape)[0]
0.018141
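# The TensorFlow graph code in _do_scale obscures a simple shape rule; the same
# computation in plain Python (resizing itself omitted): the smaller spatial
# dimension becomes `size` and the other is scaled by the same factor.
def scaled_shape(height, width, size):
    if height > width:
        return int(height / width * size), size
    return size, int(width / height * size)

print(scaled_shape(480, 640, 224))   # (224, 298) -- 480 is the smaller side
print(scaled_shape(640, 480, 224))   # (298, 224)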
def as_field_error(node, secid): """ convert a fieldExceptions element to a FieldError or FieldError array """ assert node.Name == 'fieldExceptions' if node.IsArray: return [XmlHelper.as_field_error(node.GetValue(_), secid) for _ in range(node.NumValues)] else: fld = XmlHelper.get_child_value(node, 'fieldId') info = node.GetElement('errorInfo') src = XmlHelper.get_child_value(info, 'source') code = XmlHelper.get_child_value(info, 'code') cat = XmlHelper.get_child_value(info, 'category') msg = XmlHelper.get_child_value(info, 'message') subcat = XmlHelper.get_child_value(info, 'subcategory') return FieldError(security=secid, field=fld, source=src, code=code, category=cat, message=msg, subcategory=subcat)
0.005675
def plot_predict(self, h=5, past_values=20, intervals=True, **kwargs): """ Makes forecast with the estimated model Parameters ---------- h : int (default : 5) How many steps ahead would you like to forecast? past_values : int (default : 20) How many past observations to show on the forecast graph? intervals : Boolean Would you like to show prediction intervals for the forecast? Returns ---------- - Plot of the forecast """ import matplotlib.pyplot as plt import seaborn as sns figsize = kwargs.get('figsize',(10,7)) if self.latent_variables.estimated is False: raise Exception("No latent variables estimated!") else: # Retrieve data, dates and (transformed) latent variables sigma2, Y, scores = self._model(self.latent_variables.get_z_values()) date_index = self.shift_dates(h) if self.latent_variables.estimation_method in ['M-H']: sim_vector = self._sim_prediction_bayes(h, 15000) error_bars = [] for pre in range(5,100,5): error_bars.append(np.insert([np.percentile(i,pre) for i in sim_vector], 0, sigma2[-1])) forecasted_values = np.insert([np.mean(i) for i in sim_vector], 0, sigma2[-1]) plot_values = np.append(sigma2[-1-past_values:-2], forecasted_values) plot_index = date_index[-h-past_values:] else: t_z = self.transform_z() sim_values = self._sim_prediction(sigma2, Y, scores, h, t_z, 15000) error_bars, forecasted_values, plot_values, plot_index = self._summarize_simulations(sigma2, sim_values, date_index, h, past_values) plt.figure(figsize=figsize) if intervals == True: alpha =[0.15*i/float(100) for i in range(50,12,-2)] for count, pre in enumerate(error_bars): plt.fill_between(date_index[-h-1:], error_bars[count], error_bars[-count-1], alpha=alpha[count]) plt.plot(plot_index, plot_values) plt.title("Forecast for " + self.data_name + " Conditional Volatility") plt.xlabel("Time") plt.ylabel(self.data_name + " Conditional Volatility") plt.show()
0.009057
def convert(self, blob, size=500): """Size is the maximum horizontal size.""" file_list = [] with make_temp_file(blob) as in_fn, make_temp_file() as out_fn: try: subprocess.check_call(["pdftoppm", "-jpeg", in_fn, out_fn]) file_list = sorted(glob.glob(f"{out_fn}-*.jpg")) converted_images = [] for fn in file_list: converted = resize(open(fn, "rb").read(), size, size) converted_images.append(converted) return converted_images except Exception as e: raise ConversionError("pdftoppm failed") from e finally: for fn in file_list: try: os.remove(fn) except OSError: pass
0.002301
def walk_nodes(self, node, original): """ Iterate over the nodes recursively yielding the templatetag 'sass_src' """ try: # try with django-compressor<2.1 nodelist = self.parser.get_nodelist(node, original=original) except TypeError: nodelist = self.parser.get_nodelist(node, original=original, context=None) for node in nodelist: if isinstance(node, SassSrcNode): if node.is_sass: yield node else: for node in self.walk_nodes(node, original=original): yield node
0.004666
def _kafka_success(self, item, spider, response): ''' Callback for successful send ''' item['success'] = True item = self._clean_item(item) item['spiderid'] = spider.name self.logger.info("Sent page to Kafka", item)
0.00738
def as_coeff_unit(self): """Factor the coefficient multiplying a unit For units that are multiplied by a constant dimensionless coefficient, returns a tuple containing the coefficient and a new unit object for the unmultiplied unit. Example ------- >>> import unyt as u >>> unit = (u.m**2/u.cm).simplify() >>> unit 100*m >>> unit.as_coeff_unit() (100.0, m) """ coeff, mul = self.expr.as_coeff_Mul() coeff = float(coeff) ret = Unit( mul, self.base_value / coeff, self.base_offset, self.dimensions, self.registry, ) return coeff, ret
0.00271
def subsystem(s): """Validate a |Subsystem|. Checks its state and cut. """ node_states(s.state) cut(s.cut, s.cut_indices) if config.VALIDATE_SUBSYSTEM_STATES: state_reachable(s) return True
0.004425
def getBitmap(self): """ Captures screen area of this region, at least the part that is on the screen Returns image as numpy array """ return PlatformManager.getBitmapFromRect(self.x, self.y, self.w, self.h)
0.016667
def get_missing_required_annotations(self) -> List[str]: """Return missing required annotations.""" return [ required_annotation for required_annotation in self.required_annotations if required_annotation not in self.annotations ]
0.006897
def new_result(self, job):
        """
        Registers a finished run.

        Every time a run has finished, this function should be called
        to register it with the result logger. If overwritten, make
        sure to call this method from the base class to ensure proper
        logging.

        Parameters:
        -----------
        job:
            the finished job; ``job.kwargs`` holds the run info (its
            'budget' and sampled 'config') and ``job.result`` contains
            the results of the run, including its loss
        """
        super(KernelDensityEstimator, self).new_result(job)
        budget = job.kwargs["budget"]
        if budget not in self.configs.keys():
            self.configs[budget] = []
            self.losses[budget] = []

        # We want to get a numerical representation of the configuration in the original space
        conf = ConfigSpace.Configuration(self.configspace, job.kwargs['config'])
        self.configs[budget].append(conf.get_array())
        self.losses[budget].append(job.result['result']["loss"])

        # Check if we have enough data points to fit a KDE
        if len(self.configs[budget]) % self.update_after_n_points == 0:
            train_configs, train_losses = [], []

            train_configs.extend(self.configs[budget])
            train_losses.extend(self.losses[budget])

            n = int(self.top_n_percent * len(train_configs) / 100.)
            remaining_budgets = list(self.configs.keys())
            remaining_budgets.remove(budget)
            remaining_budgets.sort(reverse=True)
            for b in remaining_budgets:
                if n >= self.min_points_in_model:
                    break
                train_configs.extend(self.configs[b])
                train_losses.extend(self.losses[b])
                n = int(self.top_n_percent * len(train_configs) / 100.)

            if len(train_losses) < self.min_points_in_model:
                return

            n = max(self.min_points_in_model, n)

            # Refit KDE for the current budget
            idx = np.argsort(train_losses)

            train_data = (np.array(train_configs)[idx])[:n]
            self.kde_models[budget] = sm.nonparametric.KDEMultivariate(data=train_data,
                                                                       var_type=self.var_type,
                                                                       bw='cv_ls')
0.032918
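# The refit at the heart of new_result is a single statsmodels call; a small
# sketch on synthetic data (a 2-D continuous space stands in for the
# configuration space).
import numpy as np
import statsmodels.api as sm

rng = np.random.RandomState(0)
train_data = rng.rand(20, 2)          # pretend: the 20 best configurations

kde = sm.nonparametric.KDEMultivariate(
    data=train_data,
    var_type='cc',    # one letter per dimension, 'c' = continuous
    bw='cv_ls',       # least-squares cross-validated bandwidths
)
print(kde.bw)                              # fitted bandwidth per dimension
print(kde.pdf(np.array([[0.5, 0.5]])))     # density estimate at a query point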