text: string (lengths 78 to 104k)
score: float64 (0 to 0.18)
def get_customjs(self, references, plot_id=None):
    """
    Creates a CustomJS callback that will send the requested
    attributes back to python.
    """
    # Generate callback JS code to get all the requested data
    if plot_id is None:
        plot_id = self.plot.id or 'PLACEHOLDER_PLOT_ID'
    self_callback = self.js_callback.format(comm_id=self.comm.id,
                                            timeout=self.timeout,
                                            debounce=self.debounce,
                                            plot_id=plot_id)
    attributes = self.attributes_js(self.attributes)
    conditions = ["%s" % cond for cond in self.skip]
    conditional = ''
    if conditions:
        conditional = 'if (%s) { return };\n' % (' || '.join(conditions))
    data = "var data = {};\n"
    code = conditional + data + attributes + self.code + self_callback
    return CustomJS(args=references, code=code)
0.001988
def _find_cmd(cmd):
    """Find the full path to a .bat or .exe using the win32api module."""
    try:
        from win32api import SearchPath
    except ImportError:
        raise ImportError('you need to have pywin32 installed for this to work')
    else:
        PATH = os.environ['PATH']
        extensions = ['.exe', '.com', '.bat', '.py']
        path = None
        for ext in extensions:
            try:
                path = SearchPath(PATH, cmd + ext)[0]
            except:
                pass
        if path is None:
            raise OSError("command %r not found" % cmd)
        else:
            return path
0.004792
def without(seq1, seq2):
    r"""Return a list with all elements in `seq2` removed from `seq1`, order
    preserved.

    Examples:

    >>> without([1,2,3,1,2], [1])
    [2, 3, 2]
    """
    if isSet(seq2):
        d2 = seq2
    else:
        d2 = set(seq2)
    return [elt for elt in seq1 if elt not in d2]
0.010239
def ports_open(name, ports, proto='tcp', direction='in'):
    '''
    Ensure ports are open for a protocol, in a direction.
    e.g. - proto='tcp', direction='in' would set the values
    for TCP_IN in the csf.conf file.

    ports
        A list of ports that should be open.

    proto
        The protocol. May be one of 'tcp', 'udp', 'tcp6', or 'udp6'.

    direction
        Choose 'in', 'out', or both to indicate the port
        should be opened for inbound traffic, outbound traffic, or both.
    '''

    ports = list(six.moves.map(six.text_type, ports))
    diff = False
    ret = {'name': ','.join(ports),
           'changes': {},
           'result': True,
           'comment': 'Ports open.'}

    current_ports = __salt__['csf.get_ports'](proto=proto, direction=direction)
    direction = direction.upper()
    directions = __salt__['csf.build_directions'](direction)
    for direction in directions:
        log.trace('current_ports[direction]: %s', current_ports[direction])
        log.trace('ports: %s', ports)
        if current_ports[direction] != ports:
            diff = True
    if diff:
        result = __salt__['csf.allow_ports'](ports, proto=proto, direction=direction)
        ret['changes']['Ports'] = 'Changed'
        ret['comment'] = result
    return ret
0.001531
def auc(x, y, reorder=False):  # from sklearn, http://scikit-learn.org, licensed under BSD License
    """Compute Area Under the Curve (AUC) using the trapezoidal rule

    This is a general function, given points on a curve.  For computing the
    area under the ROC-curve, see :func:`auc_score`.

    Parameters
    ----------
    x : array, shape = [n]
        x coordinates.

    y : array, shape = [n]
        y coordinates.

    reorder : boolean, optional (default=False)
        If True, assume that the curve is ascending in the case of ties, as
        for an ROC curve. If the curve is non-ascending, the result will be
        wrong.

    Returns
    -------
    auc : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> pred = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
    >>> metrics.auc(fpr, tpr)
    0.75

    See also
    --------
    auc_score : Computes the area under the ROC curve

    """
    if np is None:
        raise ImportError("No numpy installed")
    # XXX: Consider using ``scipy.integrate`` instead, or moving to
    # ``utils.extmath``
    if not isinstance(x, np.ndarray):
        x = np.array(x)
    if not isinstance(y, np.ndarray):
        y = np.array(y)
    if x.shape[0] < 2:
        raise ValueError('At least 2 points are needed to compute'
                         ' area under curve, but x.shape = %s' % x.shape)

    if reorder:
        # reorder the data points according to the x axis and using y to
        # break ties
        x, y = np.array(sorted(points for points in zip(x, y))).T
        h = np.diff(x)
    else:
        h = np.diff(x)
        if np.any(h < 0):
            h *= -1
            assert not np.any(h < 0), ("Reordering is not turned on, and "
                                       "the x array is not increasing: %s" % x)

    area = np.sum(h * (y[1:] + y[:-1])) / 2.0
    return area
0.003035
def plot_partial_row_coordinates(self, X, ax=None, figsize=(6, 6), x_component=0, y_component=1,
                                 color_labels=None, **kwargs):
    """Plot the row principal coordinates."""
    utils.validation.check_is_fitted(self, 's_')

    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)

    # Add plotting style
    ax = plot.stylize_axis(ax)

    # Check input
    if self.check_input:
        utils.check_array(X, dtype=[str, np.number])

    # Prepare input
    X = self._prepare_input(X)

    # Retrieve partial coordinates
    coords = self.partial_row_coordinates(X)

    # Determine the color of each group if there are group labels
    if color_labels is not None:
        colors = {g: ax._get_lines.get_next_color() for g in sorted(list(set(color_labels)))}

    # Get the list of all possible markers
    marks = itertools.cycle(list(markers.MarkerStyle.markers.keys()))
    next(marks)  # The first marker looks pretty shit so we skip it

    # Plot points
    for name in self.groups:

        mark = next(marks)

        x = coords[name][x_component]
        y = coords[name][y_component]

        if color_labels is None:
            ax.scatter(x, y, marker=mark, label=name, **kwargs)
            continue

        for color_label, color in sorted(colors.items()):
            mask = np.array(color_labels) == color_label
            label = '{} - {}'.format(name, color_label)
            ax.scatter(x[mask], y[mask], marker=mark, color=color, label=label, **kwargs)

    # Legend
    ax.legend()

    # Text
    ax.set_title('Partial row principal coordinates')
    ei = self.explained_inertia_
    ax.set_xlabel('Component {} ({:.2f}% inertia)'.format(x_component, 100 * ei[x_component]))
    ax.set_ylabel('Component {} ({:.2f}% inertia)'.format(y_component, 100 * ei[y_component]))

    return ax
0.003974
def _remove(self, shard_name):
    """remove member from configuration"""
    result = self.router_command("removeShard", shard_name, is_eval=False)
    if result['ok'] == 1 and result['state'] == 'completed':
        shard = self._shards.pop(shard_name)
        if shard.get('isServer', False):
            Servers().remove(shard['_id'])
        if shard.get('isReplicaSet', False):
            ReplicaSets().remove(shard['_id'])
    return result
0.004132
def insert(self, item, priority): """Adds item to DEPQ with given priority by performing a binary search on the concurrently rotating deque. Amount rotated R of DEPQ of length n would be n <= R <= 3n/2. Performance: O(n)""" with self.lock: self_data = self.data rotate = self_data.rotate self_items = self.items maxlen = self._maxlen try: if priority <= self_data[-1][1]: self_data.append((item, priority)) elif priority > self_data[0][1]: self_data.appendleft((item, priority)) else: length = len(self_data) + 1 mid = length // 2 shift = 0 while True: if priority <= self_data[0][1]: rotate(-mid) shift += mid mid //= 2 if mid == 0: mid += 1 else: rotate(mid) shift -= mid mid //= 2 if mid == 0: mid += 1 if self_data[-1][1] >= priority > self_data[0][1]: self_data.appendleft((item, priority)) # When returning to original position, never shift # more than half length of DEPQ i.e. if length is # 100 and we rotated -75, rotate -25, not 75 if shift > length // 2: shift = length % shift rotate(-shift) else: rotate(shift) break try: self_items[item] += 1 except TypeError: self_items[repr(item)] += 1 except IndexError: self_data.append((item, priority)) try: self_items[item] = 1 except TypeError: self_items[repr(item)] = 1 if maxlen is not None and maxlen < len(self_data): self._poplast()
0.000826
def round_sig_error(num, uncert, pm=False):
    """
    Return a string of the number and its uncertainty to the right sig figs
    via uncertainty's print methods. The uncertainty determines the sig fig
    rounding of the number.

    https://pythonhosted.org/uncertainties/user_guide.html
    """
    u = ufloat(num, uncert)
    if pm:
        return '{:.1uL}'.format(u)
    else:
        return '{:.1uLS}'.format(u)
0.004831
def calculate_max_cols_length(table, size):
    """
    :param table: list of lists:
        [["row 1 column 1", "row 1 column 2"],
         ["row 2 column 1", "row 2 column 2"]]
        each item consists of instance of urwid.Text
    :returns dict, {index: width}
    """
    max_cols_lengths = {}
    for row in table:
        col_index = 0
        for idx, widget in enumerate(row.widgets):
            l = widget.pack((size[0], ))[0]
            max_cols_lengths[idx] = max(max_cols_lengths.get(idx, 0), l)
            col_index += 1
    max_cols_lengths.setdefault(0, 1)  # in case table is empty
    return max_cols_lengths
0.00321
def parse_comment_telemetry(text):
    """
    Looks for base91 telemetry found in comment field
    Returns [remaining_text, telemetry]
    """
    parsed = {}
    match = re.findall(r"^(.*?)\|([!-{]{4,14})\|(.*)$", text)

    if match and len(match[0][1]) % 2 == 0:
        text, telemetry, post = match[0]
        text += post

        temp = [0] * 7
        for i in range(7):
            temp[i] = base91.to_decimal(telemetry[i*2:i*2+2])

        parsed.update({
            'telemetry': {
                'seq': temp[0],
                'vals': temp[1:6]
            }
        })

        if temp[6] != '':
            parsed['telemetry'].update({
                'bits': "{0:08b}".format(temp[6] & 0xFF)[::-1]
            })

    return (text, parsed)
0.001299
def read_some(self):
    """Read at least one byte of cooked data unless EOF is hit.

    Return '' if EOF is hit.  Block if no data is immediately
    available.
    """
    self.process_rawq()
    while self.cookedq.tell() == 0 and not self.eof:
        self.fill_rawq()
        self.process_rawq()
    buf = self.cookedq.getvalue()
    self.cookedq.seek(0)
    self.cookedq.truncate()
    return buf
0.004435
async def request_proof(self, connection: Connection): """ Example: connection = await Connection.create(source_id) await connection.connect(phone_number) name = "proof name" requested_attrs = [{"name": "age", "restrictions": [{"schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"Faber Student Info", "schema_version":"1.0", "schema_issuer_did":"6XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"8XFh8yBzrpJQmNyZzgoTqB", "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766" }, { "schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"BYU Student Info", "schema_version":"1.0", "schema_issuer_did":"5XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"66Fh8yBzrpJQmNyZzgoTqB", "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766" } ] }, { "name":"name", "restrictions": [ { "schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"Faber Student Info", "schema_version":"1.0", "schema_issuer_did":"6XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"8XFh8yBzrpJQmNyZzgoTqB", "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766" }, { "schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"BYU Student Info", "schema_version":"1.0", "schema_issuer_did":"5XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"66Fh8yBzrpJQmNyZzgoTqB", "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766"}]}] proof = await Proof.create(source_id, name, requested_attrs) await proof.request_proof(connection) :param connection: Connection :return: """ if not hasattr(Proof.request_proof, "cb"): self.logger.debug("vcx_proof_send_request: Creating callback") Proof.request_proof.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32)) c_proof_handle = c_uint32(self.handle) c_connection_handle = c_uint32(connection.handle) await do_call('vcx_proof_send_request', c_proof_handle, c_connection_handle, Proof.request_proof.cb)
0.001977
def get_score(self, member, default=None, pipe=None):
    """
    Return the score of *member*, or *default* if it is not in the
    collection.
    """
    pipe = self.redis if pipe is None else pipe
    score = pipe.zscore(self.key, self._pickle(member))
    if (score is None) and (default is not None):
        score = float(default)
    return score
0.005102
def strip_cdata(text):
    """Removes all CDATA blocks from `text` if it contains them.

    Note:
        If the function contains escaped XML characters outside of a
        CDATA block, they will be unescaped.

    Args:
        A string containing one or more CDATA blocks.

    Returns:
        An XML unescaped string with CDATA block qualifiers removed.
    """
    if not is_cdata(text):
        return text

    xml = "<e>{0}</e>".format(text)
    node = etree.fromstring(xml)
    return node.text
0.001972
def _set_below(self, v, load=False): """ Setter method for below, mapped from YANG variable /rbridge_id/threshold_monitor/interface/policy/area/alert/below (container) If this variable is read-only (config: false) in the source YANG file, then _set_below is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_below() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=below.below, is_container='container', presence=False, yang_name="below", rest_name="below", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Below trigger', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """below must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=below.below, is_container='container', presence=False, yang_name="below", rest_name="below", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Below trigger', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)""", }) self.__below = t if hasattr(self, '_set'): self._set()
0.006013
def encode_timestamp(timestamp: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The given :class:`~bytes` are expected to represent the first 6 bytes
    of a ULID, which are a timestamp in milliseconds.

    .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
        bytes specifically and is not meant for arbitrary encoding.

    :param timestamp: Bytes to encode
    :type timestamp: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the timestamp is not 6 bytes
    """
    length = len(timestamp)
    if length != 6:
        raise ValueError('Expects 6 bytes for timestamp; got {}'.format(length))

    encoding = ENCODING

    return \
        encoding[(timestamp[0] & 224) >> 5] + \
        encoding[timestamp[0] & 31] + \
        encoding[(timestamp[1] & 248) >> 3] + \
        encoding[((timestamp[1] & 7) << 2) | ((timestamp[2] & 192) >> 6)] + \
        encoding[((timestamp[2] & 62) >> 1)] + \
        encoding[((timestamp[2] & 1) << 4) | ((timestamp[3] & 240) >> 4)] + \
        encoding[((timestamp[3] & 15) << 1) | ((timestamp[4] & 128) >> 7)] + \
        encoding[(timestamp[4] & 124) >> 2] + \
        encoding[((timestamp[4] & 3) << 3) | ((timestamp[5] & 224) >> 5)] + \
        encoding[timestamp[5] & 31]
0.003501
def collect(items, convert=(list, tuple), convert_to=tuple):
    """
    Converts a nested list/tuple/generator into a tuple. If no nested
    list/tuple/generator is found (or if multiple are found) then "items"
    is returned unchanged to the caller. Useful for generic functions.

    :param items: Target sequence
    :param convert: Tuple of types to convert into target type
    :param convert_to: Target type
    :return: The collected sequence
    """
    if len(items) == 1:
        head_item = head(items)
        if True in (isinstance(head_item, t) for t in convert):
            items = convert_to(*items)
        elif isgenerator(head(items)):
            items = convert_to(*items)
    return convert_to(items)
0.004038
def _wanmen_get_title_by_json_topic_part(json_content, tIndex, pIndex):
    """JSON, int, int, int->str
    Get a proper title with courseid+topicID+partID."""

    return '_'.join([json_content[0]['name'],
                     json_content[0]['Topics'][tIndex]['name'],
                     json_content[0]['Topics'][tIndex]['Parts'][pIndex]['name']])
0.008451
def attack(self, imgs, targets):
    """
    Perform the EAD attack on the given instance for the given targets.

    If self.targeted is true, then the targets represents the target labels
    If self.targeted is false, then targets are the original class labels
    """

    batch_size = self.batch_size
    r = []
    for i in range(0, len(imgs) // batch_size):
        _logger.debug(
            ("Running EAD attack on instance %s of %s",
             i * batch_size, len(imgs)))
        r.extend(
            self.attack_batch(
                imgs[i * batch_size:(i + 1) * batch_size],
                targets[i * batch_size:(i + 1) * batch_size]))
    if len(imgs) % batch_size != 0:
        last_elements = len(imgs) - (len(imgs) % batch_size)
        _logger.debug(
            ("Running EAD attack on instance %s of %s",
             last_elements, len(imgs)))
        temp_imgs = np.zeros((batch_size, ) + imgs.shape[2:])
        temp_targets = np.zeros((batch_size, ) + targets.shape[2:])
        temp_imgs[:(len(imgs) % batch_size)] = imgs[last_elements:]
        temp_targets[:(len(imgs) % batch_size)] = targets[last_elements:]
        temp_data = self.attack_batch(temp_imgs, temp_targets)
        # list.extend() takes a single iterable; keep only the rows of the
        # padded batch that correspond to real inputs.
        r.extend(temp_data[:(len(imgs) % batch_size)])
    return np.array(r)
0.008527
def catch_gzip_errors(f):
    """
    A decorator to handle gzip encoding errors which have been known to
    happen during hydration.
    """
    def new_f(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except requests.exceptions.ContentDecodingError as e:
            log.warning("caught gzip error: %s", e)
            self.connect()
            return f(self, *args, **kwargs)
    return new_f
0.002278
def mpsse_read_gpio(self):
    """Read both GPIO bus states and return a 16 bit value with their state.

    D0-D7 are the lower 8 bits and C0-C7 are the upper 8 bits.
    """
    # Send command to read low byte and high byte.
    self._write('\x81\x83')
    # Wait for 2 byte response.
    data = self._poll_read(2)
    # Assemble response into 16 bit value.
    low_byte = ord(data[0])
    high_byte = ord(data[1])
    logger.debug('Read MPSSE GPIO low byte = {0:02X} and high byte = {1:02X}'.format(low_byte, high_byte))
    return (high_byte << 8) | low_byte
0.006568
def _handle_message_for_stream(self, stream_transport, message, timeout): """Handle an incoming message, check if it's for the given stream. If the message is not for the stream, then add it to the appropriate message queue. Args: stream_transport: AdbStreamTransport currently waiting on a message. message: Message to check and handle. timeout: Timeout to use for the operation, should be an instance of timeouts.PolledTimeout. Returns: The message read if it was for this stream, None otherwise. Raises: AdbProtocolError: If we receive an unexpected message type. """ if message.command not in ('OKAY', 'CLSE', 'WRTE'): raise usb_exceptions.AdbProtocolError( '%s received unexpected message: %s', self, message) if message.arg1 == stream_transport.local_id: # Ack writes immediately. if message.command == 'WRTE': # Make sure we don't get a WRTE before an OKAY/CLSE message. if not stream_transport.remote_id: raise usb_exceptions.AdbProtocolError( '%s received WRTE before OKAY/CLSE: %s', stream_transport, message) self.transport.write_message(adb_message.AdbMessage( 'OKAY', stream_transport.local_id, stream_transport.remote_id), timeout) elif message.command == 'CLSE': self.close_stream_transport(stream_transport, timeout) return message else: # Message was not for this stream, add it to the right stream's queue. with self._stream_transport_map_lock: dest_transport = self._stream_transport_map.get(message.arg1) if dest_transport: if message.command == 'CLSE': self.close_stream_transport(dest_transport, timeout) dest_transport.enqueue_message(message, timeout) else: _LOG.warning('Received message for unknown local-id: %s', message)
0.006132
async def emit(self, event, data=None, room=None, skip_sid=None, namespace=None, callback=None, **kwargs): """Emit a custom event to one or more connected clients. :param event: The event name. It can be any string. The event names ``'connect'``, ``'message'`` and ``'disconnect'`` are reserved and should not be used. :param data: The data to send to the client or clients. Data can be of type ``str``, ``bytes``, ``list`` or ``dict``. If a ``list`` or ``dict``, the data will be serialized as JSON. :param room: The recipient of the message. This can be set to the session ID of a client to address that client's room, or to any custom room created by the application, If this argument is omitted the event is broadcasted to all connected clients. :param skip_sid: The session ID of a client to skip when broadcasting to a room or to all clients. This can be used to prevent a message from being sent to the sender. :param namespace: The Socket.IO namespace for the event. If this argument is omitted the event is emitted to the default namespace. :param callback: If given, this function will be called to acknowledge the the client has received the message. The arguments that will be passed to the function are those provided by the client. Callback functions can only be used when addressing an individual client. :param ignore_queue: Only used when a message queue is configured. If set to ``True``, the event is emitted to the clients directly, without going through the queue. This is more efficient, but only works when a single server process is used. It is recommended to always leave this parameter with its default value of ``False``. Note: this method is a coroutine. """ namespace = namespace or '/' self.logger.info('emitting event "%s" to %s [%s]', event, room or 'all', namespace) await self.manager.emit(event, data, namespace, room=room, skip_sid=skip_sid, callback=callback, **kwargs)
0.001125
def getShocks(self):
    '''
    Draws a new Markov state and income shocks for the representative agent.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    cutoffs = np.cumsum(self.MrkvArray[self.MrkvNow,:])
    MrkvDraw = drawUniform(N=1,seed=self.RNG.randint(0,2**31-1))
    self.MrkvNow = np.searchsorted(cutoffs,MrkvDraw)

    t = self.t_cycle[0]
    i = self.MrkvNow[0]
    IncomeDstnNow = self.IncomeDstn[t-1][i]  # set current income distribution
    PermGroFacNow = self.PermGroFac[t-1][i]  # and permanent growth factor
    Indices = np.arange(IncomeDstnNow[0].size)  # just a list of integers
    # Get random draws of income shocks from the discrete distribution
    EventDraw = drawDiscrete(N=1,X=Indices,P=IncomeDstnNow[0],exact_match=False,seed=self.RNG.randint(0,2**31-1))
    PermShkNow = IncomeDstnNow[1][EventDraw]*PermGroFacNow  # permanent "shock" includes expected growth
    TranShkNow = IncomeDstnNow[2][EventDraw]
    self.PermShkNow = np.array(PermShkNow)
    self.TranShkNow = np.array(TranShkNow)
0.021386
def _unascii(s): """Unpack `\\uNNNN` escapes in 's' and encode the result as UTF-8 This method takes the output of the JSONEncoder and expands any \\uNNNN escapes it finds (except for \\u0000 to \\u001F, which are converted to \\xNN escapes). For performance, it assumes that the input is valid JSON, and performs few sanity checks. """ # make the fast path fast: if there are no matches in the string, the # whole thing is ascii. On python 2, that means we're done. On python 3, # we have to turn it into a bytes, which is quickest with encode('utf-8') m = _U_ESCAPE.search(s) if not m: return s if PY2 else s.encode('utf-8') # appending to a string (or a bytes) is slooow, so we accumulate sections # of string result in 'chunks', and join them all together later. # (It doesn't seem to make much difference whether we accumulate # utf8-encoded bytes, or strings which we utf-8 encode after rejoining) # chunks = [] # 'pos' tracks the index in 's' that we have processed into 'chunks' so # far. pos = 0 while m: start = m.start() end = m.end() g = m.group(1) if g is None: # escaped backslash: pass it through along with anything before the # match chunks.append(s[pos:end]) else: # \uNNNN, but we have to watch out for surrogate pairs. # # On python 2, str.encode("utf-8") will decode utf-16 surrogates # before re-encoding, so it's fine for us to pass the surrogates # through. (Indeed we must, to deal with UCS-2 python builds, per # https://github.com/matrix-org/python-canonicaljson/issues/12). # # On python 3, str.encode("utf-8") complains about surrogates, so # we have to unpack them. c = int(g, 16) if c < 0x20: # leave as a \uNNNN escape chunks.append(s[pos:end]) else: if PY3: # pragma nocover if c & 0xfc00 == 0xd800 and s[end:end + 2] == '\\u': esc2 = s[end + 2:end + 6] c2 = int(esc2, 16) if c2 & 0xfc00 == 0xdc00: c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00)) end += 6 chunks.append(s[pos:start]) chunks.append(unichr(c)) pos = end m = _U_ESCAPE.search(s, pos) # pass through anything after the last match chunks.append(s[pos:]) return (''.join(chunks)).encode("utf-8")
0.000367
def buffer_side(linestring, side, buffer):
    """
    Given a Shapely LineString, a side of the LineString
    (string; 'left' = left hand side of LineString,
    'right' = right hand side of LineString, or
    'both' = both sides), and a buffer size in the distance units of
    the LineString, buffer the LineString on the given side by
    the buffer size and return the resulting Shapely polygon.
    """
    b = linestring.buffer(buffer, cap_style=2)
    if side in ['left', 'right'] and buffer > 0:
        # Make a tiny buffer to split the normal-size buffer
        # in half across the linestring
        eps = min(buffer/2, 0.001)
        b0 = linestring.buffer(eps, cap_style=3)
        diff = b.difference(b0)
        polys = so.polygonize(diff)
        # Buffer sides slightly to include original linestring
        if side == 'left':
            b = list(polys)[0].buffer(1.1*eps)
        else:
            b = list(polys)[-1].buffer(1.1*eps)
    return b
0.00103
def aggregate(self, query: Optional[dict] = None,
              group: Optional[dict] = None,
              order_by: Optional[tuple] = None) -> List[IModel]:
    """Get aggregated results

    :param query: Rulez based query
    :param group: Grouping structure
    :param order_by: Tuple of ``(field, order)`` where ``order`` is
                     ``'asc'`` or ``'desc'``

    :todo: Grouping structure need to be documented
    """
    raise NotImplementedError
0.007828
def entries(self):
    """A list of :class:`PasswordEntry` objects."""
    passwords = []
    for store in self.stores:
        passwords.extend(store.entries)
    return natsort(passwords, key=lambda e: e.name)
0.008658
def http_put(self, path, query_data={}, post_data={}, files=None, **kwargs):
    """Make a PUT request to the Gitlab server.

    Args:
        path (str): Path or full URL to query ('/projects' or
                    'http://whatever/v4/api/projecs')
        query_data (dict): Data to send as query parameters
        post_data (dict): Data to send in the body (will be converted to
                          json)
        files (dict): The files to send to the server
        **kwargs: Extra options to send to the server (e.g. sudo)

    Returns:
        The parsed json returned by the server.

    Raises:
        GitlabHttpError: When the return code is not 2xx
        GitlabParsingError: If the json data could not be parsed
    """
    result = self.http_request('put', path, query_data=query_data,
                               post_data=post_data, files=files, **kwargs)
    try:
        return result.json()
    except Exception:
        raise GitlabParsingError(
            error_message="Failed to parse the server message")
0.002618
def _format_playlist_line(self, lineNum, pad, station):
    """ format playlist line so that it fills self.maxX """
    line = "{0}. {1}".format(str(lineNum + self.startPos + 1).rjust(pad), station[0])
    f_data = ' [{0}, {1}]'.format(station[2], station[1])
    if version_info < (3, 0):
        if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 2:
            """ this is too long, try to shorten it by removing file size """
            f_data = ' [{0}]'.format(station[1])
        if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 2:
            """ still too long. start removing chars """
            while len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 3:
                f_data = f_data[:-1]
            f_data += ']'
        """ if too short, pad f_data to the right """
        if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) < self.maxX - 2:
            while len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) < self.maxX - 2:
                line += ' '
    else:
        if len(line) + len(f_data) > self.bodyMaxX - 2:
            """ this is too long, try to shorten it by removing file size """
            f_data = ' [{0}]'.format(station[1])
        if len(line) + len(f_data) > self.bodyMaxX - 2:
            """ still too long. start removing chars """
            while len(line) + len(f_data) > self.bodyMaxX - 3:
                f_data = f_data[:-1]
            f_data += ']'
        """ if too short, pad f_data to the right """
        if len(line) + len(f_data) < self.maxX - 2:
            while len(line) + len(f_data) < self.maxX - 2:
                line += ' '
    line += f_data
    return line
0.00501
def demo(context):
    """Setup a scout demo instance. This instance will be populated with a
       case, a gene panel and some variants.
    """
    LOG.info("Running scout setup demo")
    institute_name = context.obj['institute_name']
    user_name = context.obj['user_name']
    user_mail = context.obj['user_mail']

    adapter = context.obj['adapter']

    LOG.info("Setting up database %s", context.obj['mongodb'])
    setup_scout(
        adapter=adapter,
        institute_id=institute_name,
        user_name=user_name,
        user_mail=user_mail,
        demo=True
    )
0.011804
def init_send(self):
    """
    Generates the first (IKE_INIT) packet for Initiator

    :return: bytes() containing a valid IKE_INIT packet
    """
    packet = Packet()
    self.packets.append(packet)
    packet.add_payload(payloads.SA())
    packet.add_payload(payloads.KE(diffie_hellman=self.diffie_hellman))
    packet.add_payload(payloads.Nonce(nonce=self.Ni))
    packet.iSPI = self.iSPI = packet.payloads[0].spi
    self.state = State.INIT
    return bytes(packet)
0.003839
def from_config(cls, cp, model, nprocesses=1, use_mpi=False): """ Loads the sampler from the given config file. For generating the temperature ladder to be used by emcee_pt, either the number of temperatures (provided by the option 'ntemps'), or the path to a file storing inverse temperature values (provided under a subsection inverse-temperatures-file) can be loaded from the config file. If the latter, the file should be of hdf format, having an attribute named 'betas' storing the list of inverse temperature values to be provided to emcee_pt. If the former, emcee_pt will construct the ladder with "ntemps" geometrically spaced temperatures. """ section = "sampler" # check name assert cp.get(section, "name") == cls.name, ( "name in section [sampler] must match mine") # get the number of walkers to use nwalkers = int(cp.get(section, "nwalkers")) if cp.has_option(section, "ntemps") and \ cp.has_option(section, "inverse-temperatures-file"): raise ValueError("Must specify either ntemps or " "inverse-temperatures-file, not both.") if cp.has_option(section, "inverse-temperatures-file"): # get the path of the file containing inverse temperatures values. inverse_temperatures_file = cp.get(section, "inverse-temperatures-file") with h5py.File(inverse_temperatures_file, "r") as fp: try: betas = numpy.array(fp.attrs['betas']) ntemps = betas.shape[0] except KeyError: raise AttributeError("No attribute called betas") else: # get the number of temperatures betas = None ntemps = int(cp.get(section, "ntemps")) # get the checkpoint interval, if it's specified checkpoint_interval = cls.checkpoint_from_config(cp, section) checkpoint_signal = cls.ckpt_signal_from_config(cp, section) # get the loglikelihood function logl = get_optional_arg_from_config(cp, section, 'logl-function') obj = cls(model, ntemps, nwalkers, betas=betas, checkpoint_interval=checkpoint_interval, checkpoint_signal=checkpoint_signal, loglikelihood_function=logl, nprocesses=nprocesses, use_mpi=use_mpi) # set target obj.set_target_from_config(cp, section) # add burn-in if it's specified obj.set_burn_in_from_config(cp) # set prethin options obj.set_thin_interval_from_config(cp, section) return obj
0.000717
def get_dtype_kinds(l):
    """
    Parameters
    ----------
    l : list of arrays

    Returns
    -------
    a set of kinds that exist in this list of arrays
    """

    typs = set()
    for arr in l:

        dtype = arr.dtype
        if is_categorical_dtype(dtype):
            typ = 'category'
        elif is_sparse(arr):
            typ = 'sparse'
        elif isinstance(arr, ABCRangeIndex):
            typ = 'range'
        elif is_datetime64tz_dtype(arr):
            # if to_concat contains different tz,
            # the result must be object dtype
            typ = str(arr.dtype)
        elif is_datetime64_dtype(dtype):
            typ = 'datetime'
        elif is_timedelta64_dtype(dtype):
            typ = 'timedelta'
        elif is_object_dtype(dtype):
            typ = 'object'
        elif is_bool_dtype(dtype):
            typ = 'bool'
        elif is_extension_array_dtype(dtype):
            typ = str(arr.dtype)
        else:
            typ = dtype.kind
        typs.add(typ)
    return typs
0.001951
def select_graphic_rendition(self, *attrs): """Set display attributes. :param list attrs: a list of display attributes to set. """ replace = {} # Fast path for resetting all attributes. if not attrs or attrs == (0, ): self.cursor.attrs = self.default_char return else: attrs = list(reversed(attrs)) while attrs: attr = attrs.pop() if attr == 0: # Reset all attributes. replace.update(self.default_char._asdict()) elif attr in g.FG_ANSI: replace["fg"] = g.FG_ANSI[attr] elif attr in g.BG: replace["bg"] = g.BG_ANSI[attr] elif attr in g.TEXT: attr = g.TEXT[attr] replace[attr[1:]] = attr.startswith("+") elif attr in g.FG_AIXTERM: replace.update(fg=g.FG_AIXTERM[attr], bold=True) elif attr in g.BG_AIXTERM: replace.update(bg=g.BG_AIXTERM[attr], bold=True) elif attr in (g.FG_256, g.BG_256): key = "fg" if attr == g.FG_256 else "bg" try: n = attrs.pop() if n == 5: # 256. m = attrs.pop() replace[key] = g.FG_BG_256[m] elif n == 2: # 24bit. # This is somewhat non-standard but is nonetheless # supported in quite a few terminals. See discussion # here https://gist.github.com/XVilka/8346728. replace[key] = "{0:02x}{1:02x}{2:02x}".format( attrs.pop(), attrs.pop(), attrs.pop()) except IndexError: pass self.cursor.attrs = self.cursor.attrs._replace(**replace)
0.001055
def parse(self, scope):
    """Parse node
    args:
        scope (Scope): current scope
    raises:
        SyntaxError
    returns:
        self
    """
    if not self.parsed:
        if len(self.tokens) > 2:
            property, style, _ = self.tokens
            self.important = True
        else:
            property, style = self.tokens
            self.important = False
        self.property = ''.join(property)
        self.parsed = []
        if style:
            style = self.preprocess(style)
            self.parsed = self.process(style, scope)
    return self
0.003021
def getLinkedRequests(self):
    """Lookup linked Analysis Requests

    :returns: sorted list of ARs, where the latest AR comes first
    """
    rc = api.get_tool("reference_catalog")
    refs = rc.getBackReferences(self, "AnalysisRequestAttachment")
    # fetch the objects by UID and handle nonexisting UIDs gracefully
    ars = map(lambda ref: api.get_object_by_uid(ref.sourceUID, None), refs)
    # filter out None values (nonexisting UIDs)
    ars = filter(None, ars)
    # sort by physical path, so that attachments coming from an AR with a
    # higher "-Rn" suffix get sorted correctly.
    # N.B. the created date is the same, hence we can not use it
    return sorted(ars, key=api.get_path, reverse=True)
0.002604
def put(self, item):
    ''' store item in sqlite database '''
    if isinstance(item, self._item_class):
        self._put_one(item)
    elif isinstance(item, (list, tuple)):
        self._put_many(item)
    else:
        raise RuntimeError('Unknown item(s) type, %s' % type(item))
0.006289
def appendInputWithNSimilarValues(inputs, numNear = 10):
    """ Creates a neighboring record for each record in the inputs and adds
    new records at the end of the inputs list
    """
    numInputs = len(inputs)
    skipOne = False
    for i in xrange(numInputs):
        input = inputs[i]
        numChanged = 0
        newInput = copy.deepcopy(input)
        for j in xrange(len(input)-1):
            if skipOne:
                skipOne = False
                continue
            if input[j] == 1 and input[j+1] == 0:
                newInput[j] = 0
                newInput[j+1] = 1
                inputs.append(newInput)
                newInput = copy.deepcopy(newInput)
                #print input
                #print newInput
                numChanged += 1
                skipOne = True
                if numChanged == numNear:
                    break
0.016304
def _readClusterSettings(self):
    """
    Read the current instance's meta-data to get the cluster settings.
    """
    # get the leader metadata
    mdUrl = "http://169.254.169.254/metadata/instance?api-version=2017-08-01"
    header = {'Metadata': 'True'}
    request = urllib.request.Request(url=mdUrl, headers=header)
    response = urllib.request.urlopen(request)
    data = response.read()
    dataStr = data.decode("utf-8")
    metadata = json.loads(dataStr)

    # set values from the leader meta-data
    self._zone = metadata['compute']['location']
    self.clusterName = metadata['compute']['resourceGroupName']
    tagsStr = metadata['compute']['tags']
    tags = dict(item.split(":") for item in tagsStr.split(";"))
    self._owner = tags.get('owner', 'no-owner')
    leader = self.getLeader()
    self._leaderPrivateIP = leader.privateIP
    self._setSSH()  # create id_rsa.pub file on the leader if it is not there
    self._masterPublicKeyFile = self.LEADER_HOME_DIR + '.ssh/id_rsa.pub'

    # Add static nodes to /etc/hosts since Azure sometimes fails to find them with DNS
    map(lambda x: self._addToHosts(x), self.getProvisionedWorkers(None))
0.004766
def create(self, vlans):
    """
    Method to create vlan's

    :param vlans: List containing vlan's desired to be created on database
    :return: None
    """
    data = {'vlans': vlans}
    return super(ApiVlan, self).post('api/v3/vlan/', data)
0.007194
def from_callback(cls, cb, ny=None, nparams=None, dep_scaling=1, indep_scaling=1, **kwargs): """ Create an instance from a callback. Analogous to :func:`SymbolicSys.from_callback`. Parameters ---------- cb : callable Signature rhs(x, y[:], p[:]) -> f[:] ny : int length of y nparams : int length of p dep_scaling : number (>0) or iterable of numbers scaling of the dependent variables (default: 1) indep_scaling: number (>0) scaling of the independent variable (default: 1) \*\*kwargs : Keyword arguments passed onto :class:`ScaledSys`. Examples -------- >>> def f(x, y, p): ... return [p[0]*y[0]**2] >>> odesys = ScaledSys.from_callback(f, 1, 1, dep_scaling=10) >>> odesys.exprs (p_0*y_0**2/10,) """ return TransformedSys.from_callback( cb, ny, nparams, dep_transf_cbs=repeat(cls._scale_fw_bw(dep_scaling)), indep_transf_cbs=cls._scale_fw_bw(indep_scaling), **kwargs )
0.005055
def release(self):
    """
    Releases this resource back to the pool it came from.
    """
    if self.errored:
        self.pool.delete_resource(self)
    else:
        self.pool.release(self)
0.008969
def root():
    """Placeholder root url for the PCI.

    Ideally this should never be called!
    """
    response = {
        "links": {
            "message": "Welcome to the SIP Processing Controller Interface",
            "items": [
                {"href": "{}health".format(request.url)},
                {"href": "{}subarrays".format(request.url)},
                {"href": "{}scheduling_blocks".format(request.url)},
                {"href": "{}processing_blocks".format(request.url)}
            ]
        }
    }
    return response, HTTPStatus.OK
0.001789
def IsPrimitiveType(obj):
    """See if the passed in type is a Primitive Type"""
    return (isinstance(obj, types.bool) or isinstance(obj, types.byte) or
            isinstance(obj, types.short) or isinstance(obj, six.integer_types) or
            isinstance(obj, types.double) or isinstance(obj, types.float) or
            isinstance(obj, six.string_types) or
            isinstance(obj, types.PropertyPath) or
            isinstance(obj, types.ManagedMethod) or
            isinstance(obj, types.datetime) or
            isinstance(obj, types.URI) or isinstance(obj, type))
0.018727
def _create_instance(self, cls, args, ref=None): """ Returns an instance of `cls` with `args` passed as arguments. Recursively inspects `args` to create nested objects and functions as necessary. `cls` will only be considered only if it's an object we track (i.e.: troposphere objects). If `cls` has a `props` attribute, nested properties will be instanciated as troposphere Property objects as necessary. If `cls` is a list and contains a single troposphere type, the returned value will be a list of instances of that type. """ if isinstance(cls, Sequence): if len(cls) == 1: # a list of 1 type means we must provide a list of such objects if (isinstance(args, basestring) or not isinstance(args, Sequence)): args = [args] return [self._create_instance(cls[0], v) for v in args] if isinstance(cls, Sequence)\ or cls not in self.inspect_members.union(self._custom_members): # this object doesn't map to any known object. could be a string # or int, or a Ref... or a list of types such as # [basestring, FindInMap, Ref] or maybe a # validator such as `integer` or `port_range` return self._convert_definition(args) elif issubclass(cls, AWSHelperFn): # special handling for functions, we want to handle it before # entering the other conditions. try: if issubclass(cls, Tags): arg_dict = {} for d in args: arg_dict[d['Key']] = d['Value'] return cls(arg_dict) if (isinstance(args, Sequence) and not isinstance(args, basestring)): return cls(*self._convert_definition(args)) if issubclass(cls, autoscaling.Metadata): return self._generate_autoscaling_metadata(cls, args) if issubclass(cls, Export): return cls(args['Name']) args = self._convert_definition(args) if isinstance(args, Ref) and issubclass(cls, Ref): # watch out for double-refs... # this can happen if an object's .props has 'Ref' # as the expected type (which is wrong and should be # changed to basestring!) return args return cls(args) except TypeError as ex: if '__init__() takes exactly' not in ex.message: raise # special AWSHelperFn typically take lowercased parameters, # but templates use uppercase. for this reason we cannot # map to most of them, so we fallback with a generic one. # this might not work for all types if they do extra # processing in their init routine return GenericHelperFn(args) elif isinstance(args, Mapping): # we try to build as many troposphere objects as we can by # inspecting its type validation metadata kwargs = {} kwargs.update(args) for prop_name in getattr(cls, 'props', []): if prop_name not in kwargs: continue # the user did not specify this value; skip it expected_type = cls.props[prop_name][0] if (isinstance(expected_type, Sequence) or expected_type in self.inspect_members): kwargs[prop_name] = self._create_instance( expected_type, kwargs[prop_name], prop_name) else: kwargs[prop_name] = self._convert_definition( kwargs[prop_name], prop_name) args = self._convert_definition(kwargs) if isinstance(args, Ref): # use the returned ref instead of creating a new object return args if isinstance(args, AWSHelperFn): return self._convert_definition(kwargs) assert isinstance(args, Mapping) return cls(title=ref, **args) return cls(self._convert_definition(args))
0.000453
def exclude_paths(root, patterns, dockerfile=None):
    """
    Given a root directory path and a list of .dockerignore patterns, return
    an iterator of all paths (both regular files and directories) in the root
    directory that do *not* match any of the patterns.

    All paths returned are relative to the root.
    """

    if dockerfile is None:
        dockerfile = 'Dockerfile'

    patterns.append('!' + dockerfile)
    pm = PatternMatcher(patterns)
    return set(pm.walk(root))
0.002033
def estimate_chi2mixture(self, lrt): """ estimates the parameters of a mixture of a chi-squared random variable of degree 0 and a scaled chi-squared random variable of degree d (1-mixture)*chi2(0) + (mixture)*scale*chi2(dof), where scale is the scaling parameter for the scales chi-square distribution dof are the degrees of freedom of the second component mixture is the probability of beeing in the first component input: lrt [Ntests] vector of test statistics """ """ step 1: estimate the probability of being in component one """ self.mixture = 1-(lrt<=self.tol).mean() n_false = SP.sum(lrt>self.tol) """ step 2: only use the largest qmax fraction of test statistics to estimate the remaining parameters """ n_fitting = SP.ceil(self.qmax * n_false) lrt_sorted = -SP.sort(-lrt)[:n_fitting] q = SP.linspace(0, 1,n_false)[1:n_fitting+1] log_q = SP.log10(q) """ step 3: fitting scale and dof by minimizing the squared error of the log10 p-values with their theorietical values [uniform distribution] """ MSE_opt = SP.inf MSE = SP.zeros((self.n_intervals,self.n_intervals)) for i,scale in enumerate(SP.linspace(self.scale_min,self.scale_max,self.n_intervals)): for j,dof in enumerate(SP.linspace(self.dof_min,self.dof_max,self.n_intervals)): p = STATS.chi2.sf(lrt_sorted/scale,dof) log_p = SP.log10(p) MSE[i,j] = SP.mean((log_q - log_p)**2) if MSE[i,j] < MSE_opt: MSE_opt = MSE[i,j] self.scale = scale self.dof = dof
0.018421
def get(self, key, local_default = None, required = False):
    """Get a parameter value.

    If parameter is not set, return `local_default` if it is not `None`
    or the PyXMPP global default otherwise.

    :Raise `KeyError`: if parameter has no value and no global default

    :Return: parameter value
    """
    # pylint: disable-msg=W0221
    if key in self._settings:
        return self._settings[key]
    if local_default is not None:
        return local_default
    if key in self._defs:
        setting_def = self._defs[key]
        if setting_def.default is not None:
            return setting_def.default
        factory = setting_def.factory
        if factory is None:
            return None
        value = factory(self)
        if setting_def.cache is True:
            setting_def.default = value
        return value
    if required:
        raise KeyError(key)
    return local_default
0.005929
def system_bus(**kwargs) :
    "returns a Connection object for the D-Bus system bus."
    return \
        Connection(dbus.Connection.bus_get(DBUS.BUS_SYSTEM, private = False)) \
        .register_additional_standard(**kwargs)
0.017621
def update_experiments(self): """Experiment mapping.""" # 693 Remove if 'not applicable' for field in record_get_field_instances(self.record, '693'): subs = field_get_subfields(field) all_subs = subs.get('a', []) + subs.get('e', []) if 'not applicable' in [x.lower() for x in all_subs]: record_delete_field(self.record, '693', field_position_global=field[4]) new_subs = [] experiment_a = "" experiment_e = "" for (key, value) in subs.iteritems(): if key == 'a': experiment_a = value[0] new_subs.append((key, value[0])) elif key == 'e': experiment_e = value[0] experiment = "%s---%s" % (experiment_a.replace(" ", "-"), experiment_e) translated_experiments = self.get_config_item(experiment, "experiments") new_subs.append(("e", translated_experiments)) record_delete_field(self.record, tag="693", field_position_global=field[4]) record_add_field(self.record, "693", subfields=new_subs)
0.001511
def default_icon_path(self):
    """Returns default path to icon of this assistant.

    Assuming self.path == "/foo/assistants/crt/python/django.yaml"
    For image format in [png, svg]:
    1) Take the path of this assistant and strip it of load path
       (=> "crt/python/django.yaml")
    2) Substitute its extension for <image format>
       (=> "crt/python/django.<image format>")
    3) Prepend self.load_path + 'icons'
       (=> "/foo/icons/crt/python/django.<image format>")
    4) If file from 3) exists, return it
    Return empty string if no icon found.
    """
    supported_exts = ['.png', '.svg']
    stripped = self.path.replace(os.path.join(self.load_path, 'assistants'), '').strip(os.sep)
    for ext in supported_exts:
        icon_with_ext = os.path.splitext(stripped)[0] + ext
        icon_fullpath = os.path.join(self.load_path, 'icons', icon_with_ext)
        if os.path.exists(icon_fullpath):
            return icon_fullpath
    return ''
0.003724
def get_updated(self, from_time, to_time=None):
    """
    Retrieves a list of series that have changed on TheTVDB since a provided from time
    parameter and optionally to a specified to time.

    :param from_time: An epoch representation of the date from which to restrict the query to.
    :param to_time: An optional epoch representation of the date to which to restrict the query to.
    :return: a python dictionary with either the result of the search or an error from TheTVDB.
    """
    arguments = locals()
    optional_parameters = {'to_time': 'toTime'}

    query_string = 'fromTime=%s&%s' % (from_time,
                                       utils.query_param_string_from_option_args(optional_parameters, arguments))

    raw_response = requests_util.run_request('get',
                                             self.API_BASE_URL + '/updated/query?%s' % query_string,
                                             headers=self.__get_header_with_auth())

    return self.parse_raw_response(raw_response)
0.008712
def get(self):
    """
    Get a JSON-ready representation of this Section.

    :returns: This Section, ready for use in a request body.
    :rtype: dict
    """
    section = {}
    if self.key is not None and self.value is not None:
        section[self.key] = self.value
    return section
0.006079
def deploy_from_template(self, si, logger, data_holder, vcenter_data_model, reservation_id,
                         cancellation_context):
    """
    :param cancellation_context:
    :param reservation_id:
    :param si:
    :param logger:
    :type data_holder: DeployFromTemplateDetails
    :type vcenter_data_model
    :rtype DeployAppResult:
    :return:
    """
    template_resource_model = data_holder.template_resource_model
    return self._deploy_a_clone(si,
                                logger,
                                data_holder.app_name,
                                template_resource_model.vcenter_template,
                                template_resource_model,
                                vcenter_data_model,
                                reservation_id,
                                cancellation_context)
0.003333
def multiple_subplots(rows=1, cols=1, maxplots=None, n=1, delete=True, for_maps=False, *args, **kwargs): """ Function to create subplots. This function creates so many subplots on so many figures until the specified number `n` is reached. Parameters ---------- rows: int The number of subplots per rows cols: int The number of subplots per column maxplots: int The number of subplots per figure (if None, it will be row*cols) n: int number of subplots to create delete: bool If True, the additional subplots per figure are deleted for_maps: bool If True this is a simple shortcut for setting ``subplot_kw=dict(projection=cartopy.crs.PlateCarree())`` and is useful if you want to use the :attr:`~ProjectPlotter.mapplot`, :attr:`~ProjectPlotter.mapvector` or :attr:`~ProjectPlotter.mapcombined` plotting methods ``*args`` and ``**kwargs`` anything that is passed to the :func:`matplotlib.pyplot.subplots` function Returns ------- list list of maplotlib.axes.SubplotBase instances""" import matplotlib.pyplot as plt axes = np.array([]) maxplots = maxplots or rows * cols kwargs.setdefault('figsize', [ min(8.*cols, 16), min(6.5*rows, 12)]) if for_maps: import cartopy.crs as ccrs subplot_kw = kwargs.setdefault('subplot_kw', {}) subplot_kw['projection'] = ccrs.PlateCarree() for i in range(0, n, maxplots): fig, ax = plt.subplots(rows, cols, *args, **kwargs) try: axes = np.append(axes, ax.ravel()[:maxplots]) if delete: for iax in range(maxplots, rows * cols): fig.delaxes(ax.ravel()[iax]) except AttributeError: # got a single subplot axes = np.append(axes, [ax]) if i + maxplots > n and delete: for ax2 in axes[n:]: fig.delaxes(ax2) axes = axes[:n] return axes
0.000485
def filter_macro(func, *args, **kwargs): """ Promotes a function that returns a filter into its own filter type. Example:: @filter_macro def String(): return Unicode | Strip | NotEmpty # You can now use `String` anywhere you would use a regular Filter: (String | Split(':')).apply('...') You can also use ``filter_macro`` to create partials, allowing you to preset one or more initialization arguments:: Minor = filter_macro(Max, max_value=18, inclusive=False) Minor(inclusive=True).apply(18) """ filter_partial = partial(func, *args, **kwargs) class FilterMacroMeta(FilterMeta): @staticmethod def __new__(mcs, name, bases, attrs): # This is as close as we can get to running # ``update_wrapper`` on a type. for attr in WRAPPER_ASSIGNMENTS: if hasattr(func, attr): attrs[attr] = getattr(func, attr) # Note that we ignore the ``name`` argument, passing in # ``func.__name__`` instead. return super(FilterMacroMeta, mcs)\ .__new__(mcs, func.__name__, bases, attrs) def __call__(cls, *runtime_args, **runtime_kwargs): return filter_partial(*runtime_args, **runtime_kwargs) class FilterMacro(with_metaclass(FilterMacroMeta, FilterMacroType)): # This method will probably never get called due to overloaded # ``__call__`` in the metaclass, but just in case, we'll include # it because it is an abstract method in `BaseFilter`. def _apply(self, value): # noinspection PyProtectedMember return self.__class__()._apply(value) return FilterMacro
0.000568
def callback_prototype(prototype): """Decorator to process a callback prototype. A callback prototype is a function whose signature includes all the values that will be passed by the callback API in question. The original function will be returned, with a ``prototype.adapt`` attribute which can be used to prepare third party callbacks. """ protosig = signature(prototype) positional, keyword = [], [] for name, param in protosig.parameters.items(): if param.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD): raise TypeError("*args/**kwargs not supported in prototypes") if (param.default is not Parameter.empty) \ or (param.kind == Parameter.KEYWORD_ONLY): keyword.append(name) else: positional.append(name) kwargs = dict.fromkeys(keyword) def adapt(callback): """Introspect and prepare a third party callback.""" sig = signature(callback) try: # XXX: callback can have extra optional parameters - OK? sig.bind(*positional, **kwargs) return callback except TypeError: pass # Match up arguments unmatched_pos = positional[:] unmatched_kw = kwargs.copy() unrecognised = [] # TODO: unrecognised parameters with default values - OK? for name, param in sig.parameters.items(): # print(name, param.kind) #DBG if param.kind == Parameter.POSITIONAL_ONLY: if len(unmatched_pos) > 0: unmatched_pos.pop(0) else: unrecognised.append(name) elif param.kind == Parameter.POSITIONAL_OR_KEYWORD: if (param.default is not Parameter.empty) and (name in unmatched_kw): unmatched_kw.pop(name) elif len(unmatched_pos) > 0: unmatched_pos.pop(0) else: unrecognised.append(name) elif param.kind == Parameter.VAR_POSITIONAL: unmatched_pos = [] elif param.kind == Parameter.KEYWORD_ONLY: if name in unmatched_kw: unmatched_kw.pop(name) else: unrecognised.append(name) else: # VAR_KEYWORD unmatched_kw = {} # print(unmatched_pos, unmatched_kw, unrecognised) #DBG if unrecognised: raise TypeError("Function {!r} had unmatched arguments: {}".format(callback, unrecognised)) n_positional = len(positional) - len(unmatched_pos) @wraps(callback) def adapted(*args, **kwargs): """Wrapper for third party callbacks that discards excess arguments""" # print(args, kwargs) args = args[:n_positional] for name in unmatched_kw: # XXX: Could name not be in kwargs? kwargs.pop(name) # print(args, kwargs, unmatched_pos, cut_positional, unmatched_kw) return callback(*args, **kwargs) return adapted prototype.adapt = adapt return prototype
0.004621
def reduce_configs(self): """Reduce the experiments to restart.""" experiment_ids = self.get_reduced_configs() experiments = self.experiment_group.experiments.filter(id__in=experiment_ids) self.create_iteration() iteration_config = self.experiment_group.iteration_config hptuning_config = self.experiment_group.hptuning_config n_resources = self.experiment_group.search_manager.get_resources_for_iteration( iteration=iteration_config.iteration) resource_value = self.experiment_group.search_manager.get_n_resources( n_resources=n_resources, bracket_iteration=iteration_config.bracket_iteration ) resource_name = hptuning_config.hyperband.resource.name resource_value = hptuning_config.hyperband.resource.cast_value(resource_value) # Check if we need to resume or restart the experiments for experiment in experiments: declarations = experiment.declarations declarations[resource_name] = resource_value declarations_spec = {'declarations': declarations} specification = experiment.specification.patch(declarations_spec) status_message = 'Hyperband iteration: {}, bracket iteration: {}'.format( iteration_config.iteration, iteration_config.bracket_iteration) if hptuning_config.hyperband.resume: experiment.resume( declarations=declarations, config=specification.parsed_data, message=status_message) else: experiment.restart( experiment_group=self.experiment_group, declarations=declarations, config=specification.parsed_data)
0.003842
def set_pairs(self):
    """
    %prog pairs <blastfile|samfile|bedfile>

    Report how many paired ends mapped, avg distance between paired ends, etc.
    Paired reads must have the same prefix, use --rclip to remove trailing
    part, e.g. /1, /2, or .f, .r, default behavior is to truncate until last
    char.
    """
    self.set_usage(self.set_pairs.__doc__)

    self.add_option("--pairsfile", default=None,
            help="Write valid pairs to pairsfile [default: %default]")
    self.add_option("--nrows", default=200000, type="int",
            help="Only use the first n lines [default: %default]")
    self.set_mates()
    self.add_option("--pdf", default=False, action="store_true",
            help="Print PDF instead ASCII histogram [default: %default]")
    self.add_option("--bins", default=20, type="int",
            help="Number of bins in the histogram [default: %default]")
    self.add_option("--distmode", default="ss", choices=("ss", "ee"),
            help="Distance mode between paired reads, ss is outer distance, " \
                 "ee is inner distance [default: %default]")
0.010093
def load_data(self, pdbid):
    """Loads and parses an XML resource and saves it as a tree if successful"""
    f = urlopen("http://projects.biotec.tu-dresden.de/plip-rest/pdb/%s?format=xml" % pdbid.lower())
    self.doc = etree.parse(f)
0.016064
def colRowIsOnSciencePixelList(self, col, row, padding=DEFAULT_PADDING):
    """similar to colRowIsOnSciencePixelList() but takes lists as input"""
    out = np.ones(len(col), dtype=bool)
    col_arr = np.array(col)
    row_arr = np.array(row)

    mask = np.bitwise_or(col_arr < 12. - padding, col_arr > 1111 + padding)
    out[mask] = False

    mask = np.bitwise_or(row_arr < 20. - padding, row_arr > 1043 + padding)
    out[mask] = False
    return out
0.004082
def QueryUsers(self, database_link, query, options=None):
    """Queries users in a database.

    :param str database_link: The link to the database.
    :param (str or dict) query:
    :param dict options: The request options for the request.

    :return: Query Iterable of Users.
    :rtype: query_iterable.QueryIterable
    """
    if options is None:
        options = {}

    path = base.GetPathFromLink(database_link, 'users')
    database_id = base.GetResourceIdOrFullNameFromLink(database_link)

    def fetch_fn(options):
        return self.__QueryFeed(path,
                                'users',
                                database_id,
                                lambda r: r['Users'],
                                lambda _, b: b,
                                query,
                                options), self.last_response_headers

    return query_iterable.QueryIterable(self, query, options, fetch_fn)
0.003742
def radiated_intensity(rho, i, j, epsilonp, rm, omega_level, xi, N, D, unfolding): r"""Return the radiated intensity in a given direction. >>> from fast import State, Integer, split_hyperfine_to_magnetic >>> g = State("Rb", 87, 5, 1, 3/Integer(2), 0) >>> e = State("Rb", 87, 4, 2, 5/Integer(2), 1) >>> magnetic_states = split_hyperfine_to_magnetic([g, e]) >>> omega0 = magnetic_states[0].omega >>> omega_level = [ei.omega - omega0 for ei in magnetic_states] >>> Ne = len(magnetic_states) >>> N = 4e6 >>> D = 0.1 >>> unfolding = Unfolding(Ne, True, True, True) >>> rho = np.zeros((Ne, Ne)) >>> rho[0, 0] = 0.8 >>> rho[3, 3] = 0.2 >>> rho[3, 0] = 0.3 >>> rho[0, 3] = 0.3 >>> rho = unfolding(rho) >>> ep = np.array([1, 1j, 0])/np.sqrt(2.0) >>> ex = np.array([1, 0, 0]) >>> r0 = 4.75278521538619e-11 >>> rm = np.zeros((3, Ne, Ne), complex) >>> rm[0, 1, 0] = -r0 >>> rm[0, 3, 0] = r0 >>> rm[1, 1, 0] = -1j*r0 >>> rm[1, 3, 0] = -1j*r0 >>> rm[1, 2, 0] = -np.sqrt(2)*r0 >>> xi = np.zeros((1, Ne, Ne)) >>> xi[0, 1, 0] = 1 >>> xi[0, 2, 0] = 1 >>> xi[0, 3, 0] = 1 >>> xi[0, :, :] += xi[0, :, :].transpose() >>> print(radiated_intensity(rho, 1, 0, ex, rm, ... omega_level, xi, N, D, unfolding)) 4.60125990174e-22 """ def inij(i, j, ilist, jlist): if (i in ilist) and (j in jlist): return 1 else: return 0 rm = np.array(rm) Nl = xi.shape[0] Ne = xi.shape[1] aux = define_simplification(omega_level, xi, Nl) u = aux[0] omega_levelu = aux[2] ui = u(i) uj = u(j) omegaij = omega_levelu[ui] - omega_levelu[uj] ilist = [ii for ii in range(Ne) if u(ii) == ui] jlist = [jj for jj in range(Ne) if u(jj) == uj] rp = np.array([rm[ii].conjugate().transpose() for ii in range(3)]) rm = np.array([[[rm[p, ii, jj]*inij(ii, jj, ilist, jlist) for jj in range(Ne)] for ii in range(Ne)] for p in range(3)]) rp = np.array([[[rp[p, ii, jj]*inij(jj, ii, ilist, jlist) for jj in range(Ne)] for ii in range(Ne)] for p in range(3)]) epsilonm = epsilonp.conjugate() Adag = cartesian_dot_product(rm, epsilonp) A = cartesian_dot_product(rp, epsilonm) fact = alpha_num*N*hbar_num*omegaij**3/2/np.pi/c_num**2/D**2 Iop = fact * np.dot(Adag, A) intensity = observable(Iop, rho, unfolding) intensity = float(np.real(intensity)) return intensity
0.000378
def worker(wrapped, dkwargs, hash_value=None, *args, **kwargs): """ This is an asynchronous sender callable that uses the Django ORM to store webhooks. Redis is used to handle the message queue. dkwargs argument requires the following key/values: :event: A string representing an event. kwargs argument requires the following key/values :owner: The user who created/owns the event """ if "event" not in dkwargs: msg = "djwebhooks.decorators.redis_hook requires an 'event' argument in the decorator." raise TypeError(msg) event = dkwargs['event'] if "owner" not in kwargs: msg = "djwebhooks.senders.redis_callable requires an 'owner' argument in the decorated function." raise TypeError(msg) owner = kwargs['owner'] if "identifier" not in kwargs: msg = "djwebhooks.senders.orm_callable requires an 'identifier' argument in the decorated function." raise TypeError(msg) identifier = kwargs['identifier'] senderobj = DjangoRQSenderable( wrapped, dkwargs, hash_value, WEBHOOK_ATTEMPTS, *args, **kwargs ) # Add the webhook object just so it's around # TODO - error handling if this can't be found senderobj.webhook_target = WebhookTarget.objects.get( event=event, owner=owner, identifier=identifier ) # Get the target url and add it senderobj.url = senderobj.webhook_target.target_url # Get the payload. This overides the senderobj.payload property. senderobj.payload = senderobj.get_payload() # Get the creator and add it to the payload. senderobj.payload['owner'] = getattr(kwargs['owner'], WEBHOOK_OWNER_FIELD) # get the event and add it to the payload senderobj.payload['event'] = dkwargs['event'] return senderobj.send()
0.002674
def toStr(self) : """returns a string version of the CSV""" s = [self.strLegend] for l in self.lines : s.append(l.toStr()) return self.lineSeparator.join(s)
0.05988
def get_formset(self): """Provide the formset corresponding to this DataTable. Use this to validate the formset and to get the submitted data back. """ if self._formset is None: self._formset = self.formset_class( self.request.POST or None, initial=self._get_formset_data(), prefix=self._meta.name) return self._formset
0.004762
def common_vector_root(vec1, vec2): """ Return common root of the two vectors. Args: vec1 (list/tuple): First vector. vec2 (list/tuple): Second vector. Usage example:: >>> common_vector_root([1, 2, 3, 4, 5], [1, 2, 8, 9, 0]) [1, 2] Returns: list: Common part of two vectors or blank list. """ root = [] for v1, v2 in zip(vec1, vec2): if v1 == v2: root.append(v1) else: return root return root
0.001949
def put(self, item, priority=None): """ Stores a transition in replay memory. If the memory is full, the oldest entry is replaced. """ if not self._isfull(): self._memory.append(None) position = self._next_position_then_increment() old_priority = 0 if self._memory[position] is None \ else (self._memory[position].priority or 0) row = _SumRow(item, priority) self._memory[position] = row self._update_internal_nodes( position, (row.priority or 0) - old_priority)
0.003454
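The put() above overwrites the oldest slot and pushes the change in priority up a sum tree. The ring-buffer half of that invariant can be shown without the tree; the sketch below keeps only a running total in place of the internal nodes and is not the real TensorForce memory.

from collections import namedtuple

Row = namedtuple("Row", "item priority")

class RingReplay:
    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        self.position = 0
        self.total_priority = 0.0  # stands in for the root of the sum tree

    def put(self, item, priority=0.0):
        if len(self.memory) < self.capacity:
            self.memory.append(None)
        old = self.memory[self.position]
        old_priority = 0.0 if old is None else old.priority
        self.memory[self.position] = Row(item, priority)
        self.total_priority += priority - old_priority  # the "internal node" update
        self.position = (self.position + 1) % self.capacity

buf = RingReplay(capacity=2)
for step, prio in enumerate([0.5, 1.0, 2.0]):
    buf.put("transition-%d" % step, prio)
print(buf.total_priority)  # 3.0 -- the overwritten slot's 0.5 was subtracted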
def whitelist(self, address: Address): """Whitelist peer address to receive communications from This may be called before transport is started, to ensure events generated during start are handled properly. """ self.log.debug('Whitelist', address=to_normalized_address(address)) self._address_mgr.add_address(address)
0.008219
def parse(cls, headers): """Returns a dictionary from HTTP header text. >>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n") >>> sorted(h.iteritems()) [('Content-Length', '42'), ('Content-Type', 'text/html')] """ h = cls() for line in headers.splitlines(): if line: h.parse_line(line) return h
0.007212
def create(self, index, doc_type, body, id=None, **query_params): """ Adds a typed JSON document in a specific index, making it searchable. Behind the scenes this method calls index(..., op_type='create') `<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html>`_ :param index: The name of the index :param doc_type: The type of the document :param body: The document :param id: Document ID :arg consistency: Explicit write consistency setting for the operation, valid choices are: 'one', 'quorum', 'all' :arg op_type: Explicit operation type, default 'index', valid choices are: 'index', 'create' :arg parent: ID of the parent document :arg refresh: Refresh the index after performing the operation :arg routing: Specific routing value :arg timeout: Explicit operation timeout :arg timestamp: Explicit timestamp for the document :arg ttl: Expiration time for the document :arg version: Explicit version number for concurrency control :arg version_type: Specific version type, valid choices are: 'internal', 'external', 'external_gte', 'force' """ query_params['op_type'] = 'create' result = yield self.index(index, doc_type, body, id=id, params=query_params) returnValue(result)
0.002823
def show(self): """Shows the main window and grabs the focus on it. """ self.hidden = False # setting window in all desktops window_rect = RectCalculator.set_final_window_rect(self.settings, self.window) self.window.stick() # add tab must be called before window.show to avoid a # blank screen before adding the tab. if not self.get_notebook().has_page(): self.add_tab() self.window.set_keep_below(False) self.window.show_all() # this is needed because self.window.show_all() results in showing every # thing which includes the scrollbar too self.settings.general.triggerOnChangedValue(self.settings.general, "use-scrollbar") # move the window even when in fullscreen-mode log.debug("Moving window to: %r", window_rect) self.window.move(window_rect.x, window_rect.y) # this works around an issue in fluxbox if not FullscreenManager(self.settings, self.window).is_fullscreen(): self.settings.general.triggerOnChangedValue(self.settings.general, 'window-height') time = get_server_time(self.window) # TODO PORT this # When minized, the window manager seems to refuse to resume # log.debug("self.window: %s. Dir=%s", type(self.window), dir(self.window)) # is_iconified = self.is_iconified() # if is_iconified: # log.debug("Is iconified. Ubuntu Trick => " # "removing skip_taskbar_hint and skip_pager_hint " # "so deiconify can work!") # self.get_widget('window-root').set_skip_taskbar_hint(False) # self.get_widget('window-root').set_skip_pager_hint(False) # self.get_widget('window-root').set_urgency_hint(False) # log.debug("get_skip_taskbar_hint: {}".format( # self.get_widget('window-root').get_skip_taskbar_hint())) # log.debug("get_skip_pager_hint: {}".format( # self.get_widget('window-root').get_skip_pager_hint())) # log.debug("get_urgency_hint: {}".format( # self.get_widget('window-root').get_urgency_hint())) # glib.timeout_add_seconds(1, lambda: self.timeout_restore(time)) # log.debug("order to present and deiconify") self.window.present() self.window.deiconify() self.window.show() self.window.get_window().focus(time) self.window.set_type_hint(Gdk.WindowTypeHint.DOCK) self.window.set_type_hint(Gdk.WindowTypeHint.NORMAL) # log.debug("Restoring skip_taskbar_hint and skip_pager_hint") # if is_iconified: # self.get_widget('window-root').set_skip_taskbar_hint(False) # self.get_widget('window-root').set_skip_pager_hint(False) # self.get_widget('window-root').set_urgency_hint(False) # This is here because vte color configuration works only after the # widget is shown. self.settings.styleFont.triggerOnChangedValue(self.settings.styleFont, 'color') self.settings.styleBackground.triggerOnChangedValue(self.settings.styleBackground, 'color') log.debug("Current window position: %r", self.window.get_position()) self.execute_hook('show')
0.002695
def contains_variance(arrays, names): """ Make sure both arrays for bivariate ("scatter") plot have a stddev > 0 """ for ar, name in zip(arrays, names): if np.std(ar) == 0: sys.stderr.write( "No variation in '{}', skipping bivariate plots.\n".format(name.lower())) logging.info("Nanoplotter: No variation in {}, skipping bivariate plot".format(name)) return False else: return True
0.006397
def iter_parts(self): """ Generate exactly one reference to each of the parts in the package by performing a depth-first traversal of the rels graph. """ def walk_parts(source, visited=list()): for rel in source.rels.values(): if rel.is_external: continue part = rel.target_part if part in visited: continue visited.append(part) yield part new_source = part for part in walk_parts(new_source, visited): yield part for part in walk_parts(self): yield part
0.002861
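The depth-first walk above can be exercised against a toy object graph. The FakeRel/FakePart classes below only mimic the rels/target_part shape the traversal relies on — they are illustrative stand-ins, not the real package API — and the shared 'theme' part shows why the visited list is needed.

class FakeRel:
    def __init__(self, target_part, is_external=False):
        self.target_part = target_part
        self.is_external = is_external

class FakePart:
    def __init__(self, name, rels=None):
        self.name = name
        self.rels = rels or {}

# document -> styles -> theme, and document -> theme (shared part, visited once)
theme = FakePart("theme")
styles = FakePart("styles", {"r1": FakeRel(theme)})
document = FakePart("document", {"r1": FakeRel(styles), "r2": FakeRel(theme)})

def walk_parts(source, visited=None):
    visited = visited if visited is not None else []
    for rel in source.rels.values():
        if rel.is_external:
            continue
        part = rel.target_part
        if part in visited:
            continue
        visited.append(part)
        yield part
        yield from walk_parts(part, visited)

print([p.name for p in walk_parts(document)])  # ['styles', 'theme']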
def desc(self, table): '''Returns table description >>> yql.desc('geo.countries') >>> ''' query = "desc {0}".format(table) response = self.raw_query(query) return response
0.008772
def statuses_update(self, status, in_reply_to_status_id=None, lat=None, long=None, place_id=None, display_coordinates=None, trim_user=None, media_ids=None): """ Posts a tweet. https://dev.twitter.com/docs/api/1.1/post/statuses/update :param str status: (*required*) The text of your tweet, typically up to 140 characters. URL encode as necessary. t.co link wrapping may affect character counts. There are some special commands in this field to be aware of. For instance, preceding a message with "D " or "M " and following it with a screen name can create a direct message to that user if the relationship allows for it. :param str in_reply_to_status_id: The ID of an existing status that the update is in reply to. Note: This parameter will be ignored unless the author of the tweet this parameter references is mentioned within the status text. Therefore, you must include @username, where username is the author of the referenced tweet, within ``status``. :param float lat: The latitude of the location this tweet refers to. This parameter will be ignored unless it is inside the range -90.0 to +90.0 (North is positive) inclusive. It will also be ignored if there isn't a corresponding long parameter. :param float long: The longitude of the location this tweet refers to. The valid ranges for longitude is -180.0 to +180.0 (East is positive) inclusive. This parameter will be ignored if outside that range, if it is not a number, if geo_enabled is disabled, or if there not a corresponding lat parameter. :param str place_id: A place in the world. These IDs can be retrieved from GET geo/reverse_geocode. (TODO: Reference method when it exists.) :param bool display_coordinates: Whether or not to put a pin on the exact coordinates a tweet has been sent from. :param bool trim_user: When set to ``True``, the return value's user object includes only the status author's numerical ID. :param list media_ids: A list of images previously uploaded to Twitter (referenced by their ``media_id``) that are to be embedded in the tweet. Maximum of four images. :returns: A tweet dict containing the posted tweet. """ params = {} set_str_param(params, 'status', status) set_str_param(params, 'in_reply_to_status_id', in_reply_to_status_id) set_float_param(params, 'lat', lat, min=-90, max=90) set_float_param(params, 'long', long, min=-180, max=180) set_str_param(params, 'place_id', place_id) set_bool_param(params, 'display_coordinates', display_coordinates) set_bool_param(params, 'trim_user', trim_user) set_list_param(params, 'media_ids', media_ids, max_len=4) return self._post_api('statuses/update.json', params)
0.001238
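The set_str_param / set_float_param / set_bool_param helpers used above come from the surrounding library and are not shown here. Minimal hypothetical versions, reconstructed only from how they are called and from the docstring's remark that out-of-range coordinates are ignored, might look like this:

def set_str_param(params, name, value):
    # Only include parameters the caller actually supplied.
    if value is not None:
        params[name] = str(value)

def set_float_param(params, name, value, min=None, max=None):
    # Per the docstring above, out-of-range values are silently dropped.
    if value is None:
        return
    value = float(value)
    if (min is not None and value < min) or (max is not None and value > max):
        return
    params[name] = value

params = {}
set_str_param(params, 'status', 'hello world')
set_float_param(params, 'lat', 52.37, min=-90, max=90)
set_float_param(params, 'long', 500.0, min=-180, max=180)  # ignored: out of range
print(params)  # {'status': 'hello world', 'lat': 52.37}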
def _legacy_handle_registration(config, pconn): ''' Handle the registration process Returns: True - machine is registered False - machine is unregistered None - could not reach the API ''' logger.debug('Trying registration.') # force-reregister -- remove machine-id files and registration files # before trying to register again if config.reregister: delete_registered_file() delete_unregistered_file() write_to_disk(constants.machine_id_file, delete=True) logger.debug('Re-register set, forcing registration.') logger.debug('Machine-id: %s', generate_machine_id(new=config.reregister)) # check registration with API check = get_registration_status(config, pconn) for m in check['messages']: logger.debug(m) if check['unreachable']: # Run connection test and exit return None if check['status']: # registered in API, resync files if config.register: logger.info('This host has already been registered.') write_registered_file() return True if config.register: # register if specified message, hostname, group, display_name = register(config, pconn) if not hostname: # API could not be reached, run connection test and exit logger.error(message) return None if config.display_name is None and config.group is None: logger.info('Successfully registered host %s', hostname) elif config.display_name is None: logger.info('Successfully registered host %s in group %s', hostname, group) else: logger.info('Successfully registered host %s as %s in group %s', hostname, display_name, group) if message: logger.info(message) write_registered_file() return True else: # unregistered in API, resync files write_unregistered_file(date=check['unreg_date']) # print messaging and exit if check['unreg_date']: # registered and then unregistered logger.info('This machine has been unregistered. ' 'Use --register if you would like to ' 're-register this machine.') else: # not yet registered logger.info('This machine has not yet been registered.' 'Use --register to register this machine.') return False
0.000392
def LockRetryWrapper(self, subject, retrywrap_timeout=1, retrywrap_max_timeout=10, blocking=True, lease_time=None): """Retry a DBSubjectLock until it succeeds. Args: subject: The subject which the lock applies to. retrywrap_timeout: How long to wait before retrying the lock. retrywrap_max_timeout: The maximum time to wait for a retry until we raise. blocking: If False, raise on first lock failure. lease_time: lock lease time in seconds. Returns: The DBSubjectLock object Raises: DBSubjectLockError: If the maximum retry count has been reached. """ timeout = 0 while timeout < retrywrap_max_timeout: try: return self.DBSubjectLock(subject, lease_time=lease_time) except DBSubjectLockError: if not blocking: raise stats_collector_instance.Get().IncrementCounter("datastore_retries") time.sleep(retrywrap_timeout) timeout += retrywrap_timeout raise DBSubjectLockError("Retry number exceeded.")
0.007779
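The method above is a generic poll-until-acquired loop around DBSubjectLock. Stripped of the GRR specifics (the stats counter, the data store), the same pattern looks like the dependency-free sketch below; LockBusyError and try_acquire are stand-ins for the real lock API.

import time

class LockBusyError(Exception):
    pass

def acquire_with_retries(try_acquire, retry_wait=1, max_wait=10, blocking=True):
    """Call try_acquire() until it succeeds or max_wait seconds of retries elapse."""
    waited = 0
    while waited < max_wait:
        try:
            return try_acquire()
        except LockBusyError:
            if not blocking:
                raise
            time.sleep(retry_wait)
            waited += retry_wait
    raise LockBusyError("Retry budget exceeded.")

# Usage: a lock that frees itself after two failed attempts.
attempts = {"n": 0}
def try_acquire():
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise LockBusyError()
    return "lock-handle"

print(acquire_with_retries(try_acquire, retry_wait=0.01))  # 'lock-handle'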
def users_update(self, user_id, **kwargs): """Update an existing user.""" return self.__call_api_post('users.update', userId=user_id, data=kwargs)
0.018519
def clean_cache(self, request):
    """
    Remove all MenuItems from the cache.
    """
    treenav.delete_cache()
    self.message_user(request, _('MenuItem cache cleaned successfully.'))
    info = self.model._meta.app_label, self.model._meta.model_name
    changelist_url = reverse('admin:%s_%s_changelist' % info, current_app=self.admin_site.name)
    return redirect(changelist_url)
0.009479
def update_configuration(app): """Update parameters which are dependent on information from the project-specific conf.py (including its location on the filesystem)""" config = app.config project = config.project config_dir = app.env.srcdir sys.path.insert(0, os.path.join(config_dir, '..')) config.html_theme_path.append(os.path.relpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'themes'), config_dir)) if not config.html_logo: config.html_logo = os.path.relpath(os.path.join(STATIC_PATH, 'safari_logo.png'), config_dir) if not config.html_favicon: config.html_favicon = os.path.relpath(os.path.join(STATIC_PATH, 'favicon.ico'), config_dir) config.html_static_path.append(os.path.relpath(STATIC_PATH, config_dir)) if not config.htmlhelp_basename: config.htmlhelp_basename = '%sdoc' % project if not config.latex_logo: config.latex_logo = os.path.relpath(os.path.join(STATIC_PATH, 'safari_logo.png'), config_dir) if not config.epub_title: config.epub_title = u'%s Documentation' % project if not config.epub_publisher: config.epub_publisher = config.epub_author if not config.epub_copyright: config.epub_copyright = config.copyright config.latex_documents.append( (master_doc, '%s.tex' % project, u'%s Documentation' % project, u'Safari', 'manual')) config.man_pages.append( (master_doc, project, u'%s Documentation' % project, [u'Safari'], 1)) config.texinfo_documents.append( (master_doc, project, u'%s Documentation' % project, u'Safari', project, 'One line description of project.', 'Miscellaneous')) # Parse the version number from setup.py without actually running setup() with open(os.path.join(config_dir, '..', 'setup.py'), 'r') as f: content = f.read() match = re.search(r"version\s*=\s*['\"]([\d\.]+)['\"]", content) if match: config.version = match.group(1) config.release = config.version
0.002323
def wall_factor_fd(mu, mu_wall, turbulent=True, liquid=False): r'''Computes the wall correction factor for pressure drop due to friction between a fluid and a wall. These coefficients were derived for internal flow inside a pipe, but can be used elsewhere where appropriate data is missing. .. math:: \frac{f_d}{f_{d,\text{constant properties}}} = \left(\frac{\mu}{\mu_{wall}}\right)^n Parameters ---------- mu : float Viscosity (or Prandtl number) of flowing fluid away from the wall, [Pa*s] mu_wall : float Viscosity (or Prandtl number) of the fluid at the wall, [Pa*s] turbulent : bool Whether or not to use the turbulent coefficient, [-] liquid : bool Whether or not to use the liquid phase coefficient; otherwise the gas coefficient is used, [-] Returns ------- factor : float Correction factor for pressure loss; to be multiplied by the friction factor, or pressure drop to obtain the actual result, [-] Notes ----- The exponents are determined as follows: +-----------+--------+---------+---------+ | Regime | Phase | Heating | Cooling | +===========+========+=========+=========+ | Turbulent | Liquid | -0.25 | -0.25 | +-----------+--------+---------+---------+ | Turbulent | Gas | 0.1 | 0.1 | +-----------+--------+---------+---------+ | Laminar | Liquid | -0.58 | -0.5 | +-----------+--------+---------+---------+ | Laminar | Gas | -1 | -1 | +-----------+--------+---------+---------+ Examples -------- >>> wall_factor_fd(mu=8E-4, mu_wall=3E-4, turbulent=True, liquid=True) 0.7825422900366437 References ---------- .. [1] Kays, William M., and Michael E. Crawford. Convective Heat and Mass Transfer. 3rd edition. New York: McGraw-Hill Science/Engineering/Math, 1993. ''' params = wall_factor_fd_defaults[(turbulent, liquid)] return wall_factor(mu=mu, mu_wall=mu_wall, **params)
0.004739
def create_new_example(self, foo='', a='', b=''): """Entity object factory.""" return create_new_example(foo=foo, a=a, b=b)
0.014388
def plot_op(fn, inputs=[], outputs=[]):
    """
    User-exposed API method for constructing a python_node

    Args:
    fn: python function that computes some np.ndarrays given np.ndarrays as inputs. it
        can have arbitrary side effects.
    inputs: array of tf.Tensors (optional). These are where fn derives its values from
    outputs: tf.Placeholder nodes (optional). These are constructed by the user (which
        allows the user to plug them into other ht.Ops or tf.Ops). The outputs of fn are
        mapped to each of the output placeholders.

    Raises an Error if the outputs of fn cannot be mapped to the output placeholders.
    """
    global COUNT, ht

    # check outputs: every output node must be a Placeholder
    if not isinstance(outputs, list):
        outputs = [outputs]

    for tensor in outputs:
        if tensor.op.type != 'Placeholder':
            raise Error('Output nodes must be Placeholders')

    op = PlotOp(fn, COUNT, inputs, outputs)
    op_store.add_op(op)
    COUNT += 1

    # if node has output, return value for python_op is the first output (placeholder) tensor
    # otherwise, return the op
    if outputs:
        return outputs[0]
    else:
        return op
0.035573
def coderef_to_ecoclass(self, code, reference=None): """ Map a GAF code to an ECO class Arguments --------- code : str GAF evidence code, e.g. ISS, IDA reference: str CURIE for a reference for the evidence instance. E.g. GO_REF:0000001. Optional - If provided can give a mapping to a more specific ECO class Return ------ str ECO class CURIE/ID """ mcls = None for (this_code,this_ref,cls) in self.mappings(): if str(this_code) == str(code): if this_ref == reference: return cls if this_ref is None: mcls = cls return mcls
0.01023
def diff(self, test_id_1, test_id_2, config=None, **kwargs): """ Create a diff report using test_id_1 as a baseline :param: test_id_1: test id to be used as baseline :param: test_id_2: test id to compare against baseline :param: config file for diff (optional) :param: **kwargs: keyword arguments """ output_directory = os.path.join(self._output_directory, 'diff_' + str(test_id_1) + '_' + str(test_id_2)) if kwargs: if 'output_directory' in kwargs.keys(): output_directory = kwargs['output_directory'] diff_report = Diff([NaaradReport(self._analyses[test_id_1].output_directory, None), NaaradReport(self._analyses[test_id_2].output_directory, None)], 'diff', output_directory, os.path.join(output_directory, self._resource_path), self._resource_path) if config: naarad.utils.extract_diff_sla_from_config_file(diff_report, config) diff_report.generate() if diff_report.sla_failures > 0: return CONSTANTS.SLA_FAILURE if diff_report.status != 'OK': return CONSTANTS.ERROR return CONSTANTS.OK
0.007819
def uncompressed(self):
    """Handle an uncompressed meta-block: read the ISUNCOMPRESSED bit and, if it
    is set, copy MLEN raw bytes straight to the output. Returns True if the
    data was stored uncompressed."""
    ISUNCOMPRESSED = self.verboseRead(
        BoolCode('UNCMPR', description='Is uncompressed?'))
    if ISUNCOMPRESSED:
        self.verboseRead(FillerAlphabet(streamPos=self.stream.pos))
        print('Uncompressed data:')
        self.output += self.stream.readBytes(self.MLEN)
        print(outputFormatter(self.output[-self.MLEN:]))
    return ISUNCOMPRESSED
0.004193
def __request_start(self, queue_item): """Execute the request in given queue item. Args: queue_item (:class:`nyawc.QueueItem`): The request/response pair to scrape. """ try: action = self.__options.callbacks.request_before_start(self.queue, queue_item) except Exception as e: action = None print(e) print(traceback.format_exc()) if action == CrawlerActions.DO_STOP_CRAWLING: self.__should_stop = True if action == CrawlerActions.DO_SKIP_TO_NEXT: self.queue.move(queue_item, QueueItem.STATUS_FINISHED) self.__should_spawn_new_requests = True if action == CrawlerActions.DO_CONTINUE_CRAWLING or action is None: self.queue.move(queue_item, QueueItem.STATUS_IN_PROGRESS) thread = CrawlerThread(self.__request_finish, self.__lock, self.__options, queue_item) self.__threads[queue_item.get_hash()] = thread thread.daemon = True thread.start()
0.004704
def items(self):
    """
    Return an iterator over all (key, value) pairs stored in the trie.
    """
    L = []

    def aux(node, s):
        s = s + node.char
        if node.output is not nil:
            L.append((s, node.output))

        for child in node.children.values():
            if child is not node:
                aux(child, s)

    aux(self.root, '')
    return iter(L)
0.049231
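A toy trie with the same node shape (char, output, children) makes the traversal above easy to run end to end; nil is modelled as None here, whereas the real module defines its own sentinel.

nil = None  # stand-in for the module's "no output" sentinel

class Node:
    def __init__(self, char='', output=nil):
        self.char = char
        self.output = output
        self.children = {}

def insert(root, key, value):
    node = root
    for ch in key:
        node = node.children.setdefault(ch, Node(ch))
    node.output = value

def items(root):
    found = []
    def aux(node, prefix):
        prefix = prefix + node.char
        if node.output is not nil:
            found.append((prefix, node.output))
        for child in node.children.values():
            aux(child, prefix)
    aux(root, '')
    return iter(found)

root = Node()
insert(root, "he", 1)
insert(root, "her", 2)
print(sorted(items(root)))  # [('he', 1), ('her', 2)]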
def iterate(self, image, feature_extractor, feature_vector): """iterate(image, feature_extractor, feature_vector) -> bounding_box Scales the given image, and extracts features from all possible bounding boxes. For each of the sampled bounding boxes, this function fills the given pre-allocated feature vector and yields the current bounding box. **Parameters:** ``image`` : array_like(2D) The given image to extract features for ``feature_extractor`` : :py:class:`FeatureExtractor` The feature extractor to use to extract the features for the sampled patches ``feature_vector`` : :py:class:`numpy.ndarray` (1D, uint16) The pre-allocated feature vector that will be filled inside this function; needs to be of size :py:attr:`FeatureExtractor.number_of_features` **Yields:** ``bounding_box`` : :py:class:`BoundingBox` The bounding box for which the current features are extracted for """ for scale, scaled_image_shape in self.scales(image): # prepare the feature extractor to extract features from the given image feature_extractor.prepare(image, scale) for bb in self.sample_scaled(scaled_image_shape): # extract features for feature_extractor.extract_indexed(bb, feature_vector) yield bb.scale(1./scale)
0.006056
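The sampler above walks a grid of candidate boxes at every scale of the image pyramid. A rough, standalone illustration of that sampling — the patch size, scale factors and stride below are made up, and the real Scaler exposes its own scales() and sample_scaled():

import numpy as np

def sliding_boxes(image_shape, patch=(24, 20), scales=(1.0, 0.5), stride=8):
    """Yield (scale, top, left) for every patch position at every scale."""
    for scale in scales:
        scaled_h = int(image_shape[0] * scale)
        scaled_w = int(image_shape[1] * scale)
        for top in range(0, scaled_h - patch[0] + 1, stride):
            for left in range(0, scaled_w - patch[1] + 1, stride):
                yield scale, top, left

image = np.zeros((48, 64))
boxes = list(sliding_boxes(image.shape))
print(len(boxes), boxes[0], boxes[-1])  # 26 boxes across the two scales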
def def_emb_sz(classes, n, sz_dict=None): "Pick an embedding size for `n` depending on `classes` if not given in `sz_dict`." sz_dict = ifnone(sz_dict, {}) n_cat = len(classes[n]) sz = sz_dict.get(n, int(emb_sz_rule(n_cat))) # rule of thumb return n_cat,sz
0.01087
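emb_sz_rule, referenced above, is fastai's cardinality-to-width heuristic; the standalone version below uses the commonly cited form min(600, round(1.6 * n^0.56)), which should be checked against the installed fastai release before relying on the exact numbers.

def emb_sz_rule(n_cat):
    # fastai-style rule of thumb: grow slowly with cardinality, cap at 600
    return min(600, round(1.6 * n_cat ** 0.56))

def pick_emb_sz(classes, name, sz_dict=None):
    sz_dict = sz_dict or {}
    n_cat = len(classes[name])
    return n_cat, sz_dict.get(name, emb_sz_rule(n_cat))

classes = {'weekday': list(range(7)), 'store_id': list(range(1000))}
print(pick_emb_sz(classes, 'weekday'))                      # (7, 5)
print(pick_emb_sz(classes, 'store_id'))                     # (1000, 77)
print(pick_emb_sz(classes, 'store_id', {'store_id': 32}))   # (1000, 32) -- explicit override wins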
def unsafe_ask(self, patch_stdout: bool = False) -> Any: """Ask the question synchronously and return user response. Does not catch keyboard interrupts.""" if patch_stdout: with prompt_toolkit.patch_stdout.patch_stdout(): return self.application.run() else: return self.application.run()
0.00554
def fuse_batchnorm_weights(gamma, beta, mean, var, epsilon): # https://github.com/Tencent/ncnn/blob/master/src/layer/batchnorm.cpp """ float sqrt_var = sqrt(var_data[i]); a_data[i] = bias_data[i] - slope_data[i] * mean_data[i] / sqrt_var; b_data[i] = slope_data[i] / sqrt_var; ... ptr[i] = b * ptr[i] + a; """ scale = gamma / np.sqrt(var + epsilon) bias = beta - gamma * mean / np.sqrt(var + epsilon) return [scale, bias]
0.002096
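A quick numerical check of the fusion above: folding (gamma, beta, mean, var) into a per-channel scale and bias must reproduce the textbook batch-norm formula on arbitrary inputs.

import numpy as np

def fuse(gamma, beta, mean, var, eps):
    scale = gamma / np.sqrt(var + eps)
    bias = beta - gamma * mean / np.sqrt(var + eps)
    return scale, bias

rng = np.random.default_rng(0)
gamma, beta = rng.normal(size=4), rng.normal(size=4)
mean, var = rng.normal(size=4), rng.uniform(0.1, 2.0, size=4)
x = rng.normal(size=(8, 4))
eps = 1e-5

full = gamma * (x - mean) / np.sqrt(var + eps) + beta   # textbook batch norm
scale, bias = fuse(gamma, beta, mean, var, eps)
fused = scale * x + bias                                # the folded affine form
print(np.allclose(full, fused))                         # True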
def call_webhook(event, webhook, payload):
    """Build a request from the event, webhook and payload, dispatch it, and parse the response."""
    started_at = time()
    request = _build_request_for_calling_webhook(event, webhook, payload)
    logger.info('REQUEST %(uuid)s %(method)s %(url)s %(payload)s' % dict(
        uuid=str(event['uuid']),
        url=request['url'],
        method=request['method'],
        payload=payload,
    ))
    try:
        content = dispatch_webhook_request(**request)
        logger.debug('RESPONSE %(uuid)s %(method)s %(url)s %(data)s' % dict(
            uuid=str(event['uuid']),
            url=request['url'],
            method=request['method'],
            data=content,
        ))
        data = dict(
            parent=str(event['uuid']),
            content=content,
            started_at=started_at,
            ended_at=time()
        )
    except (FailureWebhookError, ConnectionError) as exception:
        if sentry.client:
            http_context = raven_context(**request)
            sentry.captureException(data={'request': http_context})
        logger.error('RESPONSE %(uuid)s %(method)s %(url)s %(error)s' % dict(
            uuid=str(event['uuid']),
            method=request['method'],
            url=request['url'],
            error=exception.message,))
        data = dict(
            parent=str(event['uuid']),
            error=exception.message,
            started_at=started_at,
            ended_at=time(),
        )
    webhook_ran.send(None, data=data)
    return data
0.000656
def queryMulti(self, queries):
    """
    Execute a series of Deletes, Inserts, and Updates in the queries list

    @author: Nick Verbeck
    @since: 9/7/2008
    """
    self.lastError = None
    self.affectedRows = 0
    self.rowcount = None
    self.record = None
    cursor = None
    try:
        try:
            self._GetConnection()
            # Execute each query and accumulate the affected row counts
            cursor = self.conn.getCursor()
            for query in queries:
                self.conn.query = query
                if query.__class__ == [].__class__:
                    self.affectedRows += cursor.execute(query[0], query[1])
                else:
                    self.affectedRows += cursor.execute(query)
            self.conn.updateCheckTime()
        except Exception as e:
            self.lastError = e
    finally:
        if cursor is not None:
            cursor.close()
        self._ReturnConnection()

        if self.lastError is not None:
            raise self.lastError
        else:
            return self.affectedRows
0.045402
def get_platform_metadata(self, platform, build_annotations): """ Return the metadata for the given platform. """ # retrieve all the workspace data build_info = get_worker_build_info(self.workflow, platform) osbs = build_info.osbs kind = "configmap/" cmlen = len(kind) cm_key_tmp = build_annotations['metadata_fragment'] cm_frag_key = build_annotations['metadata_fragment_key'] if not cm_key_tmp or not cm_frag_key or cm_key_tmp[:cmlen] != kind: msg = "Bad ConfigMap annotations for platform {}".format(platform) self.log.warning(msg) raise BadConfigMapError(msg) # use the key to get the configmap data and then use the # fragment_key to get the build metadata inside the configmap data # save the worker_build metadata cm_key = cm_key_tmp[cmlen:] try: cm_data = osbs.get_config_map(cm_key) except Exception: self.log.error("Failed to get ConfigMap for platform %s", platform) raise metadata = cm_data.get_data_by_key(cm_frag_key) defer_removal(self.workflow, cm_key, osbs) return metadata
0.001595
def _get_cgroup_measurements(self, cgroups, ru_child, result): """ This method calculates the exact results for time and memory measurements. It is not important to call this method as soon as possible after the run. """ logging.debug("Getting cgroup measurements.") cputime_wait = ru_child.ru_utime + ru_child.ru_stime if ru_child else 0 cputime_cgroups = None if CPUACCT in cgroups: # We want to read the value from the cgroup. # The documentation warns about outdated values. # So we read twice with 0.1s time difference, # and continue reading as long as the values differ. # This has never happened except when interrupting the script with Ctrl+C, # but just try to be on the safe side here. tmp = cgroups.read_cputime() tmp2 = None while tmp != tmp2: time.sleep(0.1) tmp2 = tmp tmp = cgroups.read_cputime() cputime_cgroups = tmp # Usually cputime_cgroups seems to be 0.01s greater than cputime_wait. # Furthermore, cputime_wait might miss some subprocesses, # therefore we expect cputime_cgroups to be always greater (and more correct). # However, sometimes cputime_wait is a little bit bigger than cputime2. # For small values, this is probably because cputime_wait counts since fork, # whereas cputime_cgroups counts only after cgroups.add_task() # (so overhead from runexecutor is correctly excluded in cputime_cgroups). # For large values, a difference may also indicate a problem with cgroups, # for example another process moving our benchmarked process between cgroups, # thus we warn if the difference is substantial and take the larger cputime_wait value. if cputime_wait > 0.5 and (cputime_wait * 0.95) > cputime_cgroups: logging.warning( 'Cputime measured by wait was %s, cputime measured by cgroup was only %s, ' 'perhaps measurement is flawed.', cputime_wait, cputime_cgroups) result['cputime'] = cputime_wait else: result['cputime'] = cputime_cgroups for (core, coretime) in enumerate(cgroups.get_value(CPUACCT, 'usage_percpu').split(" ")): try: coretime = int(coretime) if coretime != 0: result['cputime-cpu'+str(core)] = coretime/1000000000 # nano-seconds to seconds except (OSError, ValueError) as e: logging.debug("Could not read CPU time for core %s from kernel: %s", core, e) else: # For backwards compatibility, we report cputime_wait on systems without cpuacct cgroup. # TOOD We might remove this for BenchExec 2.0. result['cputime'] = cputime_wait if MEMORY in cgroups: # This measurement reads the maximum number of bytes of RAM+Swap the process used. # For more details, c.f. the kernel documentation: # https://www.kernel.org/doc/Documentation/cgroups/memory.txt memUsageFile = 'memsw.max_usage_in_bytes' if not cgroups.has_value(MEMORY, memUsageFile): memUsageFile = 'max_usage_in_bytes' if not cgroups.has_value(MEMORY, memUsageFile): logging.warning('Memory-usage is not available due to missing files.') else: try: result['memory'] = int(cgroups.get_value(MEMORY, memUsageFile)) except IOError as e: if e.errno == errno.ENOTSUP: # kernel responds with operation unsupported if this is disabled logging.critical( "Kernel does not track swap memory usage, cannot measure memory usage." 
" Please set swapaccount=1 on your kernel command line.") else: raise e if BLKIO in cgroups: blkio_bytes_file = 'throttle.io_service_bytes' if cgroups.has_value(BLKIO, blkio_bytes_file): bytes_read = 0 bytes_written = 0 for blkio_line in cgroups.get_file_lines(BLKIO, blkio_bytes_file): try: dev_no, io_type, bytes_amount = blkio_line.split(' ') if io_type == "Read": bytes_read += int(bytes_amount) elif io_type == "Write": bytes_written += int(bytes_amount) except ValueError: pass # There are irrelevant lines in this file with a different structure result['blkio-read'] = bytes_read result['blkio-write'] = bytes_written logging.debug( 'Resource usage of run: walltime=%s, cputime=%s, cgroup-cputime=%s, memory=%s', result.get('walltime'), cputime_wait, cputime_cgroups, result.get('memory', None))
0.005916
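The double-read loop above guards against stale cpuacct values. The same pattern, reduced to a direct, bounded read of the cgroup-v1 accounting file (Linux-only, so the path is probed first; on cgroup v2 the file lives elsewhere):

import os
import time

CPUACCT_USAGE = "/sys/fs/cgroup/cpuacct/cpuacct.usage"  # cgroup v1 layout, may not exist

def read_cputime():
    with open(CPUACCT_USAGE) as f:
        return int(f.read()) / 1e9  # nanoseconds -> seconds

def stable_cputime(max_tries=5):
    # Re-read until two consecutive samples agree (bounded, since on a live
    # system the counter may keep moving), as the kernel documentation warns
    # that a single read may be slightly stale.
    previous = read_cputime()
    for _ in range(max_tries):
        time.sleep(0.1)
        current = read_cputime()
        if current == previous:
            break
        previous = current
    return previous

if os.path.exists(CPUACCT_USAGE):
    print("cgroup cputime: %.3fs" % stable_cputime())
else:
    print("cpuacct cgroup not mounted here; nothing to measure")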
def get_var_shape(self, name): """ Return shape of the array. """ rank = self.get_var_rank(name) name = create_string_buffer(name) arraytype = ndpointer(dtype='int32', ndim=1, shape=(MAXDIMS, ), flags='F') shape = np.empty((MAXDIMS, ), dtype='int32', order='F') self.library.get_var_shape.argtypes = [c_char_p, arraytype] self.library.get_var_shape(name, shape) return tuple(shape[:rank])
0.003584