def get_default_config_help(self):
    """
    Returns the help text for the configuration options for this handler
    """
    config = super(LibratoHandler, self).get_default_config_help()
    config.update({
        'user': 'Librato username',
        'apikey': 'Librato API key',
        'apply_metric_prefix': 'Allow diamond to apply metric prefix',
        'queue_max_size': 'Max measurements to queue before submitting',
        'queue_max_interval':
            'Max seconds to wait before submitting. For best behavior, '
            'be sure your highest collector poll interval is lower than '
            'or equal to the queue_max_interval setting.',
        'include_filters':
            'A list of regex patterns. Only measurements whose path '
            'matches a filter will be submitted. Useful for limiting '
            'usage to *only* desired measurements, e.g. '
            '`"^diskspace\..*\.byte_avail$", "^loadavg\.01"` or '
            '`"^sockets\.",` (note trailing comma to indicate a list)',
    })
    return config

def sequential_connect(self):
    """
    Sequential connect is designed to return a connection to the
    Rendezvous Server but it does so in a way that the local port
    ranges (both for the server and used for subsequent hole
    punching) are allocated sequentially and predictably. This is
    because Delta+1 type NATs only preserve the delta value when
    the source ports increase by one.
    """
    # Connect to rendezvous server.
    try:
        mappings = sequential_bind(self.mapping_no + 1, self.interface)
        con = self.server_connect(mappings[0]["sock"])
    except Exception as e:
        log.debug(e)
        log.debug("this err")
        return None
    # First mapping is used to talk to server.
    mappings.remove(mappings[0])
    # Receive port mapping.
    msg = "SOURCE TCP %s" % (str(mappings[0]["source"]))
    con.send_line(msg)
    reply = con.recv_line(timeout=2)
    remote_port = self.parse_remote_port(reply)
    if not remote_port:
        return None
    # Generate port predictions.
    predictions = ""
    if self.nat_type != "random":
        mappings = self.predict_mappings(mappings)
        for mapping in mappings:
            predictions += str(mapping["remote"]) + " "
        predictions = predictions.rstrip()
    else:
        predictions = "1337"
    return [con, mappings, predictions]

def form_field(self):
    "Returns appropriate form field."
    label = unicode(self)
    defaults = dict(required=False, label=label, widget=self.widget)
    defaults.update(self.extra)
    return self.field_class(**defaults)

def get_subset_ids(path):
    """ Return a list with ids of all available subsets (based on existing tsv-files). """
    subset_ids = []
    for file_path in glob.glob(os.path.join(path, '*.tsv')):
        file_name = os.path.split(file_path)[1]
        basename = os.path.splitext(file_name)[0]
        subset_ids.append(basename)
    return subset_ids

def init(self, address, hard_reset=False):
    """Open the serial connection to a dongle at the supplied address.
    Args:
        address (str): the serial port address of the BLED112 dongle, e.g. 'COM5'
        hard_reset (bool): not currently used
    Returns:
        True if a connection with the dongle was established, False otherwise.
    """
    self.address = address
    if hard_reset:
        # TODO (needs more work to be usable)
        # if not Dongle._hard_reset(address):
        #     return False
        # time.sleep(2.0)
        pass
    # TODO timeout not working if opened on valid, non Bluegiga port
    for i in range(Dongle.PORT_RETRIES):
        try:
            logger.debug('Setting up BGAPI, attempt {}/{}'.format(i + 1, Dongle.PORT_RETRIES))
            self.api = BlueGigaAPI(port=self.address, callbacks=self, baud=Dongle.BAUDRATE, timeout=DEF_TIMEOUT)
            self.api.start_daemon()
            break
        except serial.serialutil.SerialException as e:
            logger.debug('Failed to init BlueGigaAPI: {}, attempt {}/{}'.format(e, i + 1, Dongle.PORT_RETRIES))
            time.sleep(0.1)
    if self.api is None:
        return False
    time.sleep(0.5)  # TODO
    self.get_supported_connections()
    logger.info('Dongle supports {} connections'.format(self.supported_connections))
    if self.supported_connections == -1:
        logger.error('Failed to retrieve number of supported connections from the dongle! (try reinserting it)')
        return False
    self.conn_state = {x: self._STATE_IDLE for x in range(self.supported_connections)}
    self.reset()
    self._cbthread = threading.Thread(target=self._cbthreadfunc)
    self._cbthread.setDaemon(True)
    self._cbthread_q = Queue()
    self._cbthread.start()
    return True

def get_projected_player_game_stats_by_player(self, season, week, player_id):
    """
    Projected Player Game Stats by Player
    """
    result = self._method_call("PlayerGameProjectionStatsByPlayerID/{season}/{week}/{player_id}", "projections", season=season, week=week, player_id=player_id)
    return result

def ansi_format(self, width=64, height=12):
    """Return a human readable ANSI-terminal printout of the stats.
    width
        Custom width for the graph (in characters).
    height
        Custom height for the graph (in characters).
    """
    from mrcrowbar.ansi import format_bar_graph_iter
    # Check the bounds before the divisor test, so that width=0 raises a
    # ValueError rather than a ZeroDivisionError from (256 % 0).
    if width <= 0:
        raise ValueError('Width of the histogram must be greater than zero')
    elif width > 256:
        raise ValueError('Width of the histogram must be less than or equal to 256')
    elif (256 % width) != 0:
        raise ValueError('Width of the histogram must be a divisor of 256')
    buckets = self.histogram(width)
    result = []
    for line in format_bar_graph_iter(buckets, width=width, height=height):
        result.append(' {}\n'.format(line))
    result.append('╘' + ('═' * width) + '╛\n')
    result.append('entropy: {:.10f}\n'.format(self.entropy))
    result.append('samples: {}'.format(self.samples))
    return ''.join(result)

def validate_doc(schema_names,  # type: Names
                 doc,  # type: Union[Dict[Text, Any], List[Dict[Text, Any]], Text, None]
                 loader,  # type: Loader
                 strict,  # type: bool
                 strict_foreign_properties=False  # type: bool
                 ):
    # type: (...) -> None
    """Validate a document using the provided schema."""
    has_root = False
    for root in schema_names.names.values():
        if ((hasattr(root, 'get_prop') and root.get_prop(u"documentRoot")) or (
                u"documentRoot" in root.props)):
            has_root = True
            break
    if not has_root:
        raise validate.ValidationException(
            "No document roots defined in the schema")
    if isinstance(doc, MutableSequence):
        vdoc = doc
    elif isinstance(doc, CommentedMap):
        vdoc = CommentedSeq([doc])
        vdoc.lc.add_kv_line_col(0, [doc.lc.line, doc.lc.col])
        vdoc.lc.filename = doc.lc.filename
    else:
        raise validate.ValidationException("Document must be dict or list")
    roots = []
    for root in schema_names.names.values():
        if ((hasattr(root, "get_prop") and root.get_prop(u"documentRoot")) or (
                root.props.get(u"documentRoot"))):
            roots.append(root)
    anyerrors = []
    for pos, item in enumerate(vdoc):
        sourceline = SourceLine(vdoc, pos, Text)
        success = False
        for root in roots:
            success = validate.validate_ex(
                root, item, loader.identifiers, strict,
                foreign_properties=loader.foreign_properties,
                raise_ex=False, skip_foreign_properties=loader.skip_schemas,
                strict_foreign_properties=strict_foreign_properties)
            if success:
                break
        if not success:
            errors = []  # type: List[Text]
            for root in roots:
                if hasattr(root, "get_prop"):
                    name = root.get_prop(u"name")
                elif hasattr(root, "name"):
                    name = root.name
                try:
                    validate.validate_ex(
                        root, item, loader.identifiers, strict,
                        foreign_properties=loader.foreign_properties,
                        raise_ex=True, skip_foreign_properties=loader.skip_schemas,
                        strict_foreign_properties=strict_foreign_properties)
                except validate.ClassValidationException as exc:
                    errors = [sourceline.makeError(u"tried `%s` but\n%s" % (
                        name, validate.indent(str(exc), nolead=False)))]
                    break
                except validate.ValidationException as exc:
                    errors.append(sourceline.makeError(u"tried `%s` but\n%s" % (
                        name, validate.indent(str(exc), nolead=False))))
            objerr = sourceline.makeError(u"Invalid")
            for ident in loader.identifiers:
                if ident in item:
                    objerr = sourceline.makeError(
                        u"Object `%s` is not valid because"
                        % (relname(item[ident])))
                    break
            anyerrors.append(u"%s\n%s" %
                             (objerr, validate.indent(bullets(errors, "- "))))
    if anyerrors:
        raise validate.ValidationException(
            strip_dup_lineno(bullets(anyerrors, "* ")))

def _varFindLimitSpace(basedir, vars, space, part, lookup_fatal, depth):
    ''' limits the search space of space to part
    basically does space.get(part, None), but with
    templating for part and a few more things
    '''
    # Previous part couldn't be found, nothing to limit to
    if space is None:
        return space
    # A part with escaped .s in it is compounded by { and }, remove them
    if part[0] == '{' and part[-1] == '}':
        part = part[1:-1]
    # Template part to resolve variables within (${var$var2})
    part = varReplace(basedir, part, vars, lookup_fatal, depth=depth + 1)
    # Now find it
    if part in space:
        space = space[part]
    elif "[" in part:
        m = _LISTRE.search(part)
        if not m:
            return None
        else:
            try:
                space = space[m.group(1)][int(m.group(2))]
            except (KeyError, IndexError):
                return None
    else:
        return None
    # if space is a string, check if it's a reference to another variable
    if isinstance(space, basestring):
        space = template_ds(basedir, space, vars, lookup_fatal, depth)
    return space

def get_stp_mst_detail_output_cist_port_internal_path_cost(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_stp_mst_detail = ET.Element("get_stp_mst_detail")
    config = get_stp_mst_detail
    output = ET.SubElement(get_stp_mst_detail, "output")
    cist = ET.SubElement(output, "cist")
    port = ET.SubElement(cist, "port")
    internal_path_cost = ET.SubElement(port, "internal-path-cost")
    internal_path_cost.text = kwargs.pop('internal_path_cost')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)

def _dispatch(self, stmt, stage, *args):
    """Construct and call an assembly function.
    This function constructs the name of the assembly function based on
    the type of statement, the corresponding policy and the stage
    of assembly. It then calls that function to perform the assembly
    task."""
    class_name = stmt.__class__.__name__
    policy = self.processed_policies[stmt.uuid]
    func_name = '%s_%s_%s' % (class_name.lower(), stage, policy)
    func = globals().get(func_name)
    if func is None:
        # The specific policy is not implemented for the
        # given statement type.
        # We try to apply a default policy next.
        func_name = '%s_%s_default' % (class_name.lower(), stage)
        func = globals().get(func_name)
        if func is None:
            # The given statement type doesn't have a default
            # policy.
            # raise UnknownPolicyError('%s function %s not defined' %
            #                          (stage, func_name))
            logger.warning('%s function %s not defined' %
                           (stage, func_name))
            return
    return func(stmt, *args)

def main(argv=None):
    """
    Print accuracies
    """
    try:
        _name_of_script, filepath = argv
    except ValueError:
        raise ValueError(argv)
    print_accuracies(filepath=filepath, test_start=FLAGS.test_start,
                     test_end=FLAGS.test_end, which_set=FLAGS.which_set,
                     nb_iter=FLAGS.nb_iter, base_eps_iter=FLAGS.base_eps_iter,
                     batch_size=FLAGS.batch_size)

def run_triggers(step, pre_post, args=(), **kwargs):
    '''
    step, pre_post-->str: Looked up in CONFIG_VAR
        step must be key in CONFIG_VAR
        pre_post must be key in CONFIG_VAR[step]
    args-->tuple: Passed to run_in_order()
    '''
    if not isinstance(step, str) or not isinstance(pre_post, str):
        return
    if trace_triggers:
        msg = 'Trigger for %s: %s' % (step, pre_post)
        sys.stderr.write(msg.center(80, '-') + '\n')
    if args is None:
        args = ()
    if not isinstance(args, tuple):
        return
    cfg = __get_config()
    if cfg is None:
        return
    if step not in cfg:
        return
    if not isinstance(cfg[step], dict):
        return
    if pre_post not in cfg[step]:
        return
    d = cfg[step][pre_post]
    cmd_list = d.get('cmdlist', [])
    if not cmd_list:
        return
    show_output = d.get('show_output', None)
    show_err = d.get('show_err', None)
    ignore_err = d.get('ignore_err', None)
    run_in_order(
        cmd_list,
        show_output=show_output,
        show_err=show_err,
        ignore_err=ignore_err,
        args=args,
        pre_post=pre_post,
        kwargs=kwargs
    )

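A minimal sketch of the configuration shape run_triggers expects, inferred from the lookups above; the step name and trigger commands are hypothetical, and only the key names (cmdlist, show_output, show_err, ignore_err) come from the code.

# Hypothetical trigger configuration (shape inferred from run_triggers).
CONFIG_VAR = {
    'build': {
        'pre': {
            'cmdlist': ['./scripts/clean.sh', './scripts/fetch_deps.sh'],
            'show_output': True,
            'show_err': True,
            'ignore_err': False,
        },
        'post': {'cmdlist': ['./scripts/package.sh']},
    },
}
# run_triggers('build', 'pre') would then run both pre-build commands
# via run_in_order() with the flags above.
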
def pop(self):
    """Pops the data stack, returning the value."""
    try:
        return self.data_stack.pop()
    except errors.MachineError as e:
        raise errors.MachineError("%s: At index %d in code: %s" %
                                  (e, self.instruction_pointer, self.code_string))

def gauge(key, gauge=None, default=float("nan"), **dims):
    """Adds gauge with dimensions to the global pyformance registry"""
    return global_registry().gauge(key, gauge=gauge, default=default, **dims)

def get_results(self):
    '''
    :return: result from running the task
    '''
    self._event.wait()
    if self._exception is not None:
        #
        # Well... rethrowing the exception caught in execute,
        # but on the caller thread
        #
        raise self._exception  # pylint: disable=E0702
    return self._result

def is_executable(value,
                  **kwargs):
    """Indicate whether ``value`` is an executable file.
    .. caution::
        This validator does **NOT** work correctly on a Windows file system. This
        is due to the vagaries of how Windows manages its file system and the
        various ways in which it can manage file permission.
        If called on a Windows file system, this validator will raise
        :class:`NotImplementedError() <python:NotImplementedError>`.
    .. caution::
        **Use of this validator is an anti-pattern and should be used with caution.**
        Validating the executability of a file *before* attempting to execute it
        exposes your code to a bug called
        `TOCTOU <https://en.wikipedia.org/wiki/Time_of_check_to_time_of_use>`_.
        This particular class of bug can expose your code to **security vulnerabilities**
        and so this validator should only be used if you are an advanced user.
        A better pattern to use when executing a file is to apply the principle of
        EAFP ("easier to ask forgiveness than permission"), and simply attempt to
        execute the file using a ``try ... except`` block.
    .. note::
        This validator relies on :func:`os.access() <python:os.access>` to check
        whether ``value`` is executable. This function has certain limitations,
        most especially that:
        * It will **ignore** file-locking (yielding a false-positive) if the file
          is locked.
        * It focuses on *local operating system permissions*, which means if trying
          to access a path over a network you might get a false positive or false
          negative (because network paths may have more complicated authentication
          methods).
    :param value: The value to evaluate.
    :type value: Path-like object
    :returns: ``True`` if ``value`` is valid, ``False`` if it is not.
    :rtype: :class:`bool <python:bool>`
    :raises NotImplementedError: if called on a Windows system
    :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
        keyword parameters passed to the underlying validator
    """
    if sys.platform in ['win32', 'cygwin']:
        raise NotImplementedError('not supported on Windows')
    try:
        validators.executable(value,
                              **kwargs)
    except SyntaxError as error:
        raise error
    except Exception:
        return False
    return True

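The docstring above recommends EAFP over pre-checking; a minimal sketch of that pattern using only the standard library (try_execute is a hypothetical helper, not part of the validator API):

import subprocess

def try_execute(path):
    # Attempt to run the file and report failure instead of pre-checking,
    # which closes the TOCTOU window described above.
    try:
        return True, subprocess.run([path], check=True)
    except (OSError, subprocess.CalledProcessError) as error:
        return False, error
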
def get_foreign_id(self, idspace='musicbrainz', cache=True):
    """Get the foreign id for this artist for a specific id space
    Args:
    Kwargs:
        idspace (str): A string indicating the idspace to fetch a foreign id for.
    Returns:
        A foreign ID string
    Example:
    >>> a = artist.Artist('fabulous')
    >>> a.get_foreign_id('7digital')
    u'7digital:artist:186042'
    >>>
    """
    if not (cache and ('foreign_ids' in self.cache) and filter(lambda d: d.get('catalog') == idspace, self.cache['foreign_ids'])):
        response = self.get_attribute('profile', bucket=['id:' + idspace])
        foreign_ids = response['artist'].get("foreign_ids", [])
        self.cache['foreign_ids'] = self.cache.get('foreign_ids', []) + foreign_ids
    cval = filter(lambda d: d.get('catalog') == util.map_idspace(idspace),
                  self.cache.get('foreign_ids'))
    return cval[0].get('foreign_id') if cval else None

def type(self, sequence_coverage_collection,
         min_gene_percent_covg_threshold=99):
    """Types a collection of genes, returning the most likely gene version
    in the collection with its genotype"""
    best_versions = self.get_best_version(
        sequence_coverage_collection.values(),
        min_gene_percent_covg_threshold)
    return [self.presence_typer.type(best_version)
            for best_version in best_versions]

def estimate(self, upgrades):
    """Estimate the time needed to apply upgrades.
    If an upgrade does not specify an estimate, it is assumed to be
    in the order of 1 second.
    :param upgrades: List of upgrades sorted in topological order.
    """
    val = 0
    for u in upgrades:
        val += u.estimate()
    return val

def serializable_value(self, obj):
    ''' Produce the value as it should be serialized.
    Sometimes it is desirable for the serialized value to differ from
    the ``__get__`` in order for the ``__get__`` value to appear simpler
    for user or developer convenience.
    Args:
        obj (HasProps) : the object to get the serialized attribute for
    Returns:
        JSON-like
    '''
    value = self.__get__(obj, obj.__class__)
    return self.property.serialize_value(value)

def stream(self):
    """Returns a stream object (:func:`file`, :class:`~io.BytesIO` or
    :class:`~StringIO.StringIO`) on the data."""
    if not hasattr(self, '_stream'):
        if self.file is not None:
            self._stream = self.file
        elif self.filename is not None:
            self._stream = open(self.filename, 'rb')
        elif self.text is not None:
            self._stream = StringIO(self.text)
        elif self.data is not None:
            self._stream = BytesIO(self.data)
        else:
            raise ValueError('Broken Data, all None.')
    return self._stream

def dt_to_qdatetime(dt):
    """Convert a python datetime.datetime object to QDateTime
    :param dt: the datetime object
    :type dt: :class:`datetime.datetime`
    :returns: the QDateTime conversion
    :rtype: :class:`QtCore.QDateTime`
    :raises: None
    """
    return QtCore.QDateTime(QtCore.QDate(dt.year, dt.month, dt.day),
                            QtCore.QTime(dt.hour, dt.minute, dt.second))

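A usage sketch; the ISO string in the comment assumes Qt's ISODate formatting, and note that sub-second precision is dropped by the conversion above.

import datetime
dt = datetime.datetime(2020, 5, 17, 13, 45, 30, 999999)
qdt = dt_to_qdatetime(dt)
# qdt.toString(QtCore.Qt.ISODate) -> '2020-05-17T13:45:30'
# The microseconds are discarded because only hour/minute/second are passed.
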
def get_current_user():
    """
    Return a TOKEN_USER for the owner of this process.
    """
    process = OpenProcessToken(
        ctypes.windll.kernel32.GetCurrentProcess(), TokenAccess.TOKEN_QUERY
    )
    return GetTokenInformation(process, TOKEN_USER)

def drop(self, arg):
    """ overload of pandas.DataFrame.drop()
    Parameters
    ----------
    arg : iterable
        argument to pass to pandas.DataFrame.drop()
    Returns
    -------
    Ensemble : Ensemble
    """
    df = super(Ensemble, self).drop(arg)
    return type(self)(data=df, pst=self.pst)

def InteractiveShellCommand(cls, conn, cmd=None, strip_cmd=True, delim=None, strip_delim=True, clean_stdout=True):
    """Retrieves stdout of the current InteractiveShell and sends a shell command if provided
    TODO: Should we turn this into a yield based function so we can stream all output?
    Args:
        conn: Instance of AdbConnection
        cmd: Optional. Command to run on the target.
        strip_cmd: Optional (default True). Strip command name from stdout.
        delim: Optional. Delimiter to look for in the output to know when to stop expecting more output
            (usually the shell prompt)
        strip_delim: Optional (default True): Strip the provided delimiter from the output
        clean_stdout: Cleanup the stdout stream of any backspaces and the characters that were deleted by the backspace
    Returns:
        The stdout from the shell command.
    """
    if delim is not None and not isinstance(delim, bytes):
        delim = delim.encode('utf-8')
    # Delimiter may be shell@hammerhead:/ $
    # The user or directory could change, making the delimiter something like root@hammerhead:/data/local/tmp $
    # Handle a partial delimiter to search on and clean up
    if delim:
        user_pos = delim.find(b'@')
        dir_pos = delim.rfind(b':/')
        if user_pos != -1 and dir_pos != -1:
            partial_delim = delim[user_pos:dir_pos + 1]  # e.g. @hammerhead:
        else:
            partial_delim = delim
    else:
        partial_delim = None
    stdout = ''
    stdout_stream = BytesIO()
    original_cmd = ''
    try:
        if cmd:
            original_cmd = str(cmd)
            cmd += '\r'  # Required. Send a carriage return right after the cmd
            cmd = cmd.encode('utf8')
            # Send the cmd raw
            bytes_written = conn.Write(cmd)
            if delim:
                # Expect multiple WRTE cmds until the delim (usually terminal prompt) is detected
                data = b''
                while partial_delim not in data:
                    cmd, data = conn.ReadUntil(b'WRTE')
                    stdout_stream.write(data)
            else:
                # Otherwise, expect only a single WRTE
                cmd, data = conn.ReadUntil(b'WRTE')
                # WRTE cmd from device will follow with stdout data
                stdout_stream.write(data)
        else:
            # No cmd provided means we should just expect a single line from the terminal. Use this sparingly
            cmd, data = conn.ReadUntil(b'WRTE')
            if cmd == b'WRTE':
                # WRTE cmd from device will follow with stdout data
                stdout_stream.write(data)
            else:
                print("Unhandled cmd: {}".format(cmd))
        cleaned_stdout_stream = BytesIO()
        if clean_stdout:
            stdout_bytes = stdout_stream.getvalue()
            bsruns = {}  # Backspace runs tracking
            next_start_pos = 0
            last_run_pos, last_run_len = find_backspace_runs(stdout_bytes, next_start_pos)
            if last_run_pos != -1 and last_run_len != 0:
                bsruns.update({last_run_pos: last_run_len})
                cleaned_stdout_stream.write(stdout_bytes[next_start_pos:(last_run_pos - last_run_len)])
                next_start_pos += last_run_pos + last_run_len
            while last_run_pos != -1:
                last_run_pos, last_run_len = find_backspace_runs(stdout_bytes[next_start_pos:], next_start_pos)
                if last_run_pos != -1:
                    bsruns.update({last_run_pos: last_run_len})
                    cleaned_stdout_stream.write(stdout_bytes[next_start_pos:(last_run_pos - last_run_len)])
                    next_start_pos += last_run_pos + last_run_len
            cleaned_stdout_stream.write(stdout_bytes[next_start_pos:])
        else:
            cleaned_stdout_stream.write(stdout_stream.getvalue())
        stdout = cleaned_stdout_stream.getvalue()
        # Strip original cmd that will come back in stdout
        if original_cmd and strip_cmd:
            findstr = original_cmd.encode('utf-8') + b'\r\r\n'
            pos = stdout.find(findstr)
            while pos >= 0:
                stdout = stdout.replace(findstr, b'')
                pos = stdout.find(findstr)
            if b'\r\r\n' in stdout:
                stdout = stdout.split(b'\r\r\n')[1]
        # Strip delim if requested
        # TODO: Handling stripping partial delims here - not a deal breaker the way we're handling it now
        if delim and strip_delim:
            stdout = stdout.replace(delim, b'')
        stdout = stdout.rstrip()
    except Exception as e:
        print("InteractiveShell exception (most likely timeout): {}".format(e))
    return stdout

def get_minions():
    '''
    Return a list of minions
    '''
    serv = _get_serv(ret=None)
    sql = "select distinct(id) from returns"
    data = serv.query(sql)
    ret = []
    if data:
        for jid in data[0]['points']:
            ret.append(jid[1])
    return ret

def run_bash_jobs(
    jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False
) -> None:
    """Submit commands to the bash shell.
    This function runs the commands iteratively but handles errors in the
    same way as with the pbs_commands function. A command will run for all
    combinations of variables in the variable matrix, however if any one of
    those commands fails then the next command will not run.
    """
    logger.debug("Running commands in bash shell")
    # iterate through command groups
    for job in jobs:
        # Check shell exists
        if shutil.which(job.shell) is None:
            raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
        failed = False
        for command in job:
            for cmd in command:
                logger.info(cmd)
                if dry_run:
                    print(f"{job.shell} -c '{cmd}'")
                else:
                    result = subprocess.run(
                        [job.shell, "-c", f"{cmd}"], cwd=str(directory)
                    )
                    if result.returncode != 0:
                        failed = True
                        logger.error("Command failed: %s", command)
                        break
            if failed:
                logger.error("A command failed, not continuing further.")
                return

def to_dense_one_hot(labels, class_count):
    """Converts a vector that specified one-hot per batch into a dense version.
    Args:
        labels: The labels input.
        class_count: The number of classes as an int.
    Returns:
        One dense vector for each item in the batch.
    Raises:
        ValueError: If labels is not rank 1.
        TypeError: If class_count is not an integer or labels is not an integer
            Tensor.
    """
    if not isinstance(class_count, tf.compat.integral_types):
        raise TypeError('class_count must be an integer type.')
    if labels.dtype.base_dtype not in (tf.int32, tf.int64):
        raise TypeError('Labels must be an integer: %s' % labels.dtype)
    if labels.get_shape().ndims != 1:
        raise ValueError('Labels must be a rank 1 tensor: %s' % labels.get_shape())
    dtype = labels.dtype.base_dtype
    class_tensor = tf.convert_to_tensor(
        class_count, dtype=dtype, name='class_count')
    # Extract the batch from the shape so this is batch independent.
    batch = tf.gather(tf.shape(labels), 0)
    count = tf.expand_dims(tf.range(0, limit=batch), 1)
    labels = tf.expand_dims(labels, 1)
    batch = tf.gather(tf.shape(labels), 0)
    if dtype != tf.int32:
        count = tf.cast(count, dtype)
        batch = tf.cast(batch, dtype)
    result = tf.sparse_to_dense(
        tf.concat([count, labels], 1),
        tf.concat([tf.expand_dims(batch, 0), tf.expand_dims(class_tensor, 0)], 0),
        1.0, 0.0)
    result.set_shape([labels.get_shape().dims[0], class_count])
    return result

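tf.sparse_to_dense is from the TF1 era; in current TensorFlow the same dense expansion is available directly via tf.one_hot. A sketch of the equivalent call (the rank and dtype validation from the original is omitted):

import tensorflow as tf

def to_dense_one_hot_v2(labels, class_count):
    # One row per batch item, 1.0 at the label index, 0.0 elsewhere.
    return tf.one_hot(labels, depth=class_count, on_value=1.0, off_value=0.0)
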
def execute_from_command_line(argv=None):
    """
    A simple method that runs a ManagementUtility.
    """
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "colab.settings")
    from django.conf import settings
    if not hasattr(settings, 'SECRET_KEY') and 'initconfig' in sys.argv:
        command = initconfig.Command()
        command.handle()
    else:
        utility = ManagementUtility(argv)
        utility.execute()

def _register_trade(self, order):
    """ constructs trade info from order data """
    if order['id'] in self.orders.recent:
        orderId = order['id']
    else:
        orderId = order['parentId']
    # entry / exit?
    symbol = order["symbol"]
    order_data = self.orders.recent[orderId]
    position = self.get_positions(symbol)['position']
    if position != 0:
        # entry
        order_data['action'] = "ENTRY"
        order_data['position'] = position
        order_data['entry_time'] = tools.datetime_to_timezone(
            order['time'])
        order_data['exit_time'] = None
        order_data['entry_order'] = order_data['order_type']
        order_data['entry_price'] = order['avgFillPrice']
        order_data['exit_price'] = 0
        order_data['exit_reason'] = None
    else:
        order_data['action'] = "EXIT"
        order_data['position'] = 0
        order_data['exit_time'] = tools.datetime_to_timezone(order['time'])
        order_data['exit_price'] = order['avgFillPrice']
        # target / stop?
        if order['id'] == order_data['targetOrderId']:
            order_data['exit_reason'] = "TARGET"
        elif order['id'] == order_data['stopOrderId']:
            order_data['exit_reason'] = "STOP"
        else:
            order_data['exit_reason'] = "SIGNAL"
        # remove from collection
        del self.orders.recent[orderId]
    if order_data is None:
        return None
    # trade identifier
    tradeId = self.strategy.upper() + '_' + symbol.upper()
    tradeId = hashlib.sha1(tradeId.encode()).hexdigest()
    # existing trade?
    if tradeId not in self.active_trades:
        self.active_trades[tradeId] = {
            "strategy": self.strategy,
            "action": order_data['action'],
            "quantity": abs(order_data['position']),
            "position": order_data['position'],
            "symbol": order_data["symbol"].split('_')[0],
            "direction": order_data['direction'],
            "entry_time": None,
            "exit_time": None,
            "duration": "0s",
            "exit_reason": order_data['exit_reason'],
            "order_type": order_data['order_type'],
            "market_price": order_data['price'],
            "target": order_data['target'],
            "stop": order_data['initial_stop'],
            "entry_price": 0,
            "exit_price": order_data['exit_price'],
            "realized_pnl": 0
        }
        if "entry_time" in order_data:
            self.active_trades[tradeId]["entry_time"] = order_data['entry_time']
        if "entry_price" in order_data:
            self.active_trades[tradeId]["entry_price"] = order_data['entry_price']
    else:
        # self.active_trades[tradeId]['direction'] = order_data['direction']
        self.active_trades[tradeId]['action'] = order_data['action']
        self.active_trades[tradeId]['position'] = order_data['position']
        self.active_trades[tradeId]['exit_price'] = order_data['exit_price']
        self.active_trades[tradeId]['exit_reason'] = order_data['exit_reason']
        self.active_trades[tradeId]['exit_time'] = order_data['exit_time']
        # calculate trade duration
        try:
            delta = int((self.active_trades[tradeId]['exit_time'] -
                         self.active_trades[tradeId]['entry_time']).total_seconds())
            days, remainder = divmod(delta, 86400)
            hours, remainder = divmod(remainder, 3600)
            minutes, seconds = divmod(remainder, 60)
            duration = ('%sd %sh %sm %ss' %
                        (days, hours, minutes, seconds))
            self.active_trades[tradeId]['duration'] = duration.replace(
                "0d ", "").replace("0h ", "").replace("0m ", "")
        except Exception as e:
            pass
    trade = self.active_trades[tradeId]
    if trade['entry_price'] > 0 and trade['position'] == 0:
        if trade['direction'] == "SELL":
            pnl = trade['entry_price'] - trade['exit_price']
        else:
            pnl = trade['exit_price'] - trade['entry_price']
        pnl = tools.to_decimal(pnl)
        # print("1)", pnl)
        self.active_trades[tradeId]['realized_pnl'] = pnl
    # print("\n\n-----------------")
    # print(self.active_trades[tradeId])
    # print("-----------------\n\n")
    # get trade
    trade = self.active_trades[tradeId].copy()
    # sms trades
    sms._send_trade(trade, self.sms_numbers, self.timezone)
    # rename trade direction
    trade['direction'] = trade['direction'].replace(
        "BUY", "LONG").replace("SELL", "SHORT")
    # log
    self.log_trade(trade)
    # remove from active trades and add to trade
    if trade['action'] == "EXIT":
        del self.active_trades[tradeId]
        self.trades.append(trade)
    # return trade
    return trade

def get_location(self, ip, detailed=False):
    """Returns a dictionary with location data or False on failure.
    Amount of information about IP contained in the dictionary depends
    upon `detailed` flag state.
    """
    seek = self._get_pos(ip)
    if seek > 0:
        return self._parse_location(seek, detailed=detailed)
    return False

def build_kal_scan_band_string(kal_bin, band, args):
    """Return string for CLI invocation of kal, for band scan."""
    option_mapping = {"gain": "-g",
                      "device": "-d",
                      "error": "-e"}
    if not sanity.scan_band_is_valid(band):
        err_txt = "Unsupported band designation: %s" % band
        raise ValueError(err_txt)
    base_string = "%s -v -s %s" % (kal_bin, band)
    base_string += options_string_builder(option_mapping, args)
    return base_string

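A usage sketch; `args` mimics an argparse-style namespace carrying the optional flags mapped above, and the exact flag order in the result depends on options_string_builder, which is not shown here. Values are illustrative.

from types import SimpleNamespace

args = SimpleNamespace(gain=45, device=0, error=22)
cmd = build_kal_scan_band_string("/usr/bin/kal", "GSM900", args)
# cmd -> something like "/usr/bin/kal -v -s GSM900 -g 45 -d 0 -e 22"
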
def get_files(files, extnames=['.root']):
    """Extract a list of file paths from a list containing both paths
    and file lists with one path per line."""
    files_out = []
    for f in files:
        mime = mimetypes.guess_type(f)
        if os.path.splitext(f)[1] in extnames:
            files_out += [f]
        elif mime[0] == 'text/plain':
            files_out += list(np.loadtxt(f, unpack=True, dtype='str'))
        else:
            raise Exception('Unrecognized input type.')
    return files_out

def countdown(template, duration=datetime.timedelta(seconds=5)):
    """
    Do a countdown for duration, printing the template (which may accept one
    positional argument). Template should be something like
    ``countdown complete in {} seconds.``
    """
    now = datetime.datetime.now()
    deadline = now + duration
    remaining = deadline - datetime.datetime.now()
    while remaining:
        remaining = deadline - datetime.datetime.now()
        remaining = max(datetime.timedelta(), remaining)
        msg = template.format(remaining.total_seconds())
        print(msg, end=' ' * 10)
        sys.stdout.flush()
        time.sleep(.1)
        print('\b' * 80, end='')
        sys.stdout.flush()
    print()

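Usage sketch: prints a live three-second countdown on one terminal line.

import datetime
countdown('ready in {} seconds', datetime.timedelta(seconds=3))
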
def filter_indices(self, options, verbosity, *args, **kwargs):
    """Filter indices and execute an action for each index."""
    index_name_map = {
        index.__class__.__name__: index
        for index in index_builder.indexes
    }
    # Process includes.
    if options['index']:
        indices = set(options['index'])
    else:
        indices = set(index_name_map.keys())
    # Process excludes.
    for index_name in options['exclude']:
        if index_name not in index_name_map:
            self.invalid_index(index_name)
            return
        indices.discard(index_name)
    # Execute action for each remaining index.
    for index_name in indices:
        try:
            index = index_name_map[index_name]
        except KeyError:
            self.invalid_index(index_name)
            return
        if verbosity > 0:
            self.stdout.write("Processing index '{}'...".format(index_name))
        self.handle_index(index, *args, **kwargs)

def get_readable_filesize(size):
    """get_readable_filesize(size) -> filesize -- return human readable
    filesize from given size in bytes.
    """
    if size < 1024:
        return str(size) + ' bytes'
    temp = size / 1024.0
    level = 1
    while temp >= 1024 and level < 3:
        temp = temp / 1024
        level += 1
    if level == 1:
        return str(round(temp, 2)) + ' KB'
    elif level == 2:
        return str(round(temp, 2)) + ' MB'
    else:
        return str(round(temp, 2)) + ' GB'

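A few expected outputs, as a usage sketch:

assert get_readable_filesize(512) == '512 bytes'
assert get_readable_filesize(2048) == '2.0 KB'
assert get_readable_filesize(5 * 1024 ** 2) == '5.0 MB'
assert get_readable_filesize(3 * 1024 ** 3) == '3.0 GB'
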
def share(self, file_ids, pwd=None, **kwargs):
    """
    Create a share link for files
    :param file_ids: list of fids of the files to share
    :type file_ids: list
    :param pwd: share password; if omitted, the share has no password
    :type pwd: str
    :return: requests.Response object
    .. note::
        On success returns:
        {
            "errno": 0,
            "request_id": request identifier,
            "shareid": share identifier,
            "link": "share URL",
            "shorturl": "short URL",
            "ctime": creation time,
            "premis": false
        }
    """
    if pwd:
        data = {
            'fid_list': json.dumps([int(fid) for fid in file_ids]),
            'pwd': pwd,
            'schannel': 4,
            'channel_list': json.dumps([])
        }
    else:
        data = {
            'fid_list': json.dumps([int(fid) for fid in file_ids]),
            'schannel': 0,
            'channel_list': json.dumps([])
        }
    url = 'http://pan.baidu.com/share/set'
    return self._request('share/set', '', url=url, data=data, **kwargs)

def readonce(self, size=None):
    """
    Read from current buffer. If current buffer is empty, returns an empty string. You can use `prepareRead`
    to read the next chunk of data.
    This is not a coroutine method.
    """
    if self.eof:
        raise EOFError
    if self.errored:
        raise IOError('Stream is broken before EOF')
    if size is not None and size < len(self.data) - self.pos:
        ret = self.data[self.pos: self.pos + size]
        self.pos += size
        return ret
    else:
        ret = self.data[self.pos:]
        self.pos = len(self.data)
        if self.dataeof:
            self.eof = True
        return ret

def dict_key(self):
    """
    Find the next key in a dict. We skip any newlines and check for if the
    dict has ended.
    """
    while True:
        token = self.next()
        t_value = token["value"]
        if t_value == "\n":
            continue
        if t_value == "}":
            raise self.ParseEnd()
        if token["type"] == "literal":
            return self.make_value(t_value)
        self.error("Invalid Key")

def build_ascii_property_table(output):
    """Build and write out Unicode property table."""
    if not os.path.exists(output):
        os.mkdir(output)
    gen_properties(output, ascii_props=True, append=True)

def prt_tsv(prt, data_nts, **kws):
    """Print tab-separated table headers and data"""
    # User-controlled printing options
    prt_tsv_hdr(prt, data_nts, **kws)
    return prt_tsv_dat(prt, data_nts, **kws)

def getCellVertexes(self, i, j):
    """
    Edge coordinates of an hexagon centered in (x,y) having a side of d:
        [x-d/2, y+sqrt(3)*d/2]   [x+d/2, y+sqrt(3)*d/2]
    [x-d, y]                                   [x+d, y]
        [x-d/2, y-sqrt(3)*d/2]   [x+d/2, y-sqrt(3)*d/2]
    """
    # Using unrotated centroid coordinates to avoid an extra computation
    x, y = self._getUnrotatedCellCentroidCoords(i, j)
    return [
        self.rotatePoint(x - self._side, y),
        self.rotatePoint(x - self._side / 2.0, y - self._hexPerp),
        self.rotatePoint(x + self._side / 2.0, y - self._hexPerp),
        self.rotatePoint(x + self._side, y),
        self.rotatePoint(x + self._side / 2.0, y + self._hexPerp),
        self.rotatePoint(x - self._side / 2.0, y + self._hexPerp),
    ]

def matomo(parser, token):
    """
    Matomo tracking template tag.
    Renders Javascript code to track page visits. You must supply
    your Matomo domain (plus optional URI path), and tracked site ID
    in the ``MATOMO_DOMAIN_PATH`` and the ``MATOMO_SITE_ID`` setting.
    Custom variables can be passed in the ``matomo_vars`` context
    variable. It is an iterable of custom variables as tuples like:
    ``(index, name, value[, scope])`` where scope may be ``'page'``
    (default) or ``'visit'``. Index should be an integer and the
    other parameters should be strings.
    """
    bits = token.split_contents()
    if len(bits) > 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
    return MatomoNode()

def atleast_nd(n, u):
    """
    If the input array has fewer than n dimensions, append singleton
    dimensions so that it is n dimensional. Note that the interface
    differs substantially from that of :func:`numpy.atleast_3d` etc.
    Parameters
    ----------
    n : int
        Minimum number of required dimensions
    u : array_like
        Input array
    Returns
    -------
    v : ndarray
        Output array with at least n dimensions
    """
    if u.ndim >= n:
        return u
    else:
        return u.reshape(u.shape + (1,) * (n - u.ndim))

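Usage sketch with NumPy, showing that singleton dimensions are appended rather than prepended (unlike numpy.atleast_3d):

import numpy as np
u = np.zeros((3, 4))
assert atleast_nd(4, u).shape == (3, 4, 1, 1)  # dims appended at the end
assert atleast_nd(1, u) is u                   # already >= n dims: unchanged
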
def ensure_session(engine_or_session):
    """
    If it is an engine, then create a session from it. And indicate that
    this session should be closed after the job done.
    """
    if isinstance(engine_or_session, Engine):
        ses = sessionmaker(bind=engine_or_session)()
        auto_close = True
    elif isinstance(engine_or_session, Session):
        ses = engine_or_session
        auto_close = False
    else:
        # Fail loudly instead of raising a NameError on `ses` below.
        raise TypeError("expected an Engine or a Session instance")
    return ses, auto_close

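Usage sketch with SQLAlchemy (an in-memory SQLite engine); the caller is responsible for honoring the auto_close flag:

from sqlalchemy import create_engine

engine = create_engine('sqlite://')
ses, auto_close = ensure_session(engine)  # new Session, auto_close=True
try:
    pass  # ... work with ses ...
finally:
    if auto_close:
        ses.close()
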
def hfd(X, Kmax):
    """ Compute Higuchi Fractal Dimension of a time series X; Kmax
    is an HFD parameter
    """
    L = []
    x = []
    N = len(X)
    for k in range(1, Kmax):
        Lk = []
        for m in range(0, k):
            Lmk = 0
            for i in range(1, int(numpy.floor((N - m) / k))):
                Lmk += abs(X[m + i * k] - X[m + i * k - k])
            Lmk = Lmk * (N - 1) / numpy.floor((N - m) / float(k)) / k
            Lk.append(Lmk)
        L.append(numpy.log(numpy.mean(Lk)))
        x.append([numpy.log(float(1) / k), 1])
    (p, r1, r2, s) = numpy.linalg.lstsq(x, L)
    return p[0]

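Usage sketch; as a rough sanity check, Higuchi's dimension is near 2 for uncorrelated noise and near 1 for a smooth curve:

import numpy
print(hfd(numpy.random.randn(1000), Kmax=8))         # ~2 for white noise
print(hfd(numpy.arange(1000, dtype=float), Kmax=8))  # ~1 for a straight line
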
def add_host(host):
    """ Put your host information in the prefix object. """
    p = new_prefix()
    p.prefix = str(host['ipaddr'])
    p.type = "host"
    p.description = host['description']
    p.node = host['fqdn']
    p.avps = {}
    # Use remaining data from ipplan to populate comment field.
    if 'additional' in host:
        p.comment = host['additional']
    # Use specific info to create extra attributes.
    if len(host['location']) > 0:
        p.avps['location'] = host['location']
    if len(host['mac']) > 0:
        p.avps['mac'] = host['mac']
    if len(host['phone']) > 0:
        p.avps['phone'] = host['phone']
    if len(host['user']) > 0:
        p.avps['user'] = host['user']
    return p

def remove_semantic_data(self, path_as_list):
    """ Removes an entry from the semantic data vividict.
    :param list path_as_list: The path of the vividict to delete.
    :return: removed value or dict
    """
    if len(path_as_list) == 0:
        raise AttributeError("The argument path_as_list is empty, but the method remove_semantic_data needs a "
                             "valid path to remove a vividict item.")
    target_dict = self.get_semantic_data(path_as_list[0:-1])
    removed_element = target_dict[path_as_list[-1]]
    del target_dict[path_as_list[-1]]
    return removed_element

def limit_gen(limit, iterable):
    '''A generator that applies a count `limit`.'''
    limit = int(limit)
    assert limit >= 0, 'negative limit'
    for item in iterable:
        if limit <= 0:
            break
        yield item
        limit -= 1

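Usage sketch: caps an (even infinite) iterable at n items.

import itertools
assert list(limit_gen(3, itertools.count())) == [0, 1, 2]
assert list(limit_gen(0, 'abc')) == []
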
def export(self, path, epoch=0):
    """Export HybridBlock to json format that can be loaded by
    `SymbolBlock.imports`, `mxnet.mod.Module` or the C++ interface.
    .. note:: When there is only one input, it will have the name `data`. When there
        is more than one input, they will be named `data0`, `data1`, etc.
    Parameters
    ----------
    path : str
        Path to save model. Two files `path-symbol.json` and `path-xxxx.params`
        will be created, where xxxx is the 4 digits epoch number.
    epoch : int
        Epoch number of saved model.
    """
    if not self._cached_graph:
        raise RuntimeError(
            "Please first call block.hybridize() and then run forward with "
            "this block at least once before calling export.")
    sym = self._cached_graph[1]
    sym.save('%s-symbol.json' % path)
    arg_names = set(sym.list_arguments())
    aux_names = set(sym.list_auxiliary_states())
    arg_dict = {}
    for name, param in self.collect_params().items():
        if name in arg_names:
            arg_dict['arg:%s' % name] = param._reduce()
        else:
            assert name in aux_names
            arg_dict['aux:%s' % name] = param._reduce()
    ndarray.save('%s-%04d.params' % (path, epoch), arg_dict)

def get_console_output(self, instance_id):
    """Get the console output for a single instance."""
    InstanceIDParam = {"InstanceId": instance_id}
    query = self.query_factory(
        action="GetConsoleOutput", creds=self.creds,
        endpoint=self.endpoint, other_params=InstanceIDParam)
    d = query.submit()
    return d.addCallback(self.parser.get_console_output)

def sel_entries(self):
    """Generator which returns all SEL entries."""
    ENTIRE_RECORD = 0xff
    rsp = self.send_message_with_name('GetSelInfo')
    if rsp.entries == 0:
        return
    reservation_id = self.get_sel_reservation_id()
    next_record_id = 0
    while True:
        req = create_request_by_name('GetSelEntry')
        req.reservation_id = reservation_id
        req.record_id = next_record_id
        req.offset = 0
        self.max_req_len = ENTIRE_RECORD
        record_data = ByteBuffer()
        while True:
            req.length = self.max_req_len
            if (self.max_req_len != 0xff
                    and (req.offset + req.length) > 16):
                req.length = 16 - req.offset
            rsp = self.send_message(req)
            if rsp.completion_code == constants.CC_CANT_RET_NUM_REQ_BYTES:
                if self.max_req_len == 0xff:
                    self.max_req_len = 16
                else:
                    self.max_req_len -= 1
                continue
            else:
                check_completion_code(rsp.completion_code)
            record_data.extend(rsp.record_data)
            req.offset = len(record_data)
            if len(record_data) >= 16:
                break
        next_record_id = rsp.next_record_id
        yield SelEntry(record_data)
        if next_record_id == 0xffff:
            break

def department(self):
    """
    | Description: The ID of the department to which the chat is directed
    """
    if self.api and self.department_id:
        return self.api._get_department(self.department_id)

def validate(self, instance, value):
    """Check if value is a string, and strips it and changes case"""
    value_type = type(value)
    if not isinstance(value, string_types):
        self.error(instance, value)
    if self.regex is not None and self.regex.search(value) is None:  # pylint: disable=no-member
        self.error(instance, value, extra='Regex does not match.')
    value = value.strip(self.strip)
    if self.change_case == 'upper':
        value = value.upper()
    elif self.change_case == 'lower':
        value = value.lower()
    if self.unicode:
        value = text_type(value)
    else:
        value = value_type(value)
    return value

def stats(self):
    """
    Return storage statistics about the library
    Returns
    -------
    dictionary of storage stats
    """
    res = {}
    db = self._collection.database
    conn = db.connection
    res['sharding'] = {}
    try:
        sharding = conn.config.databases.find_one({'_id': db.name})
        if sharding:
            res['sharding'].update(sharding)
        res['sharding']['collections'] = list(conn.config.collections.find({'_id': {'$regex': '^' + db.name + r"\..*"}}))
    except OperationFailure:
        # Access denied
        pass
    res['dbstats'] = db.command('dbstats')
    res['chunks'] = db.command('collstats', self._collection.name)
    res['versions'] = db.command('collstats', self._versions.name)
    res['snapshots'] = db.command('collstats', self._snapshots.name)
    res['totals'] = {'count': res['chunks']['count'],
                     'size': res['chunks']['size'] + res['versions']['size'] + res['snapshots']['size'],
                     }
    return res

def returnDepositsWithdrawals(self, start=0, end=2**32-1):
    """Returns your deposit and withdrawal history within a range,
    specified by the "start" and "end" POST parameters, both of which
    should be given as UNIX timestamps."""
    return self._private('returnDepositsWithdrawals', start=start, end=end)

def _prepare_init_params_from_job_description(cls, job_details):
    """Convert the transform job description to init params that can be handled by the class constructor
    Args:
        job_details (dict): the returned job details from a describe_transform_job API call.
    Returns:
        dict: The transformed init_params
    """
    init_params = dict()
    init_params['model_name'] = job_details['ModelName']
    init_params['instance_count'] = job_details['TransformResources']['InstanceCount']
    init_params['instance_type'] = job_details['TransformResources']['InstanceType']
    init_params['volume_kms_key'] = job_details['TransformResources'].get('VolumeKmsKeyId')
    init_params['strategy'] = job_details.get('BatchStrategy')
    init_params['assemble_with'] = job_details['TransformOutput'].get('AssembleWith')
    init_params['output_path'] = job_details['TransformOutput']['S3OutputPath']
    init_params['output_kms_key'] = job_details['TransformOutput'].get('KmsKeyId')
    init_params['accept'] = job_details['TransformOutput'].get('Accept')
    init_params['max_concurrent_transforms'] = job_details.get('MaxConcurrentTransforms')
    init_params['max_payload'] = job_details.get('MaxPayloadInMB')
    init_params['base_transform_job_name'] = job_details['TransformJobName']
    return init_params

def plot(self, flip=False, ax_channels=None, ax=None, *args, **kwargs):
    """
    {_gate_plot_doc}
    """
    if ax is None:
        ax = pl.gca()
    if ax_channels is not None:
        flip = self._find_orientation(ax_channels)
    plot_func = ax.axes.axhline if flip else ax.axes.axvline
    kwargs.setdefault('color', 'black')
    a1 = plot_func(self.vert[0], *args, **kwargs)
    a2 = plot_func(self.vert[1], *args, **kwargs)
    return (a1, a2)

def _getPath():
    """ Returns Chrome's cookie database path
    Returns
        str - Google Chrome's cookie database path
    """
    if os.name == "posix":
        path = os.getenv("HOME") + "/.config/google-chrome/Default/Cookies"
        return path
    import _winreg
    key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, 'Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders')
    path = _winreg.QueryValueEx(key, 'Local AppData')[0]
    path = os.path.join(path, 'Google\\Chrome\\User Data\\Default\\Cookies')
    return path

def to_xdr_object(self):
    """Creates an XDR Operation object that represents this
    :class:`AccountMerge`.
    """
    destination = account_xdr_object(self.destination)
    self.body.type = Xdr.const.ACCOUNT_MERGE
    self.body.destination = destination
    return super(AccountMerge, self).to_xdr_object()

def _get_boot_time_aix():
    '''
    Return the number of seconds since boot time on AIX
        t=$(LC_ALL=POSIX ps -o etime= -p 1)
        d=0 h=0
        case $t in *-*) d=${t%%-*}; t=${t#*-};; esac
        case $t in *:*:*) h=${t%%:*}; t=${t#*:};; esac
        s=$((d*86400 + h*3600 + ${t%%:*}*60 + ${t#*:}))
    t is 7-20:46:46
    '''
    boot_secs = 0
    res = __salt__['cmd.run_all']('ps -o etime= -p 1')
    if res['retcode'] > 0:
        raise CommandExecutionError('Unable to find boot_time for pid 1.')
    bt_time = res['stdout']
    days = bt_time.split('-')
    hms = days[1].split(':')
    boot_secs = _number(days[0]) * 86400 + _number(hms[0]) * 3600 + _number(hms[1]) * 60 + _number(hms[2])
    return boot_secs

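The shell arithmetic from the docstring, redone in pure Python as a sketch; parse_etime is a hypothetical helper and assumes the [[dd-]hh:]mm:ss etime format shown above:

def parse_etime(t):
    # Convert a ps etime string like '7-20:46:46' to seconds.
    days, hours = 0, 0
    if '-' in t:
        days, t = t.split('-', 1)
    parts = t.split(':')
    if len(parts) == 3:
        hours, minutes, seconds = parts
    else:
        minutes, seconds = parts
    return (int(days) * 86400 + int(hours) * 3600
            + int(minutes) * 60 + int(seconds))

assert parse_etime('7-20:46:46') == 7 * 86400 + 20 * 3600 + 46 * 60 + 46
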
def begin_auth(request):
    """The view function that initiates the entire handshake.
    For the most part, this is 100% drag and drop.
    """
    # Instantiate Twython with the first leg of our trip.
    twitter = Twython(settings.TWITTER_KEY, settings.TWITTER_SECRET)
    # Request an authorization url to send the user to...
    callback_url = request.build_absolute_uri(reverse('twython_django_oauth.views.thanks'))
    auth_props = twitter.get_authentication_tokens(callback_url)
    # Then send them over there, durh.
    request.session['request_token'] = auth_props
    request.session['next_url'] = request.GET.get('next', None)
    return HttpResponseRedirect(auth_props['auth_url'])

def callproc(self, procname, parameters=(), cursorClass=DictCursor, quiet=False):
    """Calls a MySQL stored procedure procname. This uses DictCursor by default."""
    i = 0
    errcode = 0
    caughte = None
    while i < self.numTries:
        i += 1
        try:
            cursor = self.connection.cursor(cursorClass)
            if type(parameters) != type(()):
                parameters = (parameters,)
            errcode = cursor.callproc(procname, parameters)
            results = cursor.fetchall()
            self.lastrowid = int(cursor.lastrowid)
            cursor.close()
            return results
        except MySQLdb.OperationalError, e:
            errcode = e[0]
            self.connection.ping()
            caughte = e
            continue
        except:
            traceback.print_exc()
            break
    if not quiet:
        sys.stderr.write("\nSQL execution error call stored procedure %s at %s:" % (procname, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
        sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" % (errcode, str(caughte)))
        sys.stderr.flush()
    raise MySQLdb.OperationalError(caughte)

def all(self, domain=None):
    """
    Gets the messages within a given domain.
    If domain is None, it returns all messages.
    @type domain: str or None
    @param domain: the message domain to filter on
    @rtype: dict
    @return: A dict of messages
    """
    if domain is None:
        return {k: dict(v) for k, v in list(self.messages.items())}
    return dict(self.messages.get(domain, {}))

def on_current_tab_changed(self):
    """
    Update action states when the current tab changed.
    """
    self.menuEdit.clear()
    self.menuModes.clear()
    self.menuPanels.clear()
    editor = self.tabWidget.current_widget()
    self.menuEdit.setEnabled(editor is not None)
    self.menuModes.setEnabled(editor is not None)
    self.menuPanels.setEnabled(editor is not None)
    self.actionSave.setEnabled(editor is not None)
    self.actionSave_as.setEnabled(editor is not None)
    self.actionConfigure_run.setEnabled(editor is not None)
    self.actionRun.setEnabled(editor is not None)
    if editor is not None:
        self.setup_mnu_edit(editor)
        self.setup_mnu_modes(editor)
        self.setup_mnu_panels(editor)
    self.widgetOutline.set_editor(editor)
    self._update_status_bar(editor)

def tophat_alignment_plot(self):
    """ Make the HighCharts HTML to plot the alignment rates """
    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['aligned_not_multimapped_discordant'] = {'color': '#437bb1', 'name': 'Aligned'}
    keys['aligned_multimap'] = {'color': '#f7a35c', 'name': 'Multimapped'}
    keys['aligned_discordant'] = {'color': '#e63491', 'name': 'Discordant mappings'}
    keys['unaligned_total'] = {'color': '#7f0000', 'name': 'Not aligned'}
    # Config for the plot
    config = {
        'id': 'tophat_alignment',
        'title': 'Tophat: Alignment Scores',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads'
    }
    self.add_section(plot=bargraph.plot(self.tophat_data, keys, config))

def __create_admin_entry(self, handleowner, permissions, index, handle, ttl=None):
    '''
    Create an entry of type "HS_ADMIN".
    :param handleowner: The username, i.e. a handle with an index
        (index:prefix/suffix). The value referenced by the index contains
        authentication information, e.g. a hidden entry containing a key.
    :param permissions: The permissions as a string of zeros and ones,
        e.g. '0111011101011'. If not all twelve bits are set, the remaining
        ones are set to zero.
    :param index: The integer to be used as index of this admin entry (not
        of the username!). Should be 1xx.
    :param ttl: Optional. If not set, the library's default is set. If
        there is no default, it is not set by this library, so Handle
        System sets it.
    :return: The entry as a dict.
    '''
    # If the handle owner is specified, use it. Otherwise, use 200:0.NA/prefix
    # with the prefix taken from the handle that is being created, not from anywhere else.
    if handleowner is None:
        adminindex = '200'
        prefix = handle.split('/')[0]
        adminhandle = '0.NA/' + prefix
    else:
        adminindex, adminhandle = utilhandle.remove_index_from_handle(handleowner)
    data = {
        'value': {
            'index': adminindex,
            'handle': adminhandle,
            'permissions': permissions
        },
        'format': 'admin'
    }
    entry = {'index': index, 'type': 'HS_ADMIN', 'data': data}
    if ttl is not None:
        entry['ttl'] = ttl
    return entry

def setup_prjs_signals(self):
    """Setup the signals for the projects page
    :returns: None
    :rtype: None
    :raises: None
    """
    log.debug("Setting up projects page signals.")
    self.prjs_prj_view_pb.clicked.connect(self.prjs_view_prj)
    self.prjs_prj_create_pb.clicked.connect(self.prjs_create_prj)

def eat_line(self):
    """Move current position forward until the next line."""
    if self.eos:
        return None
    eat_length = self.eat_length
    get_char = self.get_char
    has_space = self.has_space
    while has_space() and get_char() != '\n':
        eat_length(1)
    eat_length(1)

def instagram_scrap_profile(username):
    """
    Scrape an instagram profile page
    :param username:
    :return:
    """
    try:
        url = "https://www.instagram.com/{}/".format(username)
        page = requests.get(url)
        # Raise error for 404 caused by a bad profile name
        page.raise_for_status()
        return html.fromstring(page.content)
    except HTTPError:
        logging.exception('user profile "{}" not found'.format(username))
    except (ConnectionError, socket_error) as e:
        logging.exception("instagram.com unreachable")

def populate_class_members(self, element_cls, prop_name):
    """
    Add the appropriate methods to *element_cls*.
    """
    self._element_cls = element_cls
    self._prop_name = prop_name
    self._add_attr_property()

def from_shapely(geometry, label=None):
    """
    Create a MultiPolygon from a Shapely MultiPolygon, a Shapely Polygon or a Shapely GeometryCollection.
    This also creates all necessary Polygons contained by this MultiPolygon.
    Parameters
    ----------
    geometry : shapely.geometry.MultiPolygon or shapely.geometry.Polygon\
            or shapely.geometry.collection.GeometryCollection
        The object to convert to a MultiPolygon.
    label : None or str, optional
        A label assigned to all Polygons within the MultiPolygon.
    Returns
    -------
    imgaug.MultiPolygon
        The derived MultiPolygon.
    """
    # load shapely lazily, which makes the dependency more optional
    import shapely.geometry
    if isinstance(geometry, shapely.geometry.MultiPolygon):
        return MultiPolygon([Polygon.from_shapely(poly, label=label) for poly in geometry.geoms])
    elif isinstance(geometry, shapely.geometry.Polygon):
        return MultiPolygon([Polygon.from_shapely(geometry, label=label)])
    elif isinstance(geometry, shapely.geometry.collection.GeometryCollection):
        ia.do_assert(all([isinstance(poly, shapely.geometry.Polygon) for poly in geometry.geoms]))
        return MultiPolygon([Polygon.from_shapely(poly, label=label) for poly in geometry.geoms])
    else:
        raise Exception("Unknown datatype '%s'. Expected shapely.geometry.Polygon or "
                        "shapely.geometry.MultiPolygon or "
                        "shapely.geometry.collections.GeometryCollection." % (type(geometry),))

def max_min_col_update(self):
    """
    Determines the maximum and minimum number in each column.
    The result is a list whose k-th entry is [vmax, vmin], where vmax and
    vmin denote the maximum and minimum of the k-th column (ignoring NaN).
    This list is stored in self.max_min_col.
    If the k-th column has a non-numerical dtype, then the k-th entry
    is set to None. If the dtype is complex, then compute the maximum and
    minimum of the absolute values. If vmax equals vmin, then vmin is
    decreased by one.
    """
    if self.df.shape[0] == 0:  # If no rows to compute max/min then return
        return
    self.max_min_col = []
    for dummy, col in self.df.iteritems():
        if col.dtype in REAL_NUMBER_TYPES + COMPLEX_NUMBER_TYPES:
            if col.dtype in REAL_NUMBER_TYPES:
                vmax = col.max(skipna=True)
                vmin = col.min(skipna=True)
            else:
                vmax = col.abs().max(skipna=True)
                vmin = col.abs().min(skipna=True)
            if vmax != vmin:
                max_min = [vmax, vmin]
            else:
                max_min = [vmax, vmin - 1]
        else:
            max_min = None
        self.max_min_col.append(max_min)

def strptime(cls, data, format, scale=DEFAULT_SCALE):  # pragma: no cover
    """Convert a string representation of a date to a Date object
    """
    return cls(datetime.strptime(data, format), scale=scale)

def getLabels(self, start=None, end=None):
    """
    Get the labels on classified points within range start to end. Not inclusive
    of end.
    :returns: (dict) with format:
        ::
            {
                'isProcessing': boolean,
                'recordLabels': list of results
            }
        ``isProcessing`` - currently always false as recalculation blocks; used if
        reprocessing of records is still being performed;
        Each item in ``recordLabels`` is of format:
        ::
            {
                'ROWID': id of the row,
                'labels': list of strings
            }
    """
    if len(self._recordsCache) == 0:
        return {
            'isProcessing': False,
            'recordLabels': []
        }
    try:
        start = int(start)
    except Exception:
        start = 0
    try:
        end = int(end)
    except Exception:
        end = self._recordsCache[-1].ROWID
    if end <= start:
        raise HTMPredictionModelInvalidRangeError(
            "Invalid supplied range for 'getLabels'.",
            debugInfo={
                'requestRange': {
                    'startRecordID': start,
                    'endRecordID': end
                },
                'numRecordsStored': len(self._recordsCache)
            })
    results = {
        'isProcessing': False,
        'recordLabels': []
    }
    ROWIDX = numpy.array(
        self._knnclassifier.getParameter('categoryRecencyList'))
    validIdx = numpy.where((ROWIDX >= start) & (ROWIDX < end))[0].tolist()
    categories = self._knnclassifier.getCategoryList()
    for idx in validIdx:
        row = dict(
            ROWID=int(ROWIDX[idx]),
            labels=self._categoryToLabelList(categories[idx]))
        results['recordLabels'].append(row)
    return results

def _readFile(self, fname, sldir):
    '''
    private method that reads in and organizes the .DAT file
    Loads the data of the .DAT file into the variables cattrs and cols.
    In both these cases they are dictionaries, but in the case of cols,
    it is a dictionary of numpy arrays, except for element and
    element_name, where it is just a list
    '''
    cattrs = []
    if sldir.endswith(os.sep):
        # Making sure fname will be formatted correctly
        fname = str(sldir) + str(fname)
    else:
        fname = str(sldir) + os.sep + str(fname)
        self.sldir += os.sep
    f = open(fname, 'r')
    lines = f.readlines()
    for i in range(len(lines)):
        lines[i] = lines[i].strip()
    cols = lines[0].strip('H')
    cols = cols.strip()
    cols = cols.split()
    for i in range(len(lines)):
        if lines[i].startswith('#'):
            # if it is a cycle attribute line
            lines[i] = lines[i].strip('#')
            tmp = lines[i].split()
            tmp1 = []
            for j in range(len(tmp)):
                if tmp[j] != '=' or '':
                    tmp1.append(tmp[j])
            tmp = tmp1
            j = 0
            while j < len(tmp):
                cattrs.append(tmp[j])
                j += 2
        elif not lines[i].startswith('H'):
            index = i - 1
            break
    return cattrs, cols, index

def eval(self, data, name='eval', iteration=0):
    """Evaluate the model on the given data.
    Parameters
    ----------
    data : DMatrix
        The dmatrix storing the input.
    name : str, optional
        The name of the dataset.
    iteration : int, optional
        The current iteration number.
    Returns
    -------
    result: str
        Evaluation result string.
    """
    self._validate_features(data)
    return self.eval_set([(data, name)], iteration)

def build_struct_type(s_sdt):
    '''
    Build an xsd complexType out of a S_SDT.
    '''
    s_dt = nav_one(s_sdt).S_DT[17]()
    struct = ET.Element('xs:complexType', name=s_dt.name)
    first_filter = lambda selected: not nav_one(selected).S_MBR[46, 'succeeds']()
    s_mbr = nav_any(s_sdt).S_MBR[44](first_filter)
    while s_mbr:
        s_dt = nav_one(s_mbr).S_DT[45]()
        type_name = get_type_name(s_dt)
        ET.SubElement(struct, 'xs:attribute', name=s_mbr.name, type=type_name)
        s_mbr = nav_one(s_mbr).S_MBR[46, 'precedes']()
    return struct

def user_func(func, arg_types=None, return_type=None):
"""Create an EFILTER-callable version of function 'func'.
As a security precaution, EFILTER will not execute Python callables
unless they implement the IApplicative protocol. There is a perfectly good
implementation of this protocol in the standard library and user functions
can inherit from it.
This will declare a subclass of the standard library TypedFunction and
return an instance of it that EFILTER will happily call.
Arguments:
func: A Python callable that will serve as the implementation.
arg_types (optional): A tuple of argument types. If the function takes
keyword arguments, they must still have a defined order.
return_type (optional): The type the function returns.
Returns:
An instance of a custom subclass of efilter.stdlib.core.TypedFunction.
Examples:
def my_callback(tag):
print("I got %r" % tag)
api.apply("if True then my_callback('Hello World!')",
vars={
"my_callback": api.user_func(my_callback)
})
# This should print "I got 'Hello World!'".
"""
class UserFunction(std_core.TypedFunction):
name = func.__name__
def __call__(self, *args, **kwargs):
return func(*args, **kwargs)
@classmethod
def reflect_static_args(cls):
return arg_types
@classmethod
def reflect_static_return(cls):
return return_type
return UserFunction() | 0.000631 |
def libvlc_media_player_get_agl(p_mi):
'''Get the agl handler previously set with L{libvlc_media_player_set_agl}().
@param p_mi: the Media Player.
@return: the agl handler or 0 if none where set.
'''
f = _Cfunctions.get('libvlc_media_player_get_agl', None) or \
_Cfunction('libvlc_media_player_get_agl', ((1,),), None,
ctypes.c_uint32, MediaPlayer)
return f(p_mi) | 0.007229 |
def cancel_signature_request(self, signature_request_id):
''' Cancels a SignatureRequest
Cancels a SignatureRequest. After canceling, no one will be able to sign
or access the SignatureRequest or its documents. Only the requester can
cancel and only before everyone has signed.
Args:
            signature_request_id (str): The id of the signature request to cancel
Returns:
None
'''
request = self._get_request()
request.post(url=self.SIGNATURE_REQUEST_CANCEL_URL + signature_request_id, get_json=False) | 0.006768 |
def update(self):
"""
Updates the #rules_map dictionary and #skippable_rules list based on the
#rules list. Must be called after #rules or any of its items have been
modified. The same rule name may appear multiple times.
# Raises
TypeError: if an item in the `rules` list is not a rule.
"""
self.rules_map = {}
self.skippable_rules = []
for rule in self.rules:
if not isinstance(rule, Rule):
raise TypeError('item must be Rule instance', type(rule))
self.rules_map.setdefault(rule.name, []).append(rule)
if rule.skip:
self.skippable_rules.append(rule) | 0.006369 |
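The same-name grouping above is the standard dict.setdefault idiom; a self-contained sketch with a stand-in Rule type:

    from collections import namedtuple

    Rule = namedtuple('Rule', 'name skip')   # stand-in for the real Rule class
    rules = [Rule('ws', True), Rule('num', False), Rule('num', False)]

    rules_map, skippable = {}, []
    for rule in rules:
        rules_map.setdefault(rule.name, []).append(rule)
        if rule.skip:
            skippable.append(rule)
    # rules_map == {'ws': [Rule('ws', True)], 'num': [Rule(...), Rule(...)]}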
def control_change(self, channel, control, value):
"""Send a control change message.
See the MIDI specification for more information.
"""
        # MIDI data bytes are 7-bit: valid controller numbers and values
        # run from 0 to 127 inclusive (the original `> 128` let 128 through).
        if control < 0 or control > 127:
            return False
        if value < 0 or value > 127:
            return False
self.cc_event(channel, control, value)
self.notify_listeners(self.MSG_CC, {'channel': int(channel),
'control': int(control), 'value': int(value)})
return True | 0.006186 |
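A hypothetical call against the method above; the sequencer instance is an assumption, and controller 7 is channel volume per the MIDI spec:

    seq.control_change(channel=0, control=7, value=100)   # set channel 0 volume
    seq.control_change(0, 64, 127)                        # sustain pedal down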
def _parse(self, context=0, push=True):
"""Parse the wikicode string, using *context* for when to stop."""
if push:
self._push(context)
while True:
this = self._read()
if self._context & contexts.UNSAFE:
if not self._verify_safe(this):
if self._context & contexts.DOUBLE:
self._pop()
self._fail_route()
if this not in self.MARKERS:
self._emit_text(this)
self._head += 1
continue
if this is self.END:
return self._handle_end()
next = self._read(1)
if this == next == "{":
if self._can_recurse():
self._parse_template_or_argument()
else:
self._emit_text("{")
elif this == "|" and self._context & contexts.TEMPLATE:
self._handle_template_param()
elif this == "=" and self._context & contexts.TEMPLATE_PARAM_KEY:
self._handle_template_param_value()
elif this == next == "}" and self._context & contexts.TEMPLATE:
return self._handle_template_end()
elif this == "|" and self._context & contexts.ARGUMENT_NAME:
self._handle_argument_separator()
elif this == next == "}" and self._context & contexts.ARGUMENT:
if self._read(2) == "}":
return self._handle_argument_end()
else:
self._emit_text("}")
elif this == next == "[" and self._can_recurse():
if not self._context & contexts.NO_WIKILINKS:
self._parse_wikilink()
else:
self._emit_text("[")
elif this == "|" and self._context & contexts.WIKILINK_TITLE:
self._handle_wikilink_separator()
elif this == next == "]" and self._context & contexts.WIKILINK:
return self._handle_wikilink_end()
elif this == "[":
self._parse_external_link(True)
elif this == ":" and self._read(-1) not in self.MARKERS:
self._parse_external_link(False)
elif this == "]" and self._context & contexts.EXT_LINK_TITLE:
return self._pop()
elif this == "=" and not self._global & contexts.GL_HEADING:
if self._read(-1) in ("\n", self.START):
self._parse_heading()
else:
self._emit_text("=")
elif this == "=" and self._context & contexts.HEADING:
return self._handle_heading_end()
elif this == "\n" and self._context & contexts.HEADING:
self._fail_route()
elif this == "&":
self._parse_entity()
elif this == "<" and next == "!":
if self._read(2) == self._read(3) == "-":
self._parse_comment()
else:
self._emit_text(this)
elif this == "<" and next == "/" and self._read(2) is not self.END:
if self._context & contexts.TAG_BODY:
self._handle_tag_open_close()
else:
self._handle_invalid_tag_start()
elif this == "<" and not self._context & contexts.TAG_CLOSE:
if self._can_recurse():
self._parse_tag()
else:
self._emit_text("<")
elif this == ">" and self._context & contexts.TAG_CLOSE:
return self._handle_tag_close_close()
elif this == next == "'" and not self._skip_style_tags:
result = self._parse_style()
if result is not None:
return result
elif self._read(-1) in ("\n", self.START) and this in ("#", "*", ";", ":"):
self._handle_list()
elif self._read(-1) in ("\n", self.START) and (
this == next == self._read(2) == self._read(3) == "-"):
self._handle_hr()
elif this in ("\n", ":") and self._context & contexts.DL_TERM:
self._handle_dl_term()
if this == "\n":
# Kill potential table contexts
self._context &= ~contexts.TABLE_CELL_LINE_CONTEXTS
# Start of table parsing
elif this == "{" and next == "|" and (
self._read(-1) in ("\n", self.START) or
(self._read(-2) in ("\n", self.START) and self._read(-1).isspace())):
if self._can_recurse():
self._parse_table()
else:
self._emit_text("{")
elif self._context & contexts.TABLE_OPEN:
if this == next == "|" and self._context & contexts.TABLE_TD_LINE:
if self._context & contexts.TABLE_CELL_OPEN:
return self._handle_table_cell_end()
self._handle_table_cell("||", "td", contexts.TABLE_TD_LINE)
elif this == next == "|" and self._context & contexts.TABLE_TH_LINE:
if self._context & contexts.TABLE_CELL_OPEN:
return self._handle_table_cell_end()
self._handle_table_cell("||", "th", contexts.TABLE_TH_LINE)
elif this == next == "!" and self._context & contexts.TABLE_TH_LINE:
if self._context & contexts.TABLE_CELL_OPEN:
return self._handle_table_cell_end()
self._handle_table_cell("!!", "th", contexts.TABLE_TH_LINE)
elif this == "|" and self._context & contexts.TABLE_CELL_STYLE:
return self._handle_table_cell_end(reset_for_style=True)
# on newline, clear out cell line contexts
elif this == "\n" and self._context & contexts.TABLE_CELL_LINE_CONTEXTS:
self._context &= ~contexts.TABLE_CELL_LINE_CONTEXTS
self._emit_text(this)
elif (self._read(-1) in ("\n", self.START) or
(self._read(-2) in ("\n", self.START) and self._read(-1).isspace())):
if this == "|" and next == "}":
if self._context & contexts.TABLE_CELL_OPEN:
return self._handle_table_cell_end()
if self._context & contexts.TABLE_ROW_OPEN:
return self._handle_table_row_end()
return self._handle_table_end()
elif this == "|" and next == "-":
if self._context & contexts.TABLE_CELL_OPEN:
return self._handle_table_cell_end()
if self._context & contexts.TABLE_ROW_OPEN:
return self._handle_table_row_end()
self._handle_table_row()
elif this == "|":
if self._context & contexts.TABLE_CELL_OPEN:
return self._handle_table_cell_end()
self._handle_table_cell("|", "td", contexts.TABLE_TD_LINE)
elif this == "!":
if self._context & contexts.TABLE_CELL_OPEN:
return self._handle_table_cell_end()
self._handle_table_cell("!", "th", contexts.TABLE_TH_LINE)
else:
self._emit_text(this)
else:
self._emit_text(this)
else:
self._emit_text(this)
self._head += 1 | 0.001405 |
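This routing loop closely resembles the pure-Python tokenizer in mwparserfromhell; assuming that is the package in question, the public entry point hides all of this:

    import mwparserfromhell

    wikicode = mwparserfromhell.parse("{{Infobox|name=Example}} [[Main Page|home]]")
    print(wikicode.filter_templates())   # [Template nodes for '{{Infobox|...}}']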
def _tarjan_head(ctx, v):
""" Used by @tarjan and @tarjan_iter. This is the head of the
main iteration """
ctx.index[v] = len(ctx.index)
ctx.lowlink[v] = ctx.index[v]
ctx.S.append(v)
ctx.S_set.add(v)
it = iter(ctx.g.get(v, ()))
    ctx.T.append((it, False, v, None)) | 0.015244 |
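The head above pushes an explicit frame onto ctx.T so the traversal can be driven without recursion. For reference, the same algorithm in its compact recursive form (a sketch, not this library's API):

    def tarjan_scc(g):
        """Strongly connected components of a {node: [successor, ...]} graph."""
        index, lowlink, on_stack = {}, {}, set()
        stack, sccs = [], []

        def visit(v):
            index[v] = lowlink[v] = len(index)
            stack.append(v)
            on_stack.add(v)
            for w in g.get(v, ()):
                if w not in index:
                    visit(w)
                    lowlink[v] = min(lowlink[v], lowlink[w])
                elif w in on_stack:
                    lowlink[v] = min(lowlink[v], index[w])
            if lowlink[v] == index[v]:       # v is the root of an SCC
                scc = []
                while True:
                    w = stack.pop()
                    on_stack.discard(w)
                    scc.append(w)
                    if w == v:
                        break
                sccs.append(scc)

        for v in g:
            if v not in index:
                visit(v)
        return sccs

    # tarjan_scc({1: [2], 2: [1, 3], 3: []}) -> [[3], [2, 1]]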
def _str(value, depth):
"""
FOR DEBUGGING POSSIBLY RECURSIVE STRUCTURES
"""
output = []
    if depth > 0 and _get(value, CLASS) in data_types:
        for k, v in value.items():
            output.append(str(k) + "=" + _str(v, depth - 1))
        return "{" + ",\n".join(output) + "}"
    elif depth > 0 and is_list(value):
        for v in value:
            output.append(_str(v, depth - 1))
        return "[" + ",\n".join(output) + "]"
    else:
return str(type(value)) | 0.006085 |
def do_gate_matrix(self, matrix: np.ndarray,
qubits: Sequence[int]) -> 'AbstractQuantumSimulator':
"""
Apply an arbitrary unitary; not necessarily a named gate.
:param matrix: The unitary matrix to apply. No checks are done
:param qubits: A list of qubits to apply the unitary to.
:return: ``self`` to support method chaining.
"""
# e.g. 2-qubit matrix is 4x4; turns into (2,2,2,2) tensor.
tensor = np.reshape(matrix, (2,) * len(qubits) * 2)
# Note to developers: you can use either einsum- or tensordot- based functions.
# tensordot seems a little faster, but feel free to experiment.
# self.wf = targeted_einsum(gate=gate_matrix, wf=self.wf, wf_target_inds=qubits)
self.wf = targeted_tensordot(gate=tensor, wf=self.wf, wf_target_inds=qubits)
return self | 0.006787 |
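A standalone numpy sketch of the reshape-and-contract idea: apply a one-qubit gate to any target axis of a state tensor, then restore the axis order:

    import numpy as np

    def apply_1q(gate, wf, target):
        # Contract the gate's input index with the target qubit's axis,
        # then move the gate's output index back into the target slot.
        out = np.tensordot(gate, wf, axes=((1,), (target,)))
        return np.moveaxis(out, 0, target)

    wf = np.zeros((2, 2), dtype=complex)   # two qubits as a (2, 2) tensor
    wf[0, 0] = 1.0                         # |00>
    X = np.array([[0, 1], [1, 0]], dtype=complex)
    wf = apply_1q(X, wf, target=1)         # amplitude now on |01>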
def directory_to_pif(directory, **kwargs):
"""
Convert a directory to a pif
:param directory: Directory to convert to a pif
:param kwargs: any additional keyword arguments. (See `files_to_pif`)
:return: the created pif
"""
# Get the files
files = [os.path.join(directory, f) for f in os.listdir(directory)
if os.path.isfile(os.path.join(directory, f))]
# Run the pif
return files_to_pif(files, **kwargs) | 0.002183 |
def get_key_signature(key='C'):
"""Return the key signature.
0 for C or a, negative numbers for flat key signatures, positive numbers
for sharp key signatures.
"""
if not is_valid_key(key):
raise NoteFormatError("unrecognized format for key '%s'" % key)
for couple in keys:
if key in couple:
accidentals = keys.index(couple) - 7
return accidentals | 0.002421 |
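Deterministic examples, assuming the conventional circle-of-fifths ordering of the keys table (majors uppercase, minors lowercase):

    get_key_signature('C')   # 0   (no accidentals)
    get_key_signature('G')   # 1   (one sharp)
    get_key_signature('F')   # -1  (one flat)
    get_key_signature('a')   # 0   (A minor, relative of C major)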
def call_runtime(self):
'''
Execute the runtime
'''
cache = self.gather_cache()
chunks = self.get_chunks()
interval = self.opts['thorium_interval']
recompile = self.opts.get('thorium_recompile', 300)
r_start = time.time()
while True:
events = self.get_events()
if not events:
time.sleep(interval)
continue
start = time.time()
self.state.inject_globals['__events__'] = events
self.state.call_chunks(chunks)
elapsed = time.time() - start
left = interval - elapsed
if left > 0:
time.sleep(left)
self.state.reset_run_num()
if (start - r_start) > recompile:
cache = self.gather_cache()
chunks = self.get_chunks()
if self.reg_ret is not None:
self.returners['{0}.save_reg'.format(self.reg_ret)](chunks)
r_start = time.time() | 0.001919 |
def main():
''' Execute the "bokeh" command line program.
'''
import sys
from bokeh.command.bootstrap import main as _main
# Main entry point (see setup.py)
_main(sys.argv) | 0.010152 |
def nvmlDeviceGetMemoryErrorCounter(handle, errorType, counterType, locationType):
r"""
/**
* Retrieves the requested memory error counter for the device.
*
* For Fermi &tm; or newer fully supported devices.
* Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based memory error counts.
* Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other memory error counts.
*
* Only applicable to devices with ECC.
*
* Requires ECC Mode to be enabled.
*
* See \ref nvmlMemoryErrorType_t for a description of available memory error types.\n
* See \ref nvmlEccCounterType_t for a description of available counter types.\n
* See \ref nvmlMemoryLocation_t for a description of available counter locations.\n
*
* @param device The identifier of the target device
* @param errorType Flag that specifies the type of error.
* @param counterType Flag that specifies the counter-type of the errors.
* @param locationType Specifies the location of the counter.
* @param count Reference in which to return the ECC counter
*
* @return
* - \ref NVML_SUCCESS if \a count has been populated
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
    * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a bitType, \a counterType or \a locationType is
* invalid, or \a count is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support ECC error reporting in the specified memory
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceGetMemoryErrorCounter
"""
c_count = c_ulonglong()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryErrorCounter")
ret = fn(handle,
_nvmlMemoryErrorType_t(errorType),
_nvmlEccCounterType_t(counterType),
_nvmlMemoryLocation_t(locationType),
byref(c_count))
_nvmlCheckReturn(ret)
return bytes_to_str(c_count.value) | 0.006964 |
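A hedged usage sketch with the pynvml bindings (constant names come from the NVML headers; requires an ECC-enabled GPU):

    from pynvml import (nvmlInit, nvmlShutdown, nvmlDeviceGetHandleByIndex,
                        nvmlDeviceGetMemoryErrorCounter,
                        NVML_MEMORY_ERROR_TYPE_CORRECTED, NVML_VOLATILE_ECC,
                        NVML_MEMORY_LOCATION_DEVICE_MEMORY)

    nvmlInit()
    try:
        handle = nvmlDeviceGetHandleByIndex(0)
        count = nvmlDeviceGetMemoryErrorCounter(
            handle, NVML_MEMORY_ERROR_TYPE_CORRECTED, NVML_VOLATILE_ECC,
            NVML_MEMORY_LOCATION_DEVICE_MEMORY)
        print("corrected volatile ECC errors in device memory:", count)
    finally:
        nvmlShutdown()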
def actor_url(parser, token):
"""
Renders the URL for a particular actor instance
::
<a href="{% actor_url request.user %}">View your actions</a>
<a href="{% actor_url another_user %}">{{ another_user }}'s actions</a>
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("Accepted format "
"{% actor_url [actor_instance] %}")
else:
return DisplayActivityActorUrl(*bits[1:]) | 0.002016 |
def command(self, rule, **options):
"""\
direct=False, override=True, inject=False, flags=0
"""
options.setdefault("direct", False)
options.setdefault("override", True)
options.setdefault("inject", False)
options.setdefault("flags", 0)
if not options["direct"]:
rule = self.regexy(rule)
regex = re.compile(rule, flags=options["flags"])
self.handlers.setdefault(regex, [])
def handler(f):
if f == noop:
f.options = {}
else:
f.options = options
if options["override"]:
self.handlers[regex] = [f]
else:
self.handlers[regex].append(f)
f.no_args = self.no_args(f)
return f
return handler | 0.003632 |
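A hypothetical registration through the decorator above; the bot object and rule syntax are assumptions:

    import re

    @bot.command(r"ping$", flags=re.IGNORECASE, override=False)
    def ping(message):
        return "pong"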
def get_cli_string(path=None, action=None, key=None, value=None, quote=None):
"""Returns a string suitable for running as a shell script.
    Useful for converting arguments passed to a fabric task
to be passed to a `local` or `run` command.
"""
command = ['dotenv']
if quote:
command.append('-q %s' % quote)
if path:
command.append('-f %s' % path)
if action:
command.append(action)
if key:
command.append(key)
if value:
if ' ' in value:
command.append('"%s"' % value)
else:
command.append(value)
return ' '.join(command).strip() | 0.001443 |
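The output is fully determined by the code above; for example:

    get_cli_string(path='.env', action='set', key='GREETING', value='hello world')
    # -> 'dotenv -f .env set GREETING "hello world"'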
def delete_host(zone, name, keyname, keyfile, nameserver, timeout, port=53,
keyalgorithm='hmac-md5'):
'''
Delete both forward (A) and reverse (PTR) records for a host only if the
forward (A) record exists.
CLI Example:
.. code-block:: bash
salt-run ddns.delete_host domain.com my-test-vm my-tsig-key /etc/salt/tsig.keyring 10.0.0.1 5
'''
res = []
if zone in name:
name = name.replace(zone, '').rstrip('.')
fqdn = '{0}.{1}'.format(name, zone)
request = dns.message.make_query(fqdn, 'A')
answer = dns.query.udp(request, nameserver, timeout, port)
try:
ips = [i.address for i in answer.answer[0].items]
except IndexError:
ips = []
ret = delete(zone, name, keyname, keyfile, nameserver, timeout, port=port,
keyalgorithm=keyalgorithm)
res.append("{0} of type \'A\'".format(ret[fqdn]))
for ip in ips:
parts = ip.split('.')[::-1]
i = len(parts)
popped = []
# Iterate over possible reverse zones
while i > 1:
p = parts.pop(0)
i -= 1
popped.append(p)
zone = '{0}.{1}'.format('.'.join(parts), 'in-addr.arpa.')
name = '.'.join(popped)
rev_fqdn = '{0}.{1}'.format(name, zone)
ret = delete(zone, name, keyname, keyfile, nameserver, timeout,
'PTR', "{0}.".format(fqdn), port, keyalgorithm)
if "Deleted" in ret[rev_fqdn]:
res.append("{0} of type \'PTR\'".format(ret[rev_fqdn]))
return {fqdn: res}
res.append(ret[rev_fqdn])
return {fqdn: res} | 0.001198 |
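The reverse-zone walk above is easiest to see in a standalone trace; for 10.0.0.1 it tries each candidate zone/name split in turn:

    ip = '10.0.0.1'
    parts = ip.split('.')[::-1]        # ['1', '0', '0', '10']
    i, popped = len(parts), []
    while i > 1:
        popped.append(parts.pop(0))
        i -= 1
        zone = '{0}.{1}'.format('.'.join(parts), 'in-addr.arpa.')
        name = '.'.join(popped)
        print(name, zone)
    # 1      0.0.10.in-addr.arpa.
    # 1.0    0.10.in-addr.arpa.
    # 1.0.0  10.in-addr.arpa.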
def _dict_mapping_to_pb(mapping, proto_type):
"""
Convert a dict to protobuf.
Args:
mapping (dict): A dict that needs to be converted to protobuf.
proto_type (str): The type of the Protobuf.
Returns:
An instance of the specified protobuf.
"""
converted_pb = getattr(trace_pb2, proto_type)()
ParseDict(mapping, converted_pb)
return converted_pb | 0.002494 |
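ParseDict works the same way for any generated message type; a self-contained sketch using the well-known Struct type in place of the trace_pb2 messages:

    from google.protobuf.json_format import ParseDict
    from google.protobuf.struct_pb2 import Struct

    msg = Struct()
    ParseDict({'name': 'demo', 'count': 3}, msg)
    print(msg.fields['name'].string_value)   # 'demo'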
def edit(self, tag_name=None, target_commitish=None, name=None, body=None,
draft=None, prerelease=None):
"""Users with push access to the repository can edit a release.
If the edit is successful, this object will update itself.
:param str tag_name: (optional), Name of the tag to use
:param str target_commitish: (optional), The "commitish" value that
determines where the Git tag is created from. Defaults to the
repository's default branch.
:param str name: (optional), Name of the release
:param str body: (optional), Description of the release
:param boolean draft: (optional), True => Release is a draft
:param boolean prerelease: (optional), True => Release is a prerelease
:returns: True if successful; False if not successful
"""
url = self._api
data = {
'tag_name': tag_name,
'target_commitish': target_commitish,
'name': name,
'body': body,
'draft': draft,
'prerelease': prerelease,
}
self._remove_none(data)
r = self._session.patch(
url, data=json.dumps(data), headers=Release.CUSTOM_HEADERS
)
successful = self._boolean(r, 200, 404)
if successful:
# If the edit was successful, let's update the object.
self.__init__(r.json(), self)
return successful | 0.002049 |
def connect(self, addr):
"""Initiate a connection request to the peer router."""
if _debug: RouterToRouterService._debug("connect %r", addr)
# make a connection
conn = ConnectionState(addr)
self.multiplexer.connections[addr] = conn
# associate with this service, but it is not connected until the ack comes back
conn.service = self
# keep a list of pending NPDU objects until the ack comes back
conn.pendingNPDU = []
# build a service request
request = ServiceRequest(ROUTER_TO_ROUTER_SERVICE_ID)
request.pduDestination = addr
# send it
self.service_request(request)
# return the connection object
return conn | 0.005384 |