text (string, 78–104k chars) | score (float64, 0–0.18)
---|---|
def get_certificate_issuers(self, **kwargs): # noqa: E501
"""Get certificate issuers list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_certificate_issuers(asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:return: CertificateIssuerInfoListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_certificate_issuers_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_certificate_issuers_with_http_info(**kwargs) # noqa: E501
return data | 0.002334 |
def sendCommands(comPort, commands):
"""Send X10 commands using the FireCracker on comPort
comPort should be the name of a serial port on the host platform. On
Windows, for example, 'com1'.
commands should be a string consisting of X10 commands separated by
    commas. For example, 'A1 On, A Dim, A Dim, A Dim, A Lamps Off'. The
letter is a house code (A-P) and the number is the device number (1-16).
Possible commands for a house code / device number combination are
'On' and 'Off'. The commands 'Bright' and 'Dim' should be used with a
house code alone after sending an On command to a specific device. The
'All On', 'All Off', 'Lamps On', and 'Lamps Off' commands should also
be used with a house code alone.
# Turn on module A1
>>> sendCommands('com1', 'A1 On')
# Turn all modules with house code A off
>>> sendCommands('com1', 'A All Off')
# Turn all lamp modules with house code B on
>>> sendCommands('com1', 'B Lamps On')
# Turn on module A1 and dim it 3 steps, then brighten it 1 step
>>> sendCommands('com1', 'A1 On, A Dim, A Dim, A Dim, A Bright')
"""
mutex.acquire()
try:
try:
port = serial.Serial(port=comPort)
header = '11010101 10101010'
footer = '10101101'
for command in _translateCommands(commands):
_sendBinaryData(port, header + command + footer)
except serial.SerialException:
print('Unable to open serial port %s' % comPort)
print('')
raise
finally:
mutex.release() | 0.000623 |
def plot_line(axes, fname, ltype):
"""plot the ecliptic plane line on the given axes."""
x = np.genfromtxt(fname, unpack=True)
axes.plot(x[0], x[1], ltype) | 0.005988 |
def _build_process_container_tree(self, pids):
"""
tops = [1,2,3]
childs = {1: [4,5], 2: [6,7], 3: [], 4: []}
"""
containers = []
procs = []
ppids = []
childs = {}
for pid in pids:
proc = process.Process(pid)
procs.append(proc)
ppids.append(proc.ppid)
if proc.ppid not in childs:
childs[proc.ppid] = []
childs[proc.ppid].append(proc)
ppids = set(ppids)
tops = [proc for proc in procs if proc.ppid not in pids]
if len(tops) == 0:
tops = procs
def build_tree(proc_list):
_containers = []
for proc in proc_list:
if not self.args.show_kthread and proc.is_kthread():
continue
cont = TreeContainer(proc)
if proc.pid in childs:
cont.childs = build_tree(childs[proc.pid])
_containers.append(cont)
return _containers
for top_proc in tops:
if not self.args.show_kthread and top_proc.is_kthread():
continue
cont = TreeContainer(top_proc)
if top_proc.pid in childs:
cont.childs = build_tree(childs[top_proc.pid])
containers.append(cont)
return containers | 0.001453 |
def _invalid_frequency(self, frequency):
"""
Check to see that frequency was specified correctly
:param frequency (string): frequency string
:return (boolean):
"""
is_valid = self._is_eod_frequency(frequency) or re.match(self._frequency_pattern, frequency)
return not is_valid | 0.009036 |
def get_class_alias(klass):
"""
Tries to find a suitable L{pyamf.ClassAlias} subclass for C{klass}.
"""
for k, v in pyamf.ALIAS_TYPES.iteritems():
for kl in v:
try:
if issubclass(klass, kl):
return k
except TypeError:
# not a class
if hasattr(kl, '__call__'):
if kl(klass) is True:
return k | 0.002222 |
def watch(args):
" Watch directory for changes and auto pack sources "
assert op.isdir(args.source), "Watch mode allowed only for directories."
print 'Zeta-library v. %s watch mode' % VERSION
print '================================'
print 'Ctrl+C for exit\n'
observer = Observer()
handler = ZetaTrick(args=args)
observer.schedule(handler, args.source, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
print "\nWatch mode stoped."
observer.join() | 0.001706 |
def get(self, flex_sched_rule_id):
"""Retrieve the information for a flexscheduleRule entity."""
path = '/'.join(['flexschedulerule', flex_sched_rule_id])
return self.rachio.get(path) | 0.009662 |
def _attach_params(self, params, **kwargs):
"""Attach a list of parameters (or ParameterSet) to this ParameterSet.
:parameter list params: list of parameters, or ParameterSet
:parameter **kwargs: attributes to set for each parameter (ie tags)
"""
lst = params.to_list() if isinstance(params, ParameterSet) else params
for param in lst:
param._bundle = self
for k, v in kwargs.items():
# Here we'll set the attributes (_context, _qualifier, etc)
if getattr(param, '_{}'.format(k)) is None:
setattr(param, '_{}'.format(k), v)
self._params.append(param)
self._check_copy_for()
return | 0.002717 |
def _all_reads_from_contig(self, contig, fout):
'''Gets all reads from contig called "contig" and writes to fout'''
sam_reader = pysam.Samfile(self.bam, "rb")
for read in sam_reader.fetch(contig):
print(mapping.aligned_read_to_read(read, ignore_quality=not self.fastq_out), file=fout) | 0.009375 |
def parse_net_kwargs(kwargs):
"""Parse arguments for the estimator.
Resolves dotted names and instantiated classes.
Examples
--------
>>> kwargs = {'lr': 0.1, 'module__nonlin': 'torch.nn.Hardtanh(-2, max_val=3)'}
>>> parse_net_kwargs(kwargs)
{'lr': 0.1, 'module__nonlin': Hardtanh(min_val=-2, max_val=3)}
"""
if not kwargs:
return kwargs
resolved = {}
for k, v in kwargs.items():
resolved[k] = _resolve_dotted_name(v)
return resolved | 0.003984 |
def electrum_pub(self, s):
"""
Parse an electrum public key from a text string in seed form ("E:xxx" where xxx
is a 128-character hex string).
Return a :class:`ElectrumWallet <pycoin.key.electrum.ElectrumWallet>` or None.
"""
blob = self._electrum_to_blob(s)
if blob and len(blob) == 64:
return self._network.keys.electrum_public(master_public_key=blob) | 0.009501 |
def backport_makefile(self, mode="r", buffering=None, encoding=None,
errors=None, newline=None):
"""
Backport of ``socket.makefile`` from Python 3.5.
"""
if not set(mode) <= {"r", "w", "b"}:
raise ValueError(
"invalid mode %r (only r, w, b allowed)" % (mode,)
)
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = SocketIO(self, rawmode)
self._makefile_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text | 0.000816 |
def setup(pin, mode, pullup=None, initial=False):
'''Setup pin with mode IN or OUT.
Args:
        pin (int): the pin number
        mode (str): use either gpio.OUT or gpio.IN
        pullup (None): rpio compatibility. If anything but None, raises
            ValueError
        initial (bool, optional): Initial pin value. Default is False
'''
if pullup is not None:
raise ValueError("sysfs does not support pullups")
if mode not in (IN, OUT, LOW, HIGH):
raise ValueError(mode)
log.debug("Setup {0}: {1}".format(pin, mode))
f = _open[pin].direction
_write(f, mode)
if mode == OUT:
if initial:
set(pin, 1)
else:
set(pin, 0) | 0.001379 |
def leader_for_partition(self, partition):
"""Return node_id of leader, -1 unavailable, None if unknown."""
if partition.topic not in self._partitions:
return None
elif partition.partition not in self._partitions[partition.topic]:
return None
return self._partitions[partition.topic][partition.partition].leader | 0.00545 |
def CPUID(cpu):
"""
CPUID instruction.
The ID flag (bit 21) in the EFLAGS register indicates support for the
CPUID instruction. If a software procedure can set and clear this
flag, the processor executing the procedure supports the CPUID
instruction. This instruction operates the same in non-64-bit modes and
64-bit mode. CPUID returns processor identification and feature
information in the EAX, EBX, ECX, and EDX registers.
The instruction's output is dependent on the contents of the EAX
register upon execution.
:param cpu: current CPU.
"""
# FIXME Choose conservative values and consider returning some default when eax not here
conf = {0x0: (0x0000000d, 0x756e6547, 0x6c65746e, 0x49656e69),
0x1: (0x000306c3, 0x05100800, 0x7ffafbff, 0xbfebfbff),
0x2: (0x76035a01, 0x00f0b5ff, 0x00000000, 0x00c10000),
0x4: {0x0: (0x1c004121, 0x01c0003f, 0x0000003f, 0x00000000),
0x1: (0x1c004122, 0x01c0003f, 0x0000003f, 0x00000000),
0x2: (0x1c004143, 0x01c0003f, 0x000001ff, 0x00000000),
0x3: (0x1c03c163, 0x03c0003f, 0x00000fff, 0x00000006)},
0x7: (0x00000000, 0x00000000, 0x00000000, 0x00000000),
0x8: (0x00000000, 0x00000000, 0x00000000, 0x00000000),
0xb: {0x0: (0x00000001, 0x00000002, 0x00000100, 0x00000005),
0x1: (0x00000004, 0x00000004, 0x00000201, 0x00000003)},
0xd: {0x0: (0x00000000, 0x00000000, 0x00000000, 0x00000000),
0x1: (0x00000000, 0x00000000, 0x00000000, 0x00000000)},
}
if cpu.EAX not in conf:
logger.warning('CPUID with EAX=%x not implemented @ %x', cpu.EAX, cpu.PC)
cpu.EAX, cpu.EBX, cpu.ECX, cpu.EDX = 0, 0, 0, 0
return
if isinstance(conf[cpu.EAX], tuple):
cpu.EAX, cpu.EBX, cpu.ECX, cpu.EDX = conf[cpu.EAX]
return
if cpu.ECX not in conf[cpu.EAX]:
logger.warning('CPUID with EAX=%x ECX=%x not implemented', cpu.EAX, cpu.ECX)
cpu.EAX, cpu.EBX, cpu.ECX, cpu.EDX = 0, 0, 0, 0
return
cpu.EAX, cpu.EBX, cpu.ECX, cpu.EDX = conf[cpu.EAX][cpu.ECX] | 0.002137 |
def hpss_demo(input_file, output_harmonic, output_percussive):
'''HPSS demo function.
:parameters:
- input_file : str
path to input audio
- output_harmonic : str
path to save output harmonic (wav)
- output_percussive : str
        path to save output percussive (wav)
'''
# 1. Load the wav file, resample
print('Loading ', input_file)
y, sr = librosa.load(input_file)
# Separate components with the effects module
print('Separating harmonics and percussives... ')
y_harmonic, y_percussive = librosa.effects.hpss(y)
# 5. Save the results
print('Saving harmonic audio to: ', output_harmonic)
librosa.output.write_wav(output_harmonic, y_harmonic, sr)
print('Saving percussive audio to: ', output_percussive)
librosa.output.write_wav(output_percussive, y_percussive, sr) | 0.001155 |
def build(self):
"""Builds the index, creating an instance of `lunr.Index`.
This completes the indexing process and should only be called once all
documents have been added to the index.
"""
self._calculate_average_field_lengths()
self._create_field_vectors()
self._create_token_set()
return Index(
inverted_index=self.inverted_index,
field_vectors=self.field_vectors,
token_set=self.token_set,
fields=list(self._fields.keys()),
pipeline=self.search_pipeline,
) | 0.003361 |
def consult_response_hook(self, item_session: ItemSession) -> Actions:
'''Return scripting action when a response ends.'''
try:
return self.hook_dispatcher.call(
PluginFunctions.handle_response, item_session
)
except HookDisconnected:
return Actions.NORMAL | 0.006024 |
def get_content_commit_date(extensions, acceptance_callback=None,
root_dir='.'):
"""Get the datetime for the most recent commit to a project that
affected certain types of content.
Parameters
----------
extensions : sequence of 'str'
Extensions of files to consider in getting the most recent commit
date. For example, ``('rst', 'svg', 'png')`` are content extensions
        for a Sphinx project. **Extension comparison is case sensitive.** Add
uppercase variants to match uppercase extensions.
acceptance_callback : callable
Callable function whose sole argument is a file path, and returns
`True` or `False` depending on whether the file's commit date should
be considered or not. This callback is only run on files that are
included by ``extensions``. Thus this callback is a way to exclude
specific files that would otherwise be included by their extension.
    root_dir : `str`, optional
Only content contained within this root directory is considered.
This directory must be, or be contained by, a Git repository. This is
the current working directory by default.
Returns
-------
commit_date : `datetime.datetime`
Datetime of the most recent content commit.
Raises
------
RuntimeError
Raised if no content files are found.
"""
logger = logging.getLogger(__name__)
def _null_callback(_):
return True
if acceptance_callback is None:
acceptance_callback = _null_callback
# Cache the repo object for each query
root_dir = os.path.abspath(root_dir)
repo = git.repo.base.Repo(path=root_dir, search_parent_directories=True)
# Iterate over all files with all file extensions, looking for the
# newest commit datetime.
newest_datetime = None
iters = [_iter_filepaths_with_extension(ext, root_dir=root_dir)
for ext in extensions]
for content_path in itertools.chain(*iters):
content_path = os.path.abspath(os.path.join(root_dir, content_path))
if acceptance_callback(content_path):
logger.debug('Found content path %r', content_path)
try:
commit_datetime = read_git_commit_timestamp_for_file(
content_path, repo=repo)
logger.debug('Commit timestamp of %r is %s',
content_path, commit_datetime)
except IOError:
logger.warning(
                    'Could not get commit for %r, skipping',
content_path)
continue
if not newest_datetime or commit_datetime > newest_datetime:
# Seed initial newest_datetime
# or set a newer newest_datetime
newest_datetime = commit_datetime
logger.debug('Newest commit timestamp is %s', newest_datetime)
logger.debug('Final commit timestamp is %s', newest_datetime)
if newest_datetime is None:
raise RuntimeError('No content files found in {}'.format(root_dir))
return newest_datetime | 0.000316 |
def create_downloadjob(entry, domain, config):
"""Create download jobs for all file formats from a summary file entry."""
logging.info('Checking record %r', entry['assembly_accession'])
full_output_dir = create_dir(entry, config.section, domain, config.output)
symlink_path = None
if config.human_readable:
symlink_path = create_readable_dir(entry, config.section, domain, config.output)
checksums = grab_checksums_file(entry)
# TODO: Only write this when the checksums file changed
with open(os.path.join(full_output_dir, 'MD5SUMS'), 'w') as handle:
handle.write(checksums)
parsed_checksums = parse_checksums(checksums)
download_jobs = []
for fmt in config.file_format:
try:
if has_file_changed(full_output_dir, parsed_checksums, fmt):
download_jobs.append(
download_file_job(entry, full_output_dir, parsed_checksums, fmt, symlink_path))
elif need_to_create_symlink(full_output_dir, parsed_checksums, fmt, symlink_path):
download_jobs.append(
create_symlink_job(full_output_dir, parsed_checksums, fmt, symlink_path))
except ValueError as err:
logging.error(err)
return download_jobs | 0.003915 |
def plot_blob(
sampler, blobidx=0, label=None, last_step=False, figure=None, **kwargs
):
"""
Plot a metadata blob as a fit to spectral data or value distribution
Additional ``kwargs`` are passed to `plot_fit`.
Parameters
----------
sampler : `emcee.EnsembleSampler`
Sampler with a stored chain.
blobidx : int, optional
Metadata blob index to plot.
label : str, optional
Label for the value distribution. Labels for the fit plot can be passed
as ``xlabel`` and ``ylabel`` and will be passed to `plot_fit`.
Returns
-------
figure : `matplotlib.pyplot.Figure`
`matplotlib` figure instance containing the plot.
"""
modelx, model = _process_blob(sampler, blobidx, last_step)
if label is None:
label = "Model output {0}".format(blobidx)
if modelx is None:
# Blob is scalar, plot distribution
f = plot_distribution(model, label, figure=figure)
else:
f = plot_fit(
sampler,
modelidx=blobidx,
last_step=last_step,
label=label,
figure=figure,
**kwargs
)
return f | 0.000846 |
def predict_dims(self, q, dims_x, dims_y, dims_out, sigma=None, k=None):
"""Provide a prediction of q in the output space
        @param q     an array of float of length len(dims_x) + len(dims_y)
        @param sigma if None (default), sigma_sq = self.sigma_sq, else sigma_sq = sigma*sigma
        @param k     number of neighbors; defaults to self.k
        """
assert len(q) == len(dims_x) + len(dims_y)
sigma_sq = self.sigma_sq if sigma is None else sigma*sigma
k = k or self.k
dists, index = self.dataset.nn_dims(q[:len(dims_x)], q[len(dims_x):], dims_x, dims_y, k=k)
w = self._weights(dists, index, sigma_sq)
Xq = np.array(np.append([1.0], q), ndmin = 2)
X = np.array([np.append([1.0], self.dataset.get_dims(i, dims_x=dims_x, dims_y=dims_y)) for i in index])
Y = np.array([self.dataset.get_dims(i, dims=dims_out) for i in index])
W = np.diag(w)
WX = np.dot(W, X)
WXT = WX.T
B = np.dot(np.linalg.pinv(np.dot(WXT, WX)),WXT)
self.mat = np.dot(B, np.dot(W, Y))
Yq = np.dot(Xq, self.mat)
return Yq.ravel() | 0.015652 |
def camel_to_snake(camel):
"""Convert camelCase to snake_case."""
ret = []
last_lower = False
for char in camel:
current_upper = char.upper() == char
if current_upper and last_lower:
ret.append("_")
ret.append(char.lower())
else:
ret.append(char.lower())
last_lower = not current_upper
return "".join(ret) | 0.002545 |
def add_other_ldflags(self, flags, target_name=None, configuration_name=None):
"""
Adds flag values to the OTHER_LDFLAGS flag.
:param flags: A string or array of strings. If none, removes all values from the flag.
:param target_name: Target name or list of target names to add the flag to or None for every target
:param configuration_name: Configuration name to add the flag to or None for every configuration
:return: void
"""
self.add_flags(XCBuildConfigurationFlags.OTHER_LDFLAGS, flags, target_name, configuration_name) | 0.010204 |
def _load_image_set_index(self, shuffle):
"""
find out which indexes correspond to given image set (train or val)
Parameters:
----------
shuffle : boolean
whether to shuffle the image list
Returns:
----------
entire list of images specified in the setting
"""
image_set_index_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
assert os.path.exists(image_set_index_file), 'Path does not exist: {}'.format(image_set_index_file)
with open(image_set_index_file) as f:
image_set_index = [x.strip() for x in f.readlines()]
if shuffle:
np.random.shuffle(image_set_index)
return image_set_index | 0.005215 |
def _write_script(path, lines, chmod=True):
'''write a script with some lines content to path in the image. This
is done by way of adding echo statements to the install section.
Parameters
==========
path: the path to the file to write
lines: the lines to echo to the file
chmod: If true, change permission to make u+x
'''
if len(lines) > 0:
lastline = lines.pop()
for line in lines:
            self.install.append('echo "%s" >> %s' % (line, path))
self.install.append(lastline)
if chmod is True:
self.install.append('chmod u+x %s' %path) | 0.007396 |
def compound(clr, flip=False):
"""
Roughly the complement and some far analogs.
"""
def _wrap(x, min, threshold, plus):
if x - min < threshold:
return x + plus
else:
return x - min
d = 1
if flip: d = -1
clr = color(clr)
colors = colorlist(clr)
c = clr.rotate_ryb(30 * d)
c.brightness = _wrap(clr.brightness, 0.25, 0.6, 0.25)
colors.append(c)
c = clr.rotate_ryb(30 * d)
c.saturation = _wrap(clr.saturation, 0.4, 0.1, 0.4)
c.brightness = _wrap(clr.brightness, 0.4, 0.2, 0.4)
colors.append(c)
c = clr.rotate_ryb(160 * d)
c.saturation = _wrap(clr.saturation, 0.25, 0.1, 0.25)
c.brightness = max(0.2, clr.brightness)
colors.append(c)
c = clr.rotate_ryb(150 * d)
c.saturation = _wrap(clr.saturation, 0.1, 0.8, 0.1)
c.brightness = _wrap(clr.brightness, 0.3, 0.6, 0.3)
colors.append(c)
c = clr.rotate_ryb(150 * d)
c.saturation = _wrap(clr.saturation, 0.1, 0.8, 0.1)
c.brightness = _wrap(clr.brightness, 0.4, 0.2, 0.4)
# colors.append(c)
return colors | 0.001815 |
def on_error(self, headers, body):
"""
Increment the error count. See :py:meth:`ConnectionListener.on_error`
:param dict headers: headers in the message
:param body: the message content
"""
if log.isEnabledFor(logging.DEBUG):
log.debug("received an error %s [%s]", body, headers)
else:
log.info("received an error %s", body)
self.errors += 1 | 0.004651 |
def reduce(self, func):
"""Return a new DStream where each RDD was reduced with ``func``.
:rtype: DStream
"""
# avoid RDD.reduce() which does not return an RDD
return self.transform(
lambda rdd: (
rdd
.map(lambda i: (None, i))
.reduceByKey(func)
.map(lambda none_i: none_i[1])
)
) | 0.004785 |
def get_playlist(self, channel):
"""Return the playlist for the given channel
:param channel: the channel
:type channel: :class:`models.Channel` | :class:`str`
:returns: the playlist
:rtype: :class:`m3u8.M3U8`
:raises: :class:`requests.HTTPError` if channel is offline.
"""
if isinstance(channel, models.Channel):
channel = channel.name
token, sig = self.get_channel_access_token(channel)
params = {'token': token, 'sig': sig,
'allow_audio_only': True,
'allow_source': True}
r = self.usher_request(
'GET', 'channel/hls/%s.m3u8' % channel, params=params)
playlist = m3u8.loads(r.text)
return playlist | 0.002614 |
def version_cmd(argv):
"""Prints current pew version"""
import pkg_resources
try:
__version__ = pkg_resources.get_distribution('pew').version
except pkg_resources.DistributionNotFound:
__version__ = 'unknown'
print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
print(__version__) | 0.00551 |
def logs_handle_experiment_job(experiment_name: str,
experiment_uuid: str,
log_lines: Optional[Union[str, Iterable[str]]],
temp: bool = True) -> None:
"""Task handling for sidecars logs."""
handle_experiment_job_log(experiment_name=experiment_name,
experiment_uuid=experiment_uuid,
log_lines=log_lines,
temp=temp) | 0.001984 |
def p_case_statement(self, p):
'case_statement : CASE LPAREN case_comp RPAREN casecontent_statements ENDCASE'
p[0] = CaseStatement(p[3], p[5], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | 0.013953 |
def has_datastore(self):
# type: () -> bool
"""Check if the resource has a datastore.
Returns:
bool: Whether the resource has a datastore or not
"""
success, result = self._read_from_hdx('datastore', self.data['id'], 'resource_id',
self.actions()['datastore_search'])
if not success:
logger.debug(result)
else:
if result:
return True
return False | 0.009823 |
def get_model_and_form_class(model, form_class):
"""
Returns a model and form class based on the model and form_class
parameters that were passed to the generic view.
If ``form_class`` is given then its associated model will be returned along
with ``form_class`` itself. Otherwise, if ``model`` is given, ``model``
itself will be returned along with a ``ModelForm`` class created from
``model``.
"""
if form_class:
return form_class._meta.model, form_class
if model:
# The inner Meta class fails if model = model is used for some reason.
tmp_model = model
# TODO: we should be able to construct a ModelForm without creating
# and passing in a temporary inner class.
class Meta:
model = tmp_model
class_name = model.__name__ + 'Form'
form_class = ModelFormMetaclass(
class_name, (ModelForm,), {'Meta': Meta})
return model, form_class
raise GenericViewError("Generic view must be called with either a model or"
" form_class argument.") | 0.000906 |
def _divide(self, x1, x2, out):
"""Compute the entry-wise quotient ``x1 / x2``.
This function is part of the subclassing API. Do not
call it directly.
Parameters
----------
x1, x2 : `NumpyTensor`
Dividend and divisor in the quotient.
out : `NumpyTensor`
Element to which the result is written.
Examples
--------
>>> space = odl.rn(3)
>>> x = space.element([2, 0, 4])
>>> y = space.element([1, 1, 2])
>>> space.divide(x, y)
rn(3).element([ 2., 0., 2.])
>>> out = space.element()
>>> result = space.divide(x, y, out=out)
>>> result
rn(3).element([ 2., 0., 2.])
>>> result is out
True
"""
np.divide(x1.data, x2.data, out=out.data) | 0.002398 |
def search(self, query):
""" Perform request tracker search """
# Prepare the path
log.debug("Query: {0}".format(query))
path = self.url.path + '?Format=__id__+__Subject__'
path += "&Order=ASC&OrderBy=id&Query=" + urllib.quote(query)
# Get the tickets
lines = self.get(path)
log.info(u"Fetched tickets: {0}".format(len(lines)))
return [self.parent.ticket(line, self.parent) for line in lines] | 0.004301 |
def handle_termination(cls, pid, is_cancel=True):
'''
Internal method to terminate a subprocess spawned by `pexpect` representing an invocation of runner.
:param pid: the process id of the running the job.
:param is_cancel: flag showing whether this termination is caused by
instance's cancel_flag.
'''
try:
main_proc = psutil.Process(pid=pid)
child_procs = main_proc.children(recursive=True)
for child_proc in child_procs:
try:
os.kill(child_proc.pid, signal.SIGKILL)
except (TypeError, OSError):
pass
os.kill(main_proc.pid, signal.SIGKILL)
except (TypeError, psutil.Error, OSError):
try:
os.kill(pid, signal.SIGKILL)
except (OSError):
pass | 0.003311 |
def read(morph_file, data_wrapper=DataWrapper):
'''return a 'raw_data' np.array with the full neuron, and the format of the file
suitable to be wrapped by DataWrapper
'''
msg = ('This is an experimental reader. '
'There are no guarantees regarding ability to parse '
'Neurolucida .asc files or correctness of output.')
warnings.warn(msg)
L.warning(msg)
with open(morph_file, encoding='utf-8', errors='replace') as morph_fd:
sections = _parse_sections(morph_fd)
raw_data = _sections_to_raw_data(sections)
return data_wrapper(raw_data, 'NL-ASCII') | 0.003257 |
def get_release(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
init_path = os.path.join(PROJECT_PATH, package, '__init__.py')
init_py = open(init_path).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1) | 0.00346 |
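A quick sketch of the version-extraction regex used above; the module text is invented rather than a real package's __init__.py.
import re
# Hypothetical __init__.py content; only the __version__ assignment matters.
sample_init = "__version__ = '1.4.2'\n__all__ = []\n"
assert re.search("__version__ = ['\"]([^'\"]+)['\"]", sample_init).group(1) == '1.4.2'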
def get_function(self):
"""
Return function object for my function.
raise ProcessorConfigurationError when function could not be resolved.
"""
if not hasattr(self, '_function'):
try:
modname, funcname = self.function.rsplit('.', 1)
mod = import_module(modname)
self._function = getattr(mod, funcname)
except (ImportError, AttributeError, ValueError), err:
raise ProcessorConfigurationError(err)
return self._function | 0.003617 |
def hist(self, var: str, title: str = '',
label: str = '') -> object:
"""
This method requires a numeric column (use the contents method to see column types) and generates a histogram.
:param var: the NUMERIC variable (column) you want to plot
:param title: an optional Title for the chart
:param label: LegendLABEL= value for sgplot
:return:
"""
code = "proc sgplot data=" + self.libref + '.' + self.table + self._dsopts()
code += ";\n\thistogram " + var + " / scale=count"
if len(label) > 0:
code += " LegendLABEL='" + label + "'"
code += ";\n"
if len(title) > 0:
code += '\ttitle "' + title + '";\n'
code += "\tdensity " + var + ';\nrun;\n' + 'title;'
if self.sas.nosub:
print(code)
return
ll = self._is_valid()
if not ll:
html = self.HTML
self.HTML = 1
ll = self.sas._io.submit(code)
self.HTML = html
if not self.sas.batch:
self.sas.DISPLAY(self.sas.HTML(ll['LST']))
else:
return ll | 0.004288 |
def apply_transformation(self, structure, return_ranked_list=False):
"""
Apply the transformation.
Args:
structure: input structure
return_ranked_list (bool/int): Boolean stating whether or not
multiple structures are returned. If return_ranked_list is
an int, that number of structures is returned.
Returns:
Depending on returned_ranked list, either a transformed structure
or a list of dictionaries, where each dictionary is of the form
{"structure" = .... , "other_arguments"}
the key "transformation" is reserved for the transformation that
was actually applied to the structure.
This transformation is parsed by the alchemy classes for generating
a more specific transformation history. Any other information will
be stored in the transformation_parameters dictionary in the
transmuted structure class.
"""
sp = get_el_sp(self.specie_to_remove)
specie_indices = [i for i in range(len(structure))
if structure[i].species ==
Composition({sp: 1})]
trans = PartialRemoveSitesTransformation([specie_indices],
[self.fraction_to_remove],
algo=self.algo)
return trans.apply_transformation(structure, return_ranked_list) | 0.00133 |
def _set_vlag(self, v, load=False):
"""
Setter method for vlag, mapped from YANG variable /interface/port_channel/vlag (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlag is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlag() directly.
YANG Description: The vLAG properties for this port-channel.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=vlag.vlag, is_container='container', presence=False, yang_name="vlag", rest_name="vlag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual LAG', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vlag must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=vlag.vlag, is_container='container', presence=False, yang_name="vlag", rest_name="vlag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual LAG', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__vlag = t
if hasattr(self, '_set'):
self._set() | 0.005938 |
def compile_file(fullpath, outfile_name, compiler_args):
"""Calls HamlPy compiler."""
if Options.VERBOSE:
print '%s %s -> %s' % (strftime("%H:%M:%S"), fullpath, outfile_name)
try:
if Options.DEBUG:
print "Compiling %s -> %s" % (fullpath, outfile_name)
haml_lines = codecs.open(fullpath, 'r', encoding = 'utf-8').read().splitlines()
compiler = hamlpy.Compiler(compiler_args)
output = compiler.process_lines(haml_lines)
outfile = codecs.open(outfile_name, 'w', encoding = 'utf-8')
outfile.write(output)
except Exception, e:
# import traceback
print "Failed to compile %s -> %s\nReason:\n%s" % (fullpath, outfile_name, e) | 0.009749 |
def get(self, index, doc_type, id, fields=None, model=None, **query_params):
"""
Get a typed JSON document from an index based on its id.
"""
path = make_path(index, doc_type, id)
if fields is not None:
query_params["fields"] = ",".join(fields)
model = model or self.model
return model(self, self._send_request('GET', path, params=query_params)) | 0.007264 |
def iter_data(self):
"""Iterate over key-value pairs that are really meant to be displayed"""
for (k, v) in self.proxy.items():
if (
not (isinstance(k, str) and k[0] == '_') and
k not in (
'character',
'name',
'location'
)
):
yield k, v | 0.0075 |
def aggregation_postprocessors_extractor(impact_report, component_metadata):
"""Extracting aggregate result of demographic.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
context = {
'sections': OrderedDict()
}
"""Initializations."""
extra_args = component_metadata.extra_args
# Find out aggregation report type
aggregation_summary = impact_report.aggregation_summary
analysis_layer = impact_report.analysis
analysis_layer_fields = impact_report.analysis.keywords['inasafe_fields']
use_rounding = impact_report.impact_function.use_rounding
use_aggregation = bool(impact_report.impact_function.provenance[
'aggregation_layer'])
provenance = impact_report.impact_function.provenance
exposure_keywords = provenance['exposure_keywords']
# Get exposure type definition
exposure_type = definition(exposure_keywords['exposure'])
# this entire section is only for population exposure type
if not exposure_type == exposure_population:
return context
# check zero displaced (there will be no output to display)
try:
displaced_field_name = analysis_layer_fields[displaced_field['key']]
total_displaced = value_from_field_name(
displaced_field_name, analysis_layer)
zero_displaced = False
if total_displaced == 0:
zero_displaced = True
except KeyError:
# in case no displaced field
# let each section handled itself
zero_displaced = False
context['component_key'] = component_metadata.key
context['use_aggregation'] = use_aggregation
context['header'] = resolve_from_dictionary(
extra_args, 'header')
group_header_format = resolve_from_dictionary(
extra_args, ['defaults', 'group_header_format'])
section_header_format = resolve_from_dictionary(
extra_args,
['defaults', 'section_header_format'])
if not use_aggregation:
section_header_format = resolve_from_dictionary(
extra_args, ['defaults', 'section_header_format_no_aggregation'])
"""Age Groups."""
age_items = {
'group': age_displaced_count_group,
'group_header': group_header_format.format(
header_name=age_displaced_count_group['header_name']),
'fields': [postprocessor_output_field(p) for p in age_postprocessors]
}
# check age_fields exists
for field in age_items['fields']:
if field['key'] in analysis_layer_fields:
no_age_field = False
break
else:
no_age_field = True
context['sections']['age'] = []
age_section_header = section_header_format.format(
header_name=age_displaced_count_group['header_name'])
if zero_displaced:
context['sections']['age'].append(
{
'header': age_section_header,
'empty': True,
'message': resolve_from_dictionary(
extra_args, ['defaults', 'zero_displaced_message'])
}
)
elif no_age_field:
context['sections']['age'].append(
{
'header': age_section_header,
'empty': True,
'message': resolve_from_dictionary(
extra_args, ['defaults', 'no_age_rate_message'])
}
)
else:
context['sections']['age'].append(
create_section(
aggregation_summary,
analysis_layer,
age_items,
age_section_header,
use_aggregation=use_aggregation,
use_rounding=use_rounding,
extra_component_args=extra_args)
)
"""Gender Groups."""
gender_items = {
'group': gender_displaced_count_group,
'group_header': group_header_format.format(
header_name=gender_displaced_count_group['header_name']),
'fields': [
postprocessor_output_field(p) for p in gender_postprocessors]
}
# check gender_fields exists
for field in gender_items['fields']:
if field['key'] in analysis_layer_fields:
no_gender_field = False
break
else:
no_gender_field = True
context['sections']['gender'] = []
gender_section_header = section_header_format.format(
header_name=gender_displaced_count_group['header_name'])
if zero_displaced:
context['sections']['gender'].append(
{
'header': gender_section_header,
'empty': True,
'message': resolve_from_dictionary(
extra_args, ['defaults', 'zero_displaced_message'])
}
)
elif no_gender_field:
context['sections']['gender'].append(
{
'header': gender_section_header,
'empty': True,
'message': resolve_from_dictionary(
extra_args, ['defaults', 'no_gender_rate_message'])
}
)
else:
context['sections']['gender'].append(
create_section(
aggregation_summary,
analysis_layer,
gender_items,
gender_section_header,
use_aggregation=use_aggregation,
use_rounding=use_rounding,
extra_component_args=extra_args)
)
"""Vulnerability Groups."""
context['sections']['vulnerability'] = []
for vulnerability_group in vulnerability_displaced_count_groups:
vulnerability_items = {
'group': vulnerability_group,
'group_header': group_header_format.format(
header_name=vulnerability_group['header_name']),
'fields': [field for field in vulnerability_group['fields']]
}
# check vulnerability_fields exists
for field in vulnerability_items['fields']:
if field['key'] in analysis_layer_fields:
no_vulnerability_field = False
break
else:
no_vulnerability_field = True
vulnerability_section_header = section_header_format.format(
header_name=vulnerability_group['header_name'])
if zero_displaced:
context['sections']['vulnerability'].append(
{
'header': vulnerability_section_header,
'empty': True,
'message': resolve_from_dictionary(
extra_args, ['defaults', 'zero_displaced_message'])
}
)
elif no_vulnerability_field:
context['sections']['vulnerability'].append(
{
'header': vulnerability_section_header,
'empty': True,
'message': resolve_from_dictionary(
extra_args,
['defaults', 'no_vulnerability_rate_message'])
}
)
else:
context['sections']['vulnerability'].append(
create_section(
aggregation_summary,
analysis_layer,
vulnerability_items,
vulnerability_section_header,
use_aggregation=use_aggregation,
use_rounding=use_rounding,
extra_component_args=extra_args)
)
"""Minimum Needs."""
context['sections']['minimum_needs'] = []
minimum_needs_section_header = resolve_from_dictionary(
extra_args, ['sections', 'minimum_needs', 'header'])
# Don't show minimum needs if there is no displaced
if zero_displaced:
context['sections']['minimum_needs'].append(
{
'header': minimum_needs_section_header,
'empty': True,
'message': resolve_from_dictionary(
extra_args, ['defaults', 'zero_displaced_message'])
}
)
# Only provides minimum needs breakdown if there is aggregation layer
elif use_aggregation:
# minimum needs should provide unit for column headers
units_label = []
minimum_needs_items = {
'group_header': 'Minimum needs breakdown',
'fields': minimum_needs_fields + additional_minimum_needs
}
for field in minimum_needs_items['fields']:
unit = None
if field.get('need_parameter'):
need = field['need_parameter']
if isinstance(need, ResourceParameter):
unit_abbreviation = need.unit.abbreviation
elif field.get('unit'):
need_unit = field.get('unit')
unit_abbreviation = need_unit.get('abbreviation')
if unit_abbreviation:
unit_format = '{unit}'
unit = unit_format.format(
unit=unit_abbreviation)
units_label.append(unit)
context['sections']['minimum_needs'].append(
create_section(
aggregation_summary,
analysis_layer,
minimum_needs_items,
minimum_needs_section_header,
units_label=units_label,
use_rounding=use_rounding,
extra_component_args=extra_args)
)
else:
sections_not_empty = True
for _, values in list(context['sections'].items()):
for value in values:
if value.get('rows'):
break
else:
sections_not_empty = False
context['sections_not_empty'] = sections_not_empty
return context | 0.000098 |
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except:
pid = random.randint(0,sys.maxint)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid) | 0.009456 |
def get_pending_withdrawals(self, currency=None):
"""
Used to view your pending withdrawals
Endpoint:
1.1 NO EQUIVALENT
2.0 /key/balance/getpendingwithdrawals
:param currency: String literal for the currency (ie. BTC)
:type currency: str
:return: pending withdrawals in JSON
:rtype : list
"""
return self._api_query(path_dict={
API_V2_0: '/key/balance/getpendingwithdrawals'
}, options={'currencyname': currency} if currency else None,
protection=PROTECTION_PRV) | 0.003425 |
def purge_archives(base_dir: str, retain_latest: bool = False) -> None:
"""
Erase all (or nearly all) cache archives.
:param base_dir: archive base directory
:param retain_latest: retain latest archive if present, purge all others
"""
LOGGER.debug('purge_archives >>> base_dir: %s, retain_latest: %s', base_dir, retain_latest)
if isdir(base_dir):
timestamps = sorted([int(t) for t in listdir(base_dir) if t.isdigit()])
if retain_latest and timestamps:
timestamps.pop()
for timestamp in timestamps:
timestamp_dir = join(base_dir, str(timestamp))
rmtree(timestamp_dir)
LOGGER.info('Purged archive cache directory %s', timestamp_dir)
LOGGER.debug('purge_archives <<<') | 0.006002 |
def move_edges(self,n1,n2):
"""Move edges from node 1 to node 2
Not self edges though
Overwrites edges
"""
#Traverse edges to find incoming with n1
incoming = []
for e in self._edges.values():
if e.node2.id == n1.id: incoming.append(e)
#Traverse edges to find outgoing from n1
outgoing = []
for e in self._edges.values():
if e.node1.id == n1.id: outgoing.append(e)
#Make new edges to the new target
for e in incoming:
if e.node1.id == n2.id: continue # skip self
newedge = Edge(e.node1,n2,payload_list=n2.payload_list+n1.payload_list)
self.add_edge(newedge)
for e in outgoing:
if e.node2.id == n2.id: continue # skip self
newedge = Edge(n2,e.node2,payload_list=n2.payload_list+n1.payload_list)
self.add_edge(newedge)
#now remove the edges that got transfered
for e in incoming: self.remove_edge(e)
for e in outgoing: self.remove_edge(e) | 0.043435 |
def runExperiment():
"""
Experiment 1: Calculate error rate as a function of training sequence numbers
:return:
"""
trainSeqN = [5, 10, 20, 50, 100, 200]
rptPerCondition = 20
correctRateAll = np.zeros((len(trainSeqN), rptPerCondition))
missRateAll = np.zeros((len(trainSeqN), rptPerCondition))
fpRateAll = np.zeros((len(trainSeqN), rptPerCondition))
for i in xrange(len(trainSeqN)):
for rpt in xrange(rptPerCondition):
numTrainSequence = trainSeqN[i]
correctRate, missRate, fpRate = runSingleExperiment(numTrainSequence=numTrainSequence)
correctRateAll[i, rpt] = correctRate
missRateAll[i, rpt] = missRate
fpRateAll[i, rpt] = fpRate
plt.figure()
plt.subplot(2,2,1)
plt.semilogx(trainSeqN, 100*np.mean(correctRateAll,1),'-*')
plt.xlabel(' Training Sequence Number')
plt.ylabel(' Hit Rate - Best Match (%)')
plt.subplot(2,2,2)
plt.semilogx(trainSeqN, 100*np.mean(missRateAll,1),'-*')
plt.xlabel(' Training Sequence Number')
plt.ylabel(' Miss Rate (%)')
plt.subplot(2,2,3)
plt.semilogx(trainSeqN, 100*np.mean(fpRateAll,1),'-*')
plt.xlabel(' Training Sequence Number')
plt.ylabel(' False Positive Rate (%)')
plt.savefig('result/ReberSequence_HMMperformance.pdf')
plt.show() | 0.032695 |
def ffconvert(fname, limit_states, ff, min_iml=1E-10):
"""
Convert a fragility function into a numpy array plus a bunch
of attributes.
:param fname: path to the fragility model file
:param limit_states: expected limit states
:param ff: fragility function node
:returns: a pair (array, dictionary)
"""
with context(fname, ff):
ffs = ff[1:]
imls = ff.imls
nodamage = imls.attrib.get('noDamageLimit')
if nodamage == 0:
# use a cutoff to avoid log(0) in GMPE.to_distribution_values
logging.warning('Found a noDamageLimit=0 in %s, line %s, '
'using %g instead', fname, ff.lineno, min_iml)
nodamage = min_iml
with context(fname, imls):
attrs = dict(format=ff['format'],
imt=imls['imt'],
id=ff['id'],
nodamage=nodamage)
LS = len(limit_states)
if LS != len(ffs):
with context(fname, ff):
raise InvalidFile('expected %d limit states, found %d' %
(LS, len(ffs)))
if ff['format'] == 'continuous':
minIML = float(imls['minIML'])
if minIML == 0:
# use a cutoff to avoid log(0) in GMPE.to_distribution_values
logging.warning('Found minIML=0 in %s, line %s, using %g instead',
fname, imls.lineno, min_iml)
minIML = min_iml
attrs['minIML'] = minIML
attrs['maxIML'] = float(imls['maxIML'])
array = numpy.zeros(LS, [('mean', F64), ('stddev', F64)])
for i, ls, node in zip(range(LS), limit_states, ff[1:]):
if ls != node['ls']:
with context(fname, node):
                    raise InvalidFile('expected %s, found %s' %
(ls, node['ls']))
array['mean'][i] = node['mean']
array['stddev'][i] = node['stddev']
elif ff['format'] == 'discrete':
attrs['imls'] = ~imls
valid.check_levels(attrs['imls'], attrs['imt'], min_iml)
num_poes = len(attrs['imls'])
array = numpy.zeros((LS, num_poes))
for i, ls, node in zip(range(LS), limit_states, ff[1:]):
with context(fname, node):
if ls != node['ls']:
                    raise InvalidFile('expected %s, found %s' %
(ls, node['ls']))
poes = (~node if isinstance(~node, list)
else valid.probabilities(~node))
if len(poes) != num_poes:
                    raise InvalidFile('expected %s, found %s' %
(num_poes, len(poes)))
array[i, :] = poes
# NB: the format is constrained in nrml.FragilityNode to be either
# discrete or continuous, there is no third option
return array, attrs | 0.000349 |
def get_child_catalog_ids(self, catalog_id):
"""Gets the child ``Ids`` of the given catalog.
arg: catalog_id (osid.id.Id): the ``Id`` to query
return: (osid.id.IdList) - the children of the catalog
raise: NotFound - ``catalog_id`` is not found
raise: NullArgument - ``catalog_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_child_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_child_catalog_ids(catalog_id=catalog_id)
return self._hierarchy_session.get_children(id_=catalog_id) | 0.003576 |
def print_trip_table(document):
""" Print trip table """
headers = [
'Alt.',
'Name',
'Time',
'Track',
'Direction',
'Dest.',
'Track',
'Arrival']
table = []
altnr = 0
for alternative in document:
altnr += 1
first_trip_in_alt = True
if not isinstance(alternative['Leg'], list):
alternative['Leg'] = [alternative['Leg']]
for part in alternative['Leg']:
orig = part['Origin']
dest = part['Destination']
row = [
altnr if first_trip_in_alt else None,
part['name'],
orig['rtTime'] if 'rtTime' in orig else orig['time'],
orig['track'],
part['direction'] if 'direction' in part else None,
dest['name'],
dest['track'],
dest['rtTime'] if 'rtTime' in dest else dest['time'],
]
table.append(row)
first_trip_in_alt = False
print(tabulate.tabulate(table, headers)) | 0.000923 |
def batch_size(self):
"""int: The number of results to fetch per batch. Clamped to
limit if limit is set and is smaller than the given batch
size.
"""
batch_size = self.get("batch_size", DEFAULT_BATCH_SIZE)
if self.limit is not None:
return min(self.limit, batch_size)
return batch_size | 0.005634 |
def warn_sf(messages, response, verbs=None, klass=SalesforceWarning):
"""Issue a warning SalesforceWarning, with message combined from message and data from SFDC response"""
warnings.warn(klass(messages, response, verbs), stacklevel=2) | 0.00823 |
def tenant_create(name, description=None, enabled=True, profile=None,
**connection_args):
'''
Create a keystone tenant
CLI Examples:
.. code-block:: bash
salt '*' keystone.tenant_create nova description='nova tenant'
salt '*' keystone.tenant_create test enabled=False
'''
kstone = auth(profile, **connection_args)
new = getattr(kstone, _TENANTS, None).create(name, description, enabled)
return tenant_get(new.id, profile=profile, **connection_args) | 0.001934 |
def serializer_by_type_id(self, type_id):
"""
Find and return the serializer for the type-id
:param type_id: type-id the serializer
:return: the serializer
"""
if type_id <= 0:
indx = index_for_default_type(type_id)
serializer = self._constant_type_ids.get(indx, None)
if serializer is not None:
return serializer
return self._id_dic.get(type_id, None) | 0.004348 |
def get_data(self, df):
"""Returns the chart data"""
chart_data = []
if len(self.groupby) > 0:
groups = df.groupby(self.groupby)
else:
groups = [((), df)]
for keys, data in groups:
chart_data.extend([{
'key': self.labelify(keys, column),
'values': data[column].tolist()}
for column in self.columns])
return chart_data | 0.004454 |
def extract(pcmiter, samplerate, channels, duration = -1):
"""Given a PCM data stream, extract fingerprint data from the
audio. Returns a byte string of fingerprint data. Raises an
ExtractionError if fingerprinting fails.
"""
extractor = _fplib.Extractor(samplerate, channels, duration)
# Get first block.
try:
next_block = next(pcmiter)
except StopIteration:
raise ExtractionError()
# Get and process subsequent blocks.
while True:
# Shift over blocks.
cur_block = next_block
try:
next_block = next(pcmiter)
except StopIteration:
next_block = None
done = next_block is None
# Process the block.
try:
if extractor.process(cur_block, done):
# Success!
break
except RuntimeError as exc:
# Exception from fplib. Most likely the file is too short.
raise ExtractionError(exc.args[0])
# End of file but processor never became ready?
if done:
raise ExtractionError()
# Get resulting fingerprint data.
out = extractor.result()
if out is None:
raise ExtractionError()
# Free extractor memory.
extractor.free()
return out | 0.003852 |
def create_markdown_cell(block):
"""Create a markdown cell from a block."""
kwargs = {'cell_type': block['type'],
'source': block['content']}
markdown_cell = nbbase.new_markdown_cell(**kwargs)
return markdown_cell | 0.007605 |
def list_packages_in_eups_table(table_text):
"""List the names of packages that are required by an EUPS table file.
Parameters
----------
table_text : `str`
The text content of an EUPS table file.
Returns
-------
names : `list` [`str`]
        List of package names that are required by the EUPS table file.
"""
logger = logging.getLogger(__name__)
# This pattern matches required product names in EUPS table files.
pattern = re.compile(r'setupRequired\((?P<name>\w+)\)')
listed_packages = [m.group('name') for m in pattern.finditer(table_text)]
logger.debug('Packages listed in the table file: %r', listed_packages)
return listed_packages | 0.001416 |
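A small usage sketch for the table parser above; the table text and package names are made up, and only lines matching setupRequired(...) are picked up.
sample_table = """
setupRequired(afw)
setupRequired(daf_base)
envPrepend(PYTHONPATH, ${PRODUCT_DIR}/python)
"""
assert list_packages_in_eups_table(sample_table) == ['afw', 'daf_base']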
def ecdsa_public_key(pubkey_str, compressed=None):
"""
Make a public key object, but enforce the following rule:
* if compressed is True or False, make the key compressed/uncompressed.
* otherwise, return whatever the hex encoding is
"""
if compressed == True:
pubkey_str = keylib.key_formatting.compress(pubkey_str)
elif compressed == False:
pubkey_str = keylib.key_formatting.decompress(pubkey_str)
return _ECPublicKey(pubkey_str) | 0.006237 |
def mavlink_packet(self, m):
'''handle mavlink packets'''
if m.get_type() == 'SYSTEM_TIME':
if self.system_time_settings.verbose:
print("ST: Received from (%u/%u): %s" %
(m.get_srcSystem(), m.get_srcComponent(), m))
if m.get_type() == 'TIMESYNC':
if m.tc1 == 0:
# this is a request for a timesync response
time_ns = time.time() * 1000000000
time_ns += 1234
if True or self.system_time_settings.verbose:
if self.system_time_settings.verbose:
print("ST: received timesync; sending response: %u" %
(time_ns))
self.master.mav.timesync_send(time_ns,
m.ts1)
else:
if m.ts1 == self.last_sent_ts1:
# we sent this one!
now_ns = time.time() * 1000000000
now_ns += 1234
if self.system_time_settings.verbose:
print("ST: timesync response: sysid=%u latency=%fms" %
(m.get_srcSystem(),
(now_ns-self.last_sent_ts1)/1000000.0)) | 0.001541 |
def get_links(self, text=None, *args, **kwargs):
"""Find anchors or buttons by containing text, as well as standard
BeautifulSoup arguments.
:param text: String or regex to be matched in link text
:return: List of BeautifulSoup tags
"""
return helpers.find_all(
self.parsed, _link_ptn, text=text, *args, **kwargs
) | 0.005208 |
def mixin_params(self, params):
"""
Merge in the MdsolAttribute for the passed parameter
:param dict params: dictionary of object parameters
"""
if not isinstance(params, (dict,)):
raise AttributeError("Cannot mixin to object of type {}".format(type(params)))
for attribute in self.attributes:
params.update({attribute.tag: attribute.value}) | 0.007246 |
def open_application(self, remote_url, alias=None, **kwargs):
"""Opens a new application to given Appium server.
Capabilities of appium server, Android and iOS,
Please check https://github.com/appium/appium/blob/master/docs/en/writing-running-appium/server-args.md
| *Option* | *Man.* | *Description* |
| remote_url | Yes | Appium server url |
| alias | no | alias |
Examples:
| Open Application | http://localhost:4723/wd/hub | alias=Myapp1 | platformName=iOS | platformVersion=7.0 | deviceName='iPhone Simulator' | app=your.app |
| Open Application | http://localhost:4723/wd/hub | platformName=Android | platformVersion=4.2.2 | deviceName=192.168.56.101:5555 | app=${CURDIR}/demoapp/OrangeDemoApp.apk | appPackage=com.netease.qa.orangedemo | appActivity=MainActivity |
"""
desired_caps = kwargs
application = webdriver.Remote(str(remote_url), desired_caps)
self._debug('Opened application with session id %s' % application.session_id)
return self._cache.register(application, alias) | 0.00487 |
def add(self, interval):
"""
Returns self after adding the interval and balancing.
"""
if self.center_hit(interval):
self.s_center.add(interval)
return self
else:
direction = self.hit_branch(interval)
if not self[direction]:
self[direction] = Node.from_interval(interval)
self.refresh_balance()
return self
else:
self[direction] = self[direction].add(interval)
return self.rotate() | 0.003565 |
def _quoted(value):
"""Return a single-quoted and escaped (percent-encoded) version of value
This function will also perform transforms of known data types to a representation
that will be handled by Device Cloud. For instance, datetime objects will be
converted to ISO8601.
"""
if isinstance(value, datetime.datetime):
value = isoformat(to_none_or_dt(value))
else:
value = str(value)
return "'{}'".format(value) | 0.006466 |
def load_inversion_results(self, sipdir):
"""Given an sEIT inversion directory, load inversion results and store
the corresponding parameter ids in self.assignments
Note that all previous data stored in this instance of the eitManager
will be overwritten, if required!
"""
# load frequencies and initialize tomodir objects for all frequencies
frequency_file = sipdir + os.sep + 'frequencies.dat'
frequencies = np.loadtxt(frequency_file)
self._init_frequencies(frequencies)
# cycle through all tomodirs on disc and load the data
for nr, (frequency_key, item) in enumerate(sorted(self.tds.items())):
for label in ('rmag', 'rpha', 'cre', 'cim'):
                if label not in self.a:
                    self.a[label] = {}
tdir = sipdir + os.sep + 'invmod' + os.sep + '{:02}_{:.6f}'.format(
nr, frequency_key) + os.sep
rmag_file = sorted(glob(tdir + 'inv/*.mag'))[-1]
rmag_data = np.loadtxt(rmag_file, skiprows=1)[:, 2]
pid_rmag = item.parman.add_data(rmag_data)
self.a['rmag'][frequency_key] = pid_rmag
rpha_file = sorted(glob(tdir + 'inv/*.pha'))[-1]
rpha_data = np.loadtxt(rpha_file, skiprows=1)[:, 2]
pid_rpha = item.parman.add_data(rpha_data)
self.a['rpha'][frequency_key] = pid_rpha
sigma_file = sorted(glob(tdir + 'inv/*.sig'))[-1]
sigma_data = np.loadtxt(sigma_file, skiprows=1)
pid_cre = item.parman.add_data(sigma_data[:, 0])
pid_cim = item.parman.add_data(sigma_data[:, 1])
self.a['cre'][frequency_key] = pid_cre
self.a['cim'][frequency_key] = pid_cim | 0.001129 |
def deconstruct(self):
"""
to support Django 1.7 migrations, see also the add_introspection_rules
section at bottom of this file for South + earlier Django versions
"""
name, path, args, kwargs = super(
ExclusiveBooleanField, self).deconstruct()
if self._on_fields:
kwargs['on'] = self._on_fields
return name, path, args, kwargs | 0.004902 |
def __internal_union(self, root_a, root_b):
"""Internal function to join two set trees specified by root_a and root_b.
Assumes root_a and root_b are distinct.
"""
# Merge the trees, smaller to larger
update_rank = False
# --Determine the larger tree
rank_a = self.__forest[root_a]
rank_b = self.__forest[root_b]
if rank_a < rank_b:
larger = root_b
smaller = root_a
else:
larger = root_a
smaller = root_b
if rank_a == rank_b:
update_rank = True
# --Make the smaller tree a subtree of the larger tree
self.__forest[smaller] = larger
# --Update the rank of the new tree (if necessary)
if update_rank:
self.__forest[larger] -= 1 | 0.003641 |
def inv_std_norm_cdf(x):
"""
Inverse cumulative standard Gaussian distribution
Based on Winitzki, S. (2008)
"""
z = 2*x -1
ln1z2 = np.log(1-z**2)
a = 8*(np.pi -3)/(3*np.pi*(4-np.pi))
b = 2/(np.pi * a) + ln1z2/2
inv_erf = np.sign(z) * np.sqrt( np.sqrt(b**2 - ln1z2/a) - b )
return np.sqrt(2) * inv_erf | 0.014706 |
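A rough numerical check of the Winitzki approximation above; the tolerance is a guess at the approximation error near the 97.5% quantile, not a documented bound.
# The exact 97.5% standard-normal quantile is about 1.95996; the approximation
# lands within a few thousandths of it.
assert abs(inv_std_norm_cdf(0.975) - 1.95996) < 0.02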
def _rm_get_reference_coords_from_header(parts):
"""
extract the reference (genomic sequence match) coordinates of a repeat
  occurrence from a repeatmasker header line. An example header line is::
239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
  the genomic start and end are always at positions 5 and 6 respectively. In
the repeatmasker format, the end is inclusive, but in pyokit end coordinates
are exclusive, so we adjust it when we parse here.
:param parts: the header line, as a tokenized list.
:return: tuple of (start, end)
"""
s = int(parts[5])
e = int(parts[6]) + 1
if (s >= e):
    raise AlignmentIteratorError("invalid repeatmasker header: " +
" ".join(parts))
return (s, e) | 0.007792 |
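For illustration, the example header from the docstring can be tokenized and parsed the same way (a stand-alone check; AlignmentIteratorError from pyokit is not needed here):

header = "239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4"
parts = header.split()
start, end = int(parts[5]), int(parts[6]) + 1
print(start, end)  # 11 18 -- the inclusive RepeatMasker end 17 becomes exclusive 18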
def __read_frame(self):
"""*Attempt* to read a frame. If we get an EAGAIN on the frame header,
it'll raise to our caller. If we get it *after* we already got the
header, wait-out the rest of the frame.
"""
if self.__frame_header_cache is None:
_logger.debug("Reading frame header.")
(length, frame_type) = struct.unpack('!II', self.__read(8))
self.__frame_header_cache = (length, frame_type)
else:
(length, frame_type) = self.__frame_header_cache
        try:
            data = self.__read(length - 4)
        except IOError as e:
            # errno.EAGAIN is an errno constant, not an exception class, so it
            # cannot appear directly in an except clause; catch the I/O error
            # and check its errno instead, re-raising anything else untouched.
            if e.errno != errno.EAGAIN:
                raise
            self.__frame_header_cache = (length, frame_type)
            raise
self.__frame_header_cache = None
self.__process_message(frame_type, data) | 0.004994 |
def set_definition_node(self, node, name):
"""Set definition by name."""
definition = self.get_definition(name)
if definition:
definition.node = node | 0.010811 |
async def traverse(self, func):
"""
Traverses an async function or generator, yielding each result.
This function is private. The class should be used as an iterator instead of using this method.
"""
# this allows the reference to be stolen
async_executor = self
if inspect.isasyncgenfunction(func):
async for result in func(*async_executor.args):
yield result
else:
yield await func(*async_executor.args) | 0.005859 |
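The dispatch logic can be exercised outside the class with a stand-alone helper (a hedged sketch; the async executor wrapper itself is not reproduced here):

import asyncio
import inspect

async def traverse(func, *args):
    # same branching as above: fan out async generators, await plain coroutines
    if inspect.isasyncgenfunction(func):
        async for result in func(*args):
            yield result
    else:
        yield await func(*args)

async def gen(n):
    for i in range(n):
        yield i

async def single(n):
    return n * 10

async def main():
    print([x async for x in traverse(gen, 3)])     # [0, 1, 2]
    print([x async for x in traverse(single, 3)])  # [30]

asyncio.run(main())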
def post(self, request):
"""
Save the user and profile, login and send the right signals.
"""
if request.user.is_authenticated():
return self.error_to_response(request, dict(
error=_("You are already logged in.")))
try:
user, profile, client = self.get_session_data(request)
except KeyError:
return self.error_to_response(request, dict(
error=_("A social profile is missing from your session.")))
form = self.get_form()(request.POST, request.FILES,
initial=self.get_initial_data(request, user, profile, client))
if not form.is_valid():
additional_context = self.get_context(request, user, profile, client)
return self.render_to_response(dict({'form': form}, **additional_context))
user, profile = form.save(request, user, profile, client)
user = profile.authenticate()
self.send_connect_signal(request, user, profile, client)
self.login(request, user)
self.send_login_signal(request, user, profile, client)
self.delete_session_data(request)
return HttpResponseRedirect(self.get_next(request)) | 0.010778 |
def rename(oldPath, newPath, **kwargs):
"""rename the file oldPath to newPath"""
import os
return os.rename(oldPath, newPath, **kwargs) | 0.006803 |
def read_request_from_str(data, **params):
"""
    Read an HTTP request from a string and format it using the given
    format-string template parameters.
:param data:
:param params:
:return:
"""
method, uri = None, None
headers = {}
host = ''
try:
split_list = data.split('\n\n')
headers_text = split_list[0]
body = '\n\n'.join(split_list[1:])
except:
headers_text = data
body = ''
body = force_bytes(body)
for k, v in params.items():
body = body.replace(b'{%s}' % force_bytes(k), force_bytes(v))
header_list = headers_text.split('\n')
for i, line in enumerate(header_list):
line = line.strip()
if line.strip() == '':
continue
line = line.format(**params)
if i == 0:
            # at most 3 parts: method, URI, protocol version
split_line = line.strip().split(' ')
method, uri, _ = split_line[0], ' '.join(split_line[1:-1]), split_line[-1]
else:
            # at most 2 parts: header name and value
header, value = line.split(':', 1)
header = header.strip()
value = value.strip()
headers[header] = value
if header.lower() == 'host':
host = value
return headers, method, uri, host, body | 0.002492 |
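Assuming the function above and a Django-style force_bytes are importable, a call might look like this (the request text and the item_id placeholder are made up):

raw = (
    "GET /api/items?id={item_id} HTTP/1.1\n"
    "Host: example.com\n"
    "Accept: application/json\n"
    "\n"
    "payload={item_id}"
)
headers, method, uri, host, body = read_request_from_str(raw, item_id="42")
# method == 'GET', uri == '/api/items?id=42', host == 'example.com'
# headers == {'Host': 'example.com', 'Accept': 'application/json'}
# body == b'payload=42'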
def lstat(path):
'''
.. versionadded:: 2014.1.0
Returns the lstat attributes for the given file or dir. Does not support
symbolic links.
CLI Example:
.. code-block:: bash
salt '*' file.lstat /path/to/file
'''
path = os.path.expanduser(path)
if not os.path.isabs(path):
raise SaltInvocationError('Path to file must be absolute.')
try:
lst = os.lstat(path)
return dict((key, getattr(lst, key)) for key in ('st_atime', 'st_ctime',
'st_gid', 'st_mode', 'st_mtime', 'st_nlink', 'st_size', 'st_uid'))
except Exception:
return {} | 0.004808 |
def gumbel_softmax_discrete_bottleneck(x,
bottleneck_bits,
beta=0.25,
decay=0.999,
epsilon=1e-5,
temperature_warmup_steps=150000,
hard=False,
summary=True):
"""VQ-VAE using Gumbel-Softmax.
  Different from the `gumbel_softmax()` function in that
this function calculates the KL by using the discrete entropy
instead of taking the argmax, and it also uses an exponential moving average
to update the codebook while the `gumbel_softmax()` function includes no
codebook update.
Args:
x: A `float`-like `Tensor` containing the latent vectors to be compared to
the codebook, whose squared difference is used as the Gumbel-Softmax
logits.
bottleneck_bits: An `int` that sets the size of the bottleneck in `log_2`.
beta: Beta factor for commitment loss (Default: 0.25).
decay: Decay factor for exponential moving average (Default: 0.999).
epsilon: Small value to avoid dividing by zero in EMA update
(Default: 1e-5).
temperature_warmup_steps: Number of steps it takes to decay temperature to 0
(Default: 150000).
hard: When `True`, we use hard Gumbel-Softmax samples and force
discrete latents by taking the argmax. When `False`, we use soft samples,
which we treat as codebook weights (Default: False).
summary: When `True`, we save histogram summaries of the KL term (Default:
True).
Returns:
x_means_assignments: A `float`-like `Tensor` containing the codebook
assignments. When `hard == True`, this is one-hot, containing the arg-max
      of the Gumbel-Softmax samples (and we use the straight-through gradient).
Otherwise, it contains the Gumbel-Softmax samples exactly, which are
values from the `(K-1)`-simplex where `K` is the bottleneck size.
loss: The loss, which is the sum of the KL between the Gumbel-Softmax and
the uniform prior and the commitment loss multiplied by the beta factor.
We approximate the KL by using the entropy of a categorical distribution
instead of the Gumbel Softmax.
"""
bottleneck_size = 2**bottleneck_bits
x_shape = common_layers.shape_list(x)
hidden_size = x_shape[-1]
means, ema_means, ema_count = get_vq_codebook(bottleneck_size, hidden_size)
x = tf.reshape(x, [-1, hidden_size])
bottleneck_size = common_layers.shape_list(means)[0]
x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
scalar_prod = tf.matmul(x, means, transpose_b=True)
dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
class_probs = tf.nn.softmax(dist)
log_class_probs = tf.nn.log_softmax(dist)
gumbel_samples = gumbel_sample(common_layers.shape_list(dist))
steps = temperature_warmup_steps
gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5
temperature = 1.2 - common_layers.inverse_lin_decay(steps)
# 10% of the time keep reasonably high temperature to keep learning.
temperature = tf.cond(
tf.less(tf.random_uniform([]), 0.9), lambda: temperature,
lambda: tf.random_uniform([], minval=0.5, maxval=1.0))
gumbel_softmax_samples = tf.nn.softmax(
(log_class_probs + gumbel_samples) / temperature)
# Calculate KL between q and a uniform prior.
kl = tf.reduce_sum(
class_probs * (log_class_probs - tf.log(1.0 / bottleneck_size)), -1)
if summary:
tf.summary.histogram("KL", tf.reshape(kl, [-1]))
# Straight-through gradient estimation when we're using hard assignments.
if hard:
x_means_idx = tf.reshape(tf.argmax(gumbel_softmax_samples, axis=-1), [-1])
x_means_hot = tf.one_hot(x_means_idx, bottleneck_size)
x_means_assignments = gumbel_softmax_samples + tf.stop_gradient(
x_means_hot - gumbel_softmax_samples)
else:
x_means_assignments = gumbel_softmax_samples
x_means_assignments_flat = tf.reshape(x_means_assignments,
[-1, bottleneck_size])
x_means = tf.matmul(x_means_assignments_flat, means)
commitment_loss = tf.reduce_mean(
tf.squared_difference(x, tf.stop_gradient(x_means)))
# Update the ema variables.
updated_ema_count = moving_averages.assign_moving_average(
ema_count,
tf.reduce_sum(
tf.reshape(x_means_assignments, shape=[-1, bottleneck_size]), axis=0),
decay,
zero_debias=False)
dw = tf.matmul(x_means_assignments, x, transpose_a=True)
updated_ema_means = tf.identity(
moving_averages.assign_moving_average(
ema_means, dw, decay, zero_debias=False))
n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True)
updated_ema_count = (
(updated_ema_count + epsilon) / (n + bottleneck_size * epsilon) * n)
updated_ema_means /= tf.expand_dims(updated_ema_count, axis=-1)
with tf.control_dependencies([commitment_loss]):
update_means = means.assign(updated_ema_means)
with tf.control_dependencies([update_means]):
loss = beta * commitment_loss
# Add KL loss.
loss += tf.reduce_mean(kl)
x_means_assignments = tf.reshape(x_means_assignments,
x_shape[:-1] + [bottleneck_size])
return x_means_assignments, loss | 0.008296 |
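The sampling trick at the core of the function (Gumbel noise added to log-probabilities, then a temperature-controlled softmax) can be illustrated in plain NumPy. This is an independent sketch, not the tensor2tensor code path, and it omits the codebook and EMA updates.

import numpy as np

def gumbel_softmax_sample(logits, temperature, rng=np.random):
    # Gumbel(0, 1) noise via the inverse-CDF trick
    gumbel = -np.log(-np.log(rng.uniform(1e-9, 1.0, size=logits.shape)))
    y = (logits + gumbel) / temperature
    y = y - y.max(axis=-1, keepdims=True)  # numerical stability
    e = np.exp(y)
    return e / e.sum(axis=-1, keepdims=True)

logits = np.log(np.array([0.1, 0.2, 0.7]))
print(gumbel_softmax_sample(logits, temperature=1.0))  # soft assignment
print(gumbel_softmax_sample(logits, temperature=0.1))  # nearly one-hot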
def construct_cfgs(**kargs):
"""construct_cfgs
Performs actions to construct either the setup.cfg (rpm) or stdeb.cfg (deb)
files as per the operating system specified. This construction is done as
per the setup_requirements.txt file from within the working directory
specified.
    This is a very temperamental function by design. It intentionally exits
    with a non-zero status if/when an error has occurred on its own. Therefore, it is
not suggested to use this function if you intend to get control back again.
"""
docker_dir = _check_args(**kargs)
if not docker_dir or not os.path.isdir(docker_dir):
print("Unable to determine the %s/%s combo under supported versioning"
% (kargs['operating_system'], kargs['version']))
exit_cleanly(error_number=errno.ENOSYS)
if kargs['operating_system'] == 'redhat':
_build_setup_cfg(kargs['working_directory'])
elif kargs['operating_system'] == 'ubuntu':
_build_stdeb_cfg(kargs['working_directory'])
else:
print("Unsure of what to do... operating_system(%s) is not recognized!"
% kargs['operating_system']) | 0.000883 |
def can_undo(self):
"""
Are there actions to undo?
"""
return bool(self._undo) or bool(self._open and self._open[0]) | 0.013514 |
def images(cam):
"""Extract images from input stream to jpg files.
Args:
cam: Input stream of raw rosbag messages.
Returns:
File instances for images of input stream.
"""
# Set output stream title and pull first message
yield marv.set_header(title=cam.topic)
# Fetch and process first 20 image messages
name_template = '%s-{}.jpg' % cam.topic.replace('/', ':')[1:]
while True:
idx, msg = yield marv.pull(cam, enumerate=True)
if msg is None or idx >= 20:
break
# Deserialize raw ros message
pytype = get_message_type(cam)
rosmsg = pytype()
rosmsg.deserialize(msg.data)
# Write image to jpeg and push it to output stream
img = imgmsg_to_cv2(rosmsg, "rgb8")
name = name_template.format(idx)
imgfile = yield marv.make_file(name)
cv2.imwrite(imgfile.path, img)
yield marv.push(imgfile) | 0.001058 |
def get_requirements():
"""
Returns the content of 'requirements.txt' in a list.
:return: The content of 'requirements.txt'.
:rtype: list(str)
"""
requirements = []
with open(
os.path.join(BASE_DIRECTORY, 'requirements.txt'),
'r',
encoding='utf-8'
) as requirements_file:
lines = requirements_file.readlines()
for line in lines:
requirements.append(line.strip())
return requirements | 0.002119 |
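Such a helper is typically consumed from setup.py; a hedged sketch (the package metadata is placeholder, and BASE_DIRECTORY is assumed to be defined alongside the function):

from setuptools import setup, find_packages

setup(
    name="example-package",
    version="0.1.0",
    packages=find_packages(),
    install_requires=get_requirements(),
)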
def set_data(self, data):
"Use this method to set the data for this blob"
if data is None:
self.data_size = 0
self.data = None
return
self.data_size = len(data)
# create a string buffer so that null bytes aren't interpreted
# as the end of the string
self.data = ctypes.cast(ctypes.create_string_buffer(data), ctypes.c_void_p) | 0.031609 |
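The stored void pointer can be read back with ctypes.string_at; a minimal round trip of the same technique, independent of the surrounding class:

import ctypes

data = b"hello\x00world"
buf = ctypes.create_string_buffer(data)      # keeps the embedded null byte
ptr = ctypes.cast(buf, ctypes.c_void_p)
print(ctypes.string_at(ptr, len(data)))      # b'hello\x00world'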
def buy_holding_pnl(self):
"""
        [float] holding PnL of the buy (long) side for the current trading day
"""
return (self.last_price - self.buy_avg_holding_price) * self.buy_quantity * self.contract_multiplier | 0.016216 |
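A worked example of the formula with made-up numbers: a last price of 105, a buy average holding price of 100, 2 lots held and a contract multiplier of 10 give (105 - 100) * 2 * 10 = 100.

last_price, buy_avg_holding_price = 105.0, 100.0
buy_quantity, contract_multiplier = 2, 10
print((last_price - buy_avg_holding_price) * buy_quantity * contract_multiplier)  # 100.0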
def close(self: Any) -> None:
"""Close any files linked to this object
"""
if self._file_obj is not None:
self._file_obj.close()
self._file_obj = None | 0.010309 |
def make_discord_blueprint(
client_id=None,
client_secret=None,
scope=None,
redirect_url=None,
redirect_to=None,
login_url=None,
authorized_url=None,
session_class=None,
storage=None,
):
"""
Make a blueprint for authenticating with Discord using OAuth 2. This requires
a client ID and client secret from Discord. You should either pass them to
this constructor, or make sure that your Flask application config defines
them, using the variables :envvar:`DISCORD_OAUTH_CLIENT_ID` and
:envvar:`DISCORD_OAUTH_CLIENT_SECRET`.
Args:
client_id (str): The client ID for your application on Discord.
client_secret (str): The client secret for your application on Discord
scope (list, optional): list of scopes (str) for the OAuth token
redirect_url (str): the URL to redirect to after the authentication
dance is complete
redirect_to (str): if ``redirect_url`` is not defined, the name of the
view to redirect to after the authentication dance is complete.
The actual URL will be determined by :func:`flask.url_for`
login_url (str, optional): the URL path for the ``login`` view.
Defaults to ``/discord``
authorized_url (str, optional): the URL path for the ``authorized`` view.
Defaults to ``/discord/authorized``.
session_class (class, optional): The class to use for creating a
Requests session. Defaults to
:class:`~flask_dance.consumer.requests.OAuth2Session`.
storage: A token storage class, or an instance of a token storage
class, to use for this blueprint. Defaults to
:class:`~flask_dance.consumer.storage.session.SessionStorage`.
:rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint`
:returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app.
"""
scope = scope or ["identify"]
discord_bp = OAuth2ConsumerBlueprint(
"discord",
__name__,
client_id=client_id,
client_secret=client_secret,
scope=scope,
base_url="https://discordapp.com/",
token_url="https://discordapp.com/api/oauth2/token",
authorization_url="https://discordapp.com/api/oauth2/authorize",
redirect_url=redirect_url,
redirect_to=redirect_to,
login_url=login_url,
authorized_url=authorized_url,
session_class=session_class,
storage=storage,
)
discord_bp.from_config["client_id"] = "DISCORD_OAUTH_CLIENT_ID"
discord_bp.from_config["client_secret"] = "DISCORD_OAUTH_CLIENT_SECRET"
@discord_bp.before_app_request
def set_applocal_session():
ctx = stack.top
ctx.discord_oauth = discord_bp.session
return discord_bp | 0.001414 |
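Typical flask-dance wiring for the returned blueprint might look like this (the app name, credentials and route are placeholders; it assumes the client ID and secret are supplied via the config keys read above):

from flask import Flask, redirect, url_for

app = Flask(__name__)
app.secret_key = "replace-me"  # required for the OAuth state and session
app.config["DISCORD_OAUTH_CLIENT_ID"] = "..."
app.config["DISCORD_OAUTH_CLIENT_SECRET"] = "..."

discord_bp = make_discord_blueprint(scope=["identify"])
app.register_blueprint(discord_bp, url_prefix="/login")

@app.route("/")
def index():
    if not discord_bp.session.authorized:
        return redirect(url_for("discord.login"))
    resp = discord_bp.session.get("api/users/@me")
    return "Logged in as {}".format(resp.json()["username"])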
def create_api_v4_virtual_interface(self):
"""Get an instance of Api Virtual Interface services facade."""
return ApiV4VirtualInterface(
self.networkapi_url,
self.user,
self.password,
self.user_ldap) | 0.007605 |
def modified_environ(added=None, absent=()):
"""
Temporarily updates the os.environ dictionary in-place. Can be used as a
context manager or a decorator.
The os.environ dictionary is updated in-place so that the modification is
sure to work in all situations.
:param added: Dictionary of environment variables and values to set.
:param absent: List of environment variables to unset.
"""
env = os.environ
added = dict(added or {})
absent = tuple(absent)
in_env = partial(__filter, lambda i: i in env)
not_in_env = partial(__filter, lambda i: i not in env)
# List of environment variables being updated or removed.
stomped = in_env(chain(__keys(added), absent))
# Environment variables and values to restore on exit.
update_after = dict((a, env[a]) for a in stomped)
# Environment variables and values to remove on exit.
remove_after = tuple(not_in_env(__keys(added)))
def update(other):
return env.update(other)
def popper(item):
return env.pop(item, None)
def remove(items):
return tuple(__map(popper, items))
try:
update(added)
remove(absent)
yield
finally:
update(update_after)
remove(remove_after) | 0.000788 |
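Assuming the generator is wrapped with contextlib.contextmanager (the docstring's "context manager or decorator" wording suggests it is), usage looks like this; the variable names are made up:

import os

with modified_environ(added={"APP_DEBUG": "1"}, absent=("TEMP_TOKEN",)):
    assert os.environ["APP_DEBUG"] == "1"
    assert "TEMP_TOKEN" not in os.environ
# on exit, APP_DEBUG is removed (or restored to its prior value)
# and TEMP_TOKEN is restored if it existed before the block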
def exec_helper(self, cmd, builddir):
        ''' Execute the given command, returning an error message if an error occurred
        or None if the command was successful.'''
try:
child = subprocess.Popen(cmd, cwd=builddir)
child.wait()
except OSError as e:
if e.errno == errno.ENOENT:
if cmd[0] == 'cmake':
return 'CMake is not installed, please follow the installation instructions at http://docs.yottabuild.org/#installing'
else:
return '%s is not installed' % (cmd[0])
else:
return 'command %s failed' % (cmd)
if child.returncode:
return 'command %s failed' % (cmd) | 0.005384 |
def add_url(self, name: str, pattern: str, application: Callable) -> None:
""" add url pattern dispatching to application"""
self.urlmapper.add(name, self.prefix + pattern)
self.register_app(name, application) | 0.008584 |
def init(name, runtime):
"""Create a new Django app."""
runtime = click.unstyle(runtime)
stdout.write(
style.format_command(
'Initializing',
'%s %s %s' % (name, style.gray('@'), style.green(runtime))
)
)
config = Config(os.getcwd())
config.set('runtime', runtime)
config.save()
generate.main(['init', name], standalone_mode=False)
run.main(['python', 'manage.py', 'migrate']) | 0.002203 |
def get_request(self, request):
"""Get a list of DownloadRequests for all data that are under the given field in the table of a Geopedia layer.
:return: list of items which have to be downloaded
:rtype: list(DownloadRequest)
"""
request.layer = self._parse_layer(request.layer, return_wms_name=True)
return super().get_request(request) | 0.007792 |
def xlink_href_target(self, node, group=None):
"""
Return either:
        - a tuple (renderer, node) when the xlink:href attribute targets
a vector file or node
- the path to an image file for any raster image targets
- None if any problem occurs
"""
xlink_href = node.attrib.get('{http://www.w3.org/1999/xlink}href')
if not xlink_href:
return None
# First handle any raster embedded image data
match = re.match(r"^data:image/(jpeg|png);base64", xlink_href)
if match:
img_format = match.groups()[0]
            image_data = base64.b64decode(xlink_href[(match.span(0)[1] + 1):].encode('ascii'))
file_indicator, path = tempfile.mkstemp(suffix='.%s' % img_format)
with open(path, 'wb') as fh:
fh.write(image_data)
# Close temporary file (as opened by tempfile.mkstemp)
os.close(file_indicator)
# this needs to be removed later, not here...
# if exists(path): os.remove(path)
return path
# From here, we can assume this is a path.
if '#' in xlink_href:
iri, fragment = xlink_href.split('#', 1)
else:
iri, fragment = xlink_href, None
if iri:
# Only local relative paths are supported yet
if not isinstance(self.source_path, str):
logger.error(
"Unable to resolve image path '%s' as the SVG source is not a file system path." % iri
)
return None
path = os.path.normpath(os.path.join(os.path.dirname(self.source_path), iri))
if not os.access(path, os.R_OK):
return None
if path == self.source_path:
# Self-referencing, ignore the IRI part
iri = None
if iri:
if path.endswith('.svg'):
if path in self._parent_chain:
logger.error("Circular reference detected in file.")
raise CircularRefError()
if path not in self._external_svgs:
self._external_svgs[path] = ExternalSVG(path, self)
ext_svg = self._external_svgs[path]
if ext_svg.root_node is not None:
if fragment:
ext_frag = ext_svg.get_fragment(fragment)
if ext_frag is not None:
return ext_svg.renderer, ext_frag
else:
return ext_svg.renderer, ext_svg.root_node
else:
# A raster image path
try:
# This will catch invalid images
PDFImage(path, 0, 0)
except IOError:
logger.error("Unable to read the image %s. Skipping..." % path)
return None
return path
elif fragment:
# A pointer to an internal definition
if fragment in self.definitions:
return self, self.definitions[fragment]
else:
# The missing definition should appear later in the file
self.waiting_use_nodes[fragment].append((node, group))
return DELAYED | 0.002072 |