text | meta
---|---
"""
Based on:
http://stackoverflow.com/questions/7861196/check-if-a-geopoint-with-latitude-and-longitude-is-within-a-shapefile
"""
from osgeo import ogr
import sys
DATAPATH = '/home/kedo/Dropbox/CS 194-16/neighborhoods/'
drv = ogr.GetDriverByName('ESRI Shapefile') #We will load a shape file
#Get the contents of the shape file
ds_in = drv.Open(DATAPATH + 'sfneighborhoods.shp')
lyr_in = ds_in.GetLayer(0) #Get the shape file's first layer
#Put the title of the field you are interested in here
idx_reg = lyr_in.GetLayerDefn().GetFieldIndex('NAME')
#If the latitude/longitude we're going to use is not in the projection
#of the shapefile, then we will get erroneous results.
#The following assumes that the latitude/longitude is in WGS84
#This is identified by the number "4326", as in "EPSG:4326"
#We will create a transformation between this and the shapefile's
#projection, whatever it may be
geo_ref = lyr_in.GetSpatialRef()
point_ref=ogr.osr.SpatialReference()
point_ref.ImportFromEPSG(4326)
ctran=ogr.osr.CoordinateTransformation(point_ref, geo_ref)
def get_neighborhood(lon=0.0, lat=0.0):
#Transform incoming longitude/latitude to the shapefile's projection
[lon,lat,z]=ctran.TransformPoint(lon,lat)
#Create a point
pt = ogr.Geometry(ogr.wkbPoint)
pt.SetPoint_2D(0, lon, lat)
#Set up a spatial filter such that the only features we see when we
#loop through "lyr_in" are those which overlap the point defined above
lyr_in.SetSpatialFilter(pt)
#Loop through the overlapped features and display the field of interest
for feat_in in lyr_in:
# the first feature is what we want
return feat_in.GetFieldAsString(idx_reg)
# Our neighborhoods don't cover all the area of SF
# (ie: Golden Gate Park isn't a neighborhood)
# so we sometimes return None
return None
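# Illustrative usage sketch (not part of the original script). The coordinates below
# are hypothetical WGS84 longitude/latitude values for San Francisco; the returned
# string depends entirely on the NAME field of sfneighborhoods.shp.
if __name__ == '__main__':
    print(get_neighborhood(lon=-122.4194, lat=37.7749))  # a downtown point
    print(get_neighborhood(lon=-122.51, lat=37.77))      # may fall outside all polygons -> None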
| {
"content_hash": "a6d92ffbabbfea8fb3884fe6c43ad694",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 112,
"avg_line_length": 38.354166666666664,
"alnum_prop": 0.7409016838674634,
"repo_name": "kennydo/datascience-hanzai-keisu",
"id": "6411030c195a4105c26b71f6c202fc587cec8032",
"size": "1841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/neighborhood.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "5880"
}
],
"symlink_target": ""
} |
"""Loss for a Joint Multimodal Variational Auto-Encoder (JMVAE).
Closely follows the implementation in the following paper:
Joint Multimodal Learning with Deep Generative Models
Masahiro Suzuki, Kotaro Nakayama, Yutaka Matsuo
ArXiv: https://arxiv.org/abs/1611.01891
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import tensorflow as tf
from joint_vae import utils
from joint_vae import loss
# pylint: disable=not-callable
# TODO(hmoraldo): share more code between the three Loss subclasses.
class JmvaeLoss(loss.Loss):
r"""A class for the loss of a Joint Multimodal Variational Autoencoder.
The objective to optimize is the following:
Given modalities x and y, we want to learn a joint probabilistic model of
the inputs, marginalizing over latents 'z'. This code describes a
variational approach to the problem, using inference networks.
The overall objective, called JMVAE-kl in Suzuki et al., for networks
q(z| x, y), q(z| x) and q(z| y) is as follows:
\max E_{z~q(z| x, y)} [\log p(y| z) + \log p(x| z) - \log q(z| x, y)/p(z)
- \alpha * (\log q(z| x, y)/q(z| x) + \log q(z| x, y)/q(z| y))]
"""
def __init__(
self,
encoders,
decoders,
prior,
alpha_x=1.0,
alpha_y=1.0,
jmvae_alpha=0.1,
joint_distribution_index=0, # TODO(hmoraldo): should be list.
name='jmvae',
mode='train',
add_summary=True):
"""Initialises the components of the module.
JMVAEs are built from a set of three objects described below.
Args:
encoders: an Encoders object.
decoders: a Decoders object.
prior: callable returning `p(z|v)`.
alpha_x: scaling factor for the likelihood of the 'x' modality.
alpha_y: scaling factor for the likelihood of the 'y' modality.
jmvae_alpha: scalar; scaling factor for KL divergence in elbo.
joint_distribution_index: index of the encoder to use for sampling, and
to treat specially as joint distribution when computing the loss.
name: name of the Jmvae.
mode: 'train', 'val', or 'test'.
add_summary: Whether to spawn summary ops that record the value of
each term in the loss.
"""
alphas = [alpha_x, alpha_y]
super(JmvaeLoss, self).__init__(encoders, decoders, prior, alphas, name,
mode, add_summary)
self.jmvae_alpha = jmvae_alpha
self._cache = {}
self.joint_distribution_index = joint_distribution_index
def log_pdf_elbo(self, inputs, v=None):
"""Construct the jmvae kl objective function.
Args:
inputs: List of input observations, each a `Tensor` of size `[B, ...]`.
v: Placeholder.
Returns:
jmvae_kl_objective: Tensor of size [1], the objective value for the
evidence lower bound augmented with the KL divergence terms that
JMVAE-kl computes.
"""
log_ps, kl_ps = self._log_pdf_elbo_components(inputs, v)
log_ps_no_joint = [
p for i, p in enumerate(log_ps) if i != self.joint_distribution_index
]
kl_ps_no_joint = [
kl for i, kl in enumerate(kl_ps) if i != self.joint_distribution_index
]
# Build evidence lower bound.
elbo = tf.reduce_mean(
tf.reduce_sum(log_ps_no_joint, axis=0) -
kl_ps[self.joint_distribution_index])
kl = tf.reduce_mean(tf.reduce_sum(kl_ps_no_joint, axis=0))
jmvae_kl_objective = elbo - self.jmvae_alpha * kl
if self._add_summary:
utils.scalar_summary_with_scope('elbo', elbo, self._mode)
utils.scalar_summary_with_scope('kl', kl, self._mode)
return jmvae_kl_objective
# TODO(vrama): Clean this, handle mask argument better.
def build_nelbo_loss(self, inputs, mask=None, v=None):
"""Construct the final loss to train the JMVAE.
Args:
inputs: List of input observations, `Tensor` of size `[B, ...]``.
mask: Placeholder, does not do anything.
v: Placeholder, does not do anything.
Returns:
loss: [1] `Tensor`, loss to train the JMVAE.
"""
if mask is not None:
logging.warn('Masking is not implemented for JMVAE.')
elbo = self.log_pdf_elbo(inputs)
loss_tensor = -elbo
if self._add_summary:
utils.scalar_summary_with_scope('NELBO', loss_tensor, self._mode)
return loss_tensor
def _log_pdf_elbo_components(self, inputs, v=None):
"""Calculates a components for the ELBO and for JMVAE-kl objectives.
Args:
inputs: List of input observations, each a `Tensor` of size `[B, ...]`.
v: The covariate to condition the inference over, e.g. labels.
Returns:
log_ps: List of [B] `Tensor`, each representing log p(x | z).
kls: List of [B] `Tensor`, each representing a KL divergence.
"""
with tf.name_scope('{}_log_pdf_elbo'.format(self.scope_name)):
# Calculate sampling KL and keep z around.
kls, z = self._kl_and_z(inputs, v)
# Evaluate log_p.
predictions = self._decoders.predict(z, v)
log_ps = [
alpha * utils.compute_likelihood(p, inpt)
for alpha, p, inpt in zip(self._alphas, predictions, inputs)
]
return log_ps, kls
def _kl_and_z(self, inputs, v=None):
"""Returns analytical or sampled KL divergence and a sample.
This will return the analytical KL divergence if one is available (as
registered with `kullback_leibler.RegisterKL`), and if this is not available
then it will return a sampled KL divergence (in this case the returned
sample is the one used for the KL divergence).
Args:
inputs: List of input observations, each a `Tensor` of size `[B, ...]`.
v: The covariate to condition over, e.g. labels.
Returns:
Pair `(kl, z)`, where `kl` is a list of KL divergences (each a `Tensor`
with shape `[B]`, where `B` is the batch size), and `z` is the sample
from the latent space used to compute it.
"""
prior = self._prior(v)
latents = self._encoders.infer_latent(inputs, v)
# Always sample from the specified distribution to compute expectation.
z = latents[self.joint_distribution_index].density.sample()
try:
q_joint = latents[self.joint_distribution_index].density
kls_q_p = [
tf.contrib.distributions.kl_divergence(
q_joint, (prior
if i == self.joint_distribution_index else q.density))
for i, q in enumerate(latents)
]
except NotImplementedError:
logging.warn('Analytic KLD not available, using sampling KLD instead.')
log_p_z = prior.log_prob(z, name='log_p_z')
q_joint = latents[self.joint_distribution_index].density.log_prob(
z, name='log_q_z_joint')
kls_q_p = [
q_joint - (log_p_z if i == self.joint_distribution_index else
q.density.log_prob(z, name='log_qxy_z_%s' % i))
for i, q in enumerate(latents)
]
# Reduce over all dimension except batch. Assumes all kls have same shape.
sum_axis = range(1, kls_q_p[0].get_shape().ndims)
kl = [tf.reduce_sum(k, sum_axis, name='kl_q_p_%s' % i) for i, k in enumerate(kls_q_p)]
return kl, z
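# Worked numeric sketch (not part of the original module): illustrates the JMVAE-kl
# objective from the class docstring for scalar Gaussians, using closed-form Gaussian
# KL divergences. All distribution parameters below are made up; the real class
# computes these terms with TensorFlow distributions instead of NumPy.
def _jmvae_kl_toy_example(alpha=0.1):
  import numpy as np

  def gauss_kl(m1, s1, m2, s2):
    # KL(N(m1, s1^2) || N(m2, s2^2)) in closed form.
    return np.log(s2 / s1) + (s1 ** 2 + (m1 - m2) ** 2) / (2 * s2 ** 2) - 0.5

  # Hypothetical posteriors q(z|x,y), q(z|x), q(z|y) and standard normal prior p(z).
  m_joint, s_joint = 0.3, 0.8
  m_x, s_x = 0.5, 1.0
  m_y, s_y = 0.1, 0.9
  expected_log_lik = -1.2  # stand-in for E_q[log p(x|z) + log p(y|z)]

  elbo = expected_log_lik - gauss_kl(m_joint, s_joint, 0.0, 1.0)   # KL(q(z|x,y) || p(z))
  kl_to_marginals = (gauss_kl(m_joint, s_joint, m_x, s_x) +        # KL(q(z|x,y) || q(z|x))
                     gauss_kl(m_joint, s_joint, m_y, s_y))         # KL(q(z|x,y) || q(z|y))
  return elbo - alpha * kl_to_marginals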
| {
"content_hash": "7f031879a6bb0d935a439b7c18c1acc7",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 80,
"avg_line_length": 35.805,
"alnum_prop": 0.6427873202066751,
"repo_name": "google/joint_vae",
"id": "e246c5cfbf427e9b93b4b1331802e7f568f154b5",
"size": "7740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "joint_vae/jmvae_loss.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "117462"
},
{
"name": "OpenEdge ABL",
"bytes": "3969555"
},
{
"name": "Python",
"bytes": "497142"
},
{
"name": "Shell",
"bytes": "44681"
}
],
"symlink_target": ""
} |
"""Bio.SeqIO support for the "phd" file format.
PHD files are output by PHRED and used by PHRAP and CONSED.
You are expected to use this module via the Bio.SeqIO functions, under the
format name "phd". See also the underlying Bio.Sequencing.Phd module.
For example, using Bio.SeqIO we can read in one of the example PHRED files
from the Biopython unit tests:
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("Phd/phd1", "phd"):
... print(record.id)
... print("%s..." % record.seq[:10])
... print("%s..." % record.letter_annotations["phred_quality"][:10])
34_222_(80-A03-19).b.ab1
ctccgtcgga...
[9, 9, 10, 19, 22, 37, 28, 28, 24, 22]...
425_103_(81-A03-19).g.ab1
cgggatccca...
[14, 17, 22, 10, 10, 10, 15, 8, 8, 9]...
425_7_(71-A03-19).b.ab1
acataaatca...
[10, 10, 10, 10, 8, 8, 6, 6, 6, 6]...
Since PHRED files contain quality scores, you can save them as FASTQ or as
QUAL files, for example using Bio.SeqIO.write(...), or simply with the format
method of the SeqRecord object:
>>> print(record[:50].format("fastq"))
@425_7_(71-A03-19).b.ab1
acataaatcaaattactnaccaacacacaaaccngtctcgcgtagtggag
+
++++))'''')(''')$!$''')''''(+.''$!$))))+)))'''''''
<BLANKLINE>
Or,
>>> print(record[:50].format("qual"))
>425_7_(71-A03-19).b.ab1
10 10 10 10 8 8 6 6 6 6 8 7 6 6 6 8 3 0 3 6 6 6 8 6 6 6 6 7
10 13 6 6 3 0 3 8 8 8 8 10 8 8 8 6 6 6 6 6 6 6
<BLANKLINE>
Note these examples only show the first 50 bases to keep the output short.
"""
from __future__ import print_function
from Bio.SeqRecord import SeqRecord
from Bio.Sequencing import Phd
from Bio.SeqIO.Interfaces import SequentialSequenceWriter
from Bio.SeqIO import QualityIO
def PhdIterator(handle):
"""Returns SeqRecord objects from a PHD file.
This uses the Bio.Sequencing.Phd module to do the hard work.
"""
phd_records = Phd.parse(handle)
for phd_record in phd_records:
# Convert the PHD record into a SeqRecord...
# The "filename" can contain spaces, e.g. 'HWI-EAS94_4_1_1_602_99 1'
# from unit test example file phd_solexa.
# This will cause problems if used as the record identifier
# (e.g. output for FASTQ format).
name = phd_record.file_name.split(None, 1)[0]
seq_record = SeqRecord(phd_record.seq,
id=name, name=name,
description=phd_record.file_name)
# Just re-use the comments dictionary as the SeqRecord's annotations
seq_record.annotations = phd_record.comments
# And store the qualities and peak locations as per-letter-annotation
seq_record.letter_annotations["phred_quality"] = \
[int(site[1]) for site in phd_record.sites]
try:
seq_record.letter_annotations["peak_location"] = \
[int(site[2]) for site in phd_record.sites]
except IndexError:
# peak locations are not always there according to
# David Gordon (the Consed author)
pass
yield seq_record
# All done
class PhdWriter(SequentialSequenceWriter):
"""Class to write Phd format files"""
def __init__(self, handle):
SequentialSequenceWriter.__init__(self, handle)
def write_record(self, record):
"""Write a single Phd record to the file."""
assert record.seq, "No sequence present in SeqRecord"
# This method returns the 'phred_quality' scores or converted
# 'solexa_quality' scores if present, else raises a value error
phred_qualities = QualityIO._get_phred_quality(record)
peak_locations = record.letter_annotations.get("peak_location")
assert len(record.seq) == len(phred_qualities), "Number of " + \
"phd quality scores does not match length of sequence"
if peak_locations:
assert len(record.seq) == len(peak_locations), "Number " + \
"of peak location scores does not match length of sequence"
if None in phred_qualities:
raise ValueError("A quality value of None was found")
if record.description.startswith("%s " % record.id):
title = record.description
else:
title = "%s %s" % (record.id, record.description)
self.handle.write("BEGIN_SEQUENCE %s\nBEGIN_COMMENT\n"
% self.clean(title))
for annot in [k.lower() for k in Phd.CKEYWORDS]:
value = None
if annot == "trim":
if record.annotations.get("trim"):
value = "%s %s %.4f" % record.annotations["trim"]
elif annot == "trace_peak_area_ratio":
if record.annotations.get("trace_peak_area_ratio"):
value = "%.4f" % record.annotations[
"trace_peak_area_ratio"]
else:
value = record.annotations.get(annot)
if value or value == 0:
self.handle.write("%s: %s\n" % (annot.upper(), value))
self.handle.write("END_COMMENT\nBEGIN_DNA\n")
for i, site in enumerate(record.seq):
if peak_locations:
self.handle.write("%s %i %i\n" % (
site,
round(phred_qualities[i]),
peak_locations[i])
)
else:
self.handle.write("%s %i\n" % (
site,
round(phred_qualities[i]))
)
self.handle.write("END_DNA\nEND_SEQUENCE\n")
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
| {
"content_hash": "021bf0f64746a5683a6fe95d70838ab8",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 77,
"avg_line_length": 38.69387755102041,
"alnum_prop": 0.5831575246132208,
"repo_name": "zjuchenyuan/BioWeb",
"id": "92771e21adf864cbd360b80aaa1f9b25259e7c12",
"size": "5984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/Bio/SeqIO/PhdIO.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "22925"
},
{
"name": "Batchfile",
"bytes": "143"
},
{
"name": "C",
"bytes": "414849"
},
{
"name": "CSS",
"bytes": "84526"
},
{
"name": "HTML",
"bytes": "6119"
},
{
"name": "Perl",
"bytes": "11818"
},
{
"name": "Python",
"bytes": "6614790"
}
],
"symlink_target": ""
} |
"""
Module containing useful exceptions for pepperstack
"""
class PepperstackException(Exception):
"""
Base class for all pepperstack exceptions to make
catch-all easier
"""
pass
class DoesNotExistsException(PepperstackException):
"""
An exception class raised when a model is not found
"""
pass
class DuplicateException(PepperstackException):
"""
An exception class raised when trying to create duplicate
entries in database
"""
pass
class CommandException(PepperstackException):
"""
An exception class used to handle command errors
"""
pass
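# Minimal usage sketch (not part of the original module) showing the catch-all handling
# the shared base class enables; `operation` is a hypothetical callable.
def _run_with_error_handling(operation):
    try:
        return operation()
    except DoesNotExistsException:
        return None
    except PepperstackException as exc:
        # DuplicateException, CommandException and any future pepperstack-specific
        # errors all land here because they share the same base class.
        raise SystemExit(str(exc))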
| {
"content_hash": "8261d213a7bda30f2419b995c2c855f1",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 61,
"avg_line_length": 17.36111111111111,
"alnum_prop": 0.6896,
"repo_name": "Korrigan/pepperstack",
"id": "ae5a130506b5c1e325896d1fdc391b9b74bd8080",
"size": "625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pepperstack/utils/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34396"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
import sys
from hashlib import sha1
from pants.build_graph.build_graph import sort_targets
from pants.build_graph.target import Target
from pants.invalidation.build_invalidator import BuildInvalidator, CacheKeyGenerator
from pants.util.dirutil import relative_symlink, safe_delete, safe_mkdir, safe_rmtree
class VersionedTargetSet(object):
"""Represents a list of targets, a corresponding CacheKey, and a flag determining whether the
list of targets is currently valid.
When invalidating a single target, this can be used to represent that target as a singleton.
When checking the artifact cache, this can also be used to represent a list of targets that are
built together into a single artifact.
"""
class IllegalResultsDir(Exception):
"""Indicate a problem interacting with a versioned target results directory."""
@staticmethod
def from_versioned_targets(versioned_targets):
"""
:API: public
"""
first_target = versioned_targets[0]
cache_manager = first_target._cache_manager
# Quick sanity check; all the versioned targets should have the same cache manager.
# TODO(ryan): the way VersionedTargets store their own links to a single CacheManager instance
# feels hacky; see if there's a cleaner way for callers to handle awareness of the CacheManager.
for versioned_target in versioned_targets:
if versioned_target._cache_manager != cache_manager:
raise ValueError("Attempting to combine versioned targets {} and {} with different"
" CacheManager instances: {} and {}".format(first_target, versioned_target,
cache_manager,
versioned_target._cache_manager))
return VersionedTargetSet(cache_manager, versioned_targets)
def __init__(self, cache_manager, versioned_targets):
self._cache_manager = cache_manager
self.versioned_targets = versioned_targets
self.targets = [vt.target for vt in versioned_targets]
# The following line is a no-op if cache_key was set in the VersionedTarget __init__ method.
self.cache_key = CacheKeyGenerator.combine_cache_keys([vt.cache_key
for vt in versioned_targets])
# NB: previous_cache_key may be None on the first build of a target.
self.previous_cache_key = cache_manager.previous_key(self.cache_key)
self.valid = self.previous_cache_key == self.cache_key
if cache_manager.invalidation_report:
cache_manager.invalidation_report.add_vts(cache_manager, self.targets, self.cache_key,
self.valid, phase='init')
self._results_dir = None
self._current_results_dir = None
self._previous_results_dir = None
# True if the results_dir for this VT was created incrementally via clone of the
# previous results_dir.
self.is_incremental = False
def update(self):
self._cache_manager.update(self)
def force_invalidate(self):
# Note: This method isn't exposed as Public because the api is not yet
# finalized, however it is currently used by Square for plugins. There is
# an open OSS issue to finalize this API. Please take care when changing
# until https://github.com/pantsbuild/pants/issues/2532 is resolved.
self._cache_manager.force_invalidate(self)
@property
def has_results_dir(self):
return self._results_dir is not None
@property
def has_previous_results_dir(self):
return self._previous_results_dir is not None and os.path.isdir(self._previous_results_dir)
@property
def results_dir(self):
"""The directory that stores results for these targets.
The results_dir is represented by a stable symlink to the current_results_dir: consumers
should generally prefer to access the stable directory.
"""
if self._results_dir is None:
raise ValueError('No results_dir was created for {}'.format(self))
return self._results_dir
@property
def current_results_dir(self):
"""A unique directory that stores results for this version of these targets.
"""
if self._current_results_dir is None:
raise ValueError('No results_dir was created for {}'.format(self))
return self._current_results_dir
@property
def previous_results_dir(self):
"""The directory that stores results for the previous version of these targets.
Only valid if is_incremental is true.
TODO: Exposing old results is a bit of an abstraction leak, because ill-behaved Tasks could
mutate them.
"""
if not self.has_previous_results_dir:
raise ValueError('There is no previous_results_dir for: {}'.format(self))
return self._previous_results_dir
def ensure_legal(self):
"""Return True as long as the state does not break any internal contracts."""
# Do our best to provide complete feedback, it's easy to imagine the frustration of flipping between error states.
if self._results_dir:
errors = ''
if not os.path.islink(self._results_dir):
errors += '\nThe results_dir is no longer a symlink:\n\t* {}'.format(self._results_dir)
if not os.path.isdir(self._current_results_dir):
errors += '\nThe current_results_dir directory was not found\n\t* {}'.format(self._current_results_dir)
if errors:
raise self.IllegalResultsDir(
'\nThe results_dirs state should not be manually cleaned or recreated by tasks.\n{}'.format(errors)
)
return True
def live_dirs(self):
"""Yields directories that must exist for this VersionedTarget to function."""
# The only caller of this function is the workdir cleaning pipeline. It is not clear that the previous_results_dir
# should be returned for that purpose. And, by the time this is called, the contents have already been copied.
if self.has_results_dir:
yield self.results_dir
yield self.current_results_dir
if self.has_previous_results_dir:
yield self.previous_results_dir
def __repr__(self):
return 'VTS({}, {})'.format(','.join(target.address.spec for target in self.targets),
'valid' if self.valid else 'invalid')
class VersionedTarget(VersionedTargetSet):
"""This class represents a singleton VersionedTargetSet, and has links to VersionedTargets that
the wrapped target depends on (after having resolved through any "alias" targets).
:API: public
"""
def __init__(self, cache_manager, target, cache_key):
"""
:API: public
"""
if not isinstance(target, Target):
raise ValueError("The target {} must be an instance of Target but is not.".format(target.id))
self.target = target
self.cache_key = cache_key
# Must come after the assignments above, as they are used in the parent's __init__.
super(VersionedTarget, self).__init__(cache_manager, [self])
self.id = target.id
def create_results_dir(self):
"""Ensure that the empty results directory and a stable symlink exist for these versioned targets."""
self._current_results_dir = self._cache_manager.results_dir_path(self.cache_key, stable=False)
self._results_dir = self._cache_manager.results_dir_path(self.cache_key, stable=True)
if not self.valid:
# Clean the workspace for invalid vts.
safe_mkdir(self._current_results_dir, clean=True)
relative_symlink(self._current_results_dir, self._results_dir)
self.ensure_legal()
def copy_previous_results(self, root_dir):
"""Use the latest valid results_dir as the starting contents of the current results_dir.
Should be called after the cache is checked, since previous_results are not useful if there is a cached artifact.
"""
# TODO(mateo): An immediate followup removes the root_dir param, it is identical to the task.workdir.
# TODO(mateo): This should probably be managed by the task, which manages the rest of the incremental support.
if not self.previous_cache_key:
return None
previous_path = self._cache_manager.results_dir_path(self.previous_cache_key, stable=False)
if os.path.isdir(previous_path):
self.is_incremental = True
safe_rmtree(self._current_results_dir)
shutil.copytree(previous_path, self._current_results_dir)
safe_mkdir(self._current_results_dir)
relative_symlink(self._current_results_dir, self.results_dir)
# Set the self._previous last, so that it is only True after the copy completed.
self._previous_results_dir = previous_path
def __repr__(self):
return 'VT({}, {})'.format(self.target.id, 'valid' if self.valid else 'invalid')
class InvalidationCheck(object):
"""The result of calling check() on a CacheManager.
Each member is a list of VersionedTargetSet objects. Sorting of the targets depends
on how you order the InvalidationCheck from the InvalidationCacheManager.
Tasks may need to perform no, some or all operations on either of these, depending on how they
are implemented.
"""
def __init__(self, all_vts, invalid_vts):
"""
:API: public
"""
# All the targets, valid and invalid.
self.all_vts = all_vts
# Just the invalid targets.
self.invalid_vts = invalid_vts
class InvalidationCacheManager(object):
"""Manages cache checks, updates and invalidation keeping track of basic change
and invalidation statistics.
Note that this is distinct from the ArtifactCache concept, and should probably be renamed.
"""
class CacheValidationError(Exception):
"""Indicates a problem accessing the cache."""
_STABLE_DIR_NAME = 'current'
def __init__(self,
results_dir_root,
cache_key_generator,
build_invalidator_dir,
invalidate_dependents,
fingerprint_strategy=None,
invalidation_report=None,
task_name=None,
task_version=None,
artifact_write_callback=lambda _: None):
"""
:API: public
"""
self._cache_key_generator = cache_key_generator
self._task_name = task_name or 'UNKNOWN'
self._task_version = task_version or 'Unknown_0'
self._invalidate_dependents = invalidate_dependents
self._invalidator = BuildInvalidator(build_invalidator_dir)
self._fingerprint_strategy = fingerprint_strategy
self._artifact_write_callback = artifact_write_callback
self.invalidation_report = invalidation_report
# Create the task-versioned prefix of the results dir, and a stable symlink to it (useful when debugging).
self._results_dir_prefix = os.path.join(results_dir_root, sha1(self._task_version).hexdigest()[:12])
safe_mkdir(self._results_dir_prefix)
stable_prefix = os.path.join(results_dir_root, self._STABLE_DIR_NAME)
safe_delete(stable_prefix)
relative_symlink(self._results_dir_prefix, stable_prefix)
def update(self, vts):
"""Mark a changed or invalidated VersionedTargetSet as successfully processed."""
for vt in vts.versioned_targets:
vt.ensure_legal()
if not vt.valid:
self._invalidator.update(vt.cache_key)
vt.valid = True
self._artifact_write_callback(vt)
if not vts.valid:
vts.ensure_legal()
self._invalidator.update(vts.cache_key)
vts.valid = True
self._artifact_write_callback(vts)
def force_invalidate(self, vts):
"""Force invalidation of a VersionedTargetSet."""
for vt in vts.versioned_targets:
self._invalidator.force_invalidate(vt.cache_key)
vt.valid = False
self._invalidator.force_invalidate(vts.cache_key)
vts.valid = False
def check(self,
targets,
topological_order=False):
"""Checks whether each of the targets has changed and invalidates it if so.
Returns a list of VersionedTargetSet objects (either valid or invalid). The returned sets
'cover' the input targets, with one caveat: if the FingerprintStrategy
opted out of fingerprinting a target because it doesn't contribute to invalidation, then that
target will be excluded from all_vts and invalid_vts.
Callers can inspect these vts and rebuild the invalid ones, for example.
"""
all_vts = self.wrap_targets(targets, topological_order=topological_order)
invalid_vts = filter(lambda vt: not vt.valid, all_vts)
return InvalidationCheck(all_vts, invalid_vts)
@property
def task_name(self):
return self._task_name
def results_dir_path(self, key, stable):
"""Return a results directory path for the given key.
:param key: A CacheKey to generate an id for.
:param stable: True to use a stable subdirectory, false to use a portion of the cache key to
generate a path unique to the key.
"""
# TODO: Shorten cache_key hashes in general?
return os.path.join(
self._results_dir_prefix,
key.id,
self._STABLE_DIR_NAME if stable else sha1(key.hash).hexdigest()[:12]
)
def wrap_targets(self, targets, topological_order=False):
"""Wrap targets and their computed cache keys in VersionedTargets.
If the FingerprintStrategy opted out of providing a fingerprint for a target, that target will not
have an associated VersionedTarget returned.
Returns a list of VersionedTargets, each representing one input target.
"""
def vt_iter():
if topological_order:
target_set = set(targets)
sorted_targets = [t for t in reversed(sort_targets(targets)) if t in target_set]
else:
sorted_targets = sorted(targets)
for target in sorted_targets:
target_key = self._key_for(target)
if target_key is not None:
yield VersionedTarget(self, target, target_key)
return list(vt_iter())
def previous_key(self, cache_key):
return self._invalidator.previous_key(cache_key)
def _key_for(self, target):
try:
return self._cache_key_generator.key_for_target(target,
transitive=self._invalidate_dependents,
fingerprint_strategy=self._fingerprint_strategy)
except Exception as e:
# This is a catch-all for problems we haven't caught up with and given a better diagnostic.
# TODO(Eric Ayers): If you see this exception, add a fix to catch the problem earlier.
exc_info = sys.exc_info()
new_exception = self.CacheValidationError("Problem validating target {} in {}: {}"
.format(target.id, target.address.spec_path, e))
raise self.CacheValidationError, new_exception, exc_info[2]
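# Illustrative sketch (not part of the original module): how a caller might drive the
# invalidation flow described in check()'s docstring. `cache_manager` is an
# InvalidationCacheManager, `targets` a list of Targets, and `build` a hypothetical
# callable that writes artifacts into a results directory.
def _example_invalidation_flow(cache_manager, targets, build):
  invalidation_check = cache_manager.check(targets)
  for vt in invalidation_check.invalid_vts:
    vt.create_results_dir()            # creates the unique dir and stable symlink
    build(vt.target, vt.results_dir)   # do the actual work for this target
    cache_manager.update(vt)           # marks the VersionedTarget as valid
  return invalidation_check.all_vts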
| {
"content_hash": "871b649683c427b82e3ac7d3585e5055",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 118,
"avg_line_length": 41.73314606741573,
"alnum_prop": 0.6835834959951538,
"repo_name": "peiyuwang/pants",
"id": "508a42ad47789eb6da6c427c18cce1c10d128d1b",
"size": "15004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/invalidation/cache_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1746"
},
{
"name": "HTML",
"bytes": "78744"
},
{
"name": "Java",
"bytes": "463179"
},
{
"name": "JavaScript",
"bytes": "30784"
},
{
"name": "Protocol Buffer",
"bytes": "4749"
},
{
"name": "Python",
"bytes": "5586816"
},
{
"name": "Rust",
"bytes": "168825"
},
{
"name": "Scala",
"bytes": "79707"
},
{
"name": "Shell",
"bytes": "64292"
},
{
"name": "Thrift",
"bytes": "2183"
}
],
"symlink_target": ""
} |
from .base import * # noqa
DEBUG = True
TEMPLATE_DEBUG = True
| {
"content_hash": "1c57e7f47ae6e6b939a5eef93b76f1f7",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 27,
"avg_line_length": 16,
"alnum_prop": 0.6875,
"repo_name": "uppsaladatavetare/foobar-api",
"id": "57c40671a7d08c11d15766e3ce246c21dee7a5a0",
"size": "64",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/foobar/settings/dev.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3317"
},
{
"name": "HTML",
"bytes": "10880"
},
{
"name": "JavaScript",
"bytes": "10604"
},
{
"name": "Makefile",
"bytes": "796"
},
{
"name": "Python",
"bytes": "318730"
}
],
"symlink_target": ""
} |
from django.contrib import admin
import models
from django.utils.html import format_html
# Register your models here.
class ReporteProductoAdmin(admin.ModelAdmin):
list_display = ['id_reporte', 'nombre', 'inicio', 'fin', 'accion_reporte']
search_fields = ['id', 'nombre', 'inicio', 'fin']
list_display_links = ('id_reporte',)
def id_reporte(self, obj):
i = 0
men = ''
while i < 10 - len(str(obj.pk)):
men = men + '0'
i = i+1
# end for
return '%s%d' % (men, obj.pk)
# end def
class Media:
js = ('/static/reporte/js/jquery-3.1.1.js', '/static/reporte/js/reporte.js',)
# end class
def accion_reporte(self, obj):
return format_html("<a href='{0}' class='generar addlink'>Imprimir</a>", obj.id)
# end def
id_reporte.allow_tags = True
id_reporte.short_description = 'Reporte Id'
accion_reporte.allow_tags = True
accion_reporte.short_description = 'Generar'
# end class
admin.site.register(models.ReporteProducto, ReporteProductoAdmin)
| {
"content_hash": "f372f013b57d1a38391ed1e27e81f228",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 88,
"avg_line_length": 30.62857142857143,
"alnum_prop": 0.6147388059701493,
"repo_name": "vpadillar/pventa",
"id": "7c7b47c6dd4ad8a161cd0e34114f5d29d0d09204",
"size": "1072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reporte/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "491"
},
{
"name": "CSS",
"bytes": "87140"
},
{
"name": "Groff",
"bytes": "76"
},
{
"name": "HTML",
"bytes": "47212"
},
{
"name": "JavaScript",
"bytes": "177804"
},
{
"name": "Python",
"bytes": "201594"
},
{
"name": "SQLPL",
"bytes": "1006"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class MoonTrackerConfig(AppConfig):
name = 'moon_tracker'
verbose_name = 'Moon Scan Tracker'
def ready(self):
import moon_tracker.signals
| {
"content_hash": "f19a273dde1d78c691c2c263af6f6a6c",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 38,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.7025641025641025,
"repo_name": "StephenSwat/eve_lunar_mining_organiser",
"id": "a75f09e146974ab23fee68da000f7acffa71a4c3",
"size": "195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elmo/moon_tracker/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6345"
},
{
"name": "HTML",
"bytes": "24410"
},
{
"name": "Python",
"bytes": "49457"
}
],
"symlink_target": ""
} |
import sitefile
class Channel(object):
def __init__(self, xml, config):
if "site" in xml.attrib:
self.basic = True
self.site = xml.attrib['site']
self.site_id = xml.attrib['site_id']
self.update = xml.attrib['update'] # TODO: Something useful with this
if self.site not in config.sites:
config.sites[self.site] = sitefile.Site(self.site, config)
else:
self.basic = False
self.offset = xml.attrib['offset']
self.same_as = xml.attrib['same_as']
self.xmltvid = xml.attrib['xmltv_id']
self.name = xml.text
| {
"content_hash": "9676fc3e51770a4c2d76b1e98af9a2c0",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 82,
"avg_line_length": 36.27777777777778,
"alnum_prop": 0.552833078101072,
"repo_name": "cyberjacob/pygrab",
"id": "80f9fa3388b13f3b1794b58573979b8510df1775",
"size": "676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "channel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19527"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import logging
import weakref
from .base import SubsystemBase
from ..dispatch import Signal
from ..results import ResultsDictionary
__all__ = ['PeerList']
_log = logging.getLogger(__name__)
class PeerList(SubsystemBase):
def __init__(self, core):
self.core = weakref.ref(core)
self._results = ResultsDictionary()
core.register('peerlist', self._handle_subsystem, self._handle_error)
self.onadd = Signal()
self.ondrop = Signal()
def list(self):
socket = self.core().socket
result = next(self._results)
socket.send_vip(b'', b'peerlist', [b'list'], result.ident)
return result
__call__ = list
def _handle_subsystem(self, message):
try:
op = bytes(message.args[0])
except IndexError:
_log.error('missing peerlist subsystem operation')
return
if op in [b'add', b'drop']:
try:
peer = bytes(message.args[1])
except IndexError:
_log.error('missing peerlist identity in %s operation', op)
return
getattr(self, 'on' + op).send(self, peer=peer)
elif op == b'listing':
try:
result = self._results.pop(bytes(message.id))
except KeyError:
return
result.set([bytes(arg) for arg in message.args[1:]])
else:
_log.error('unknown peerlist subsystem operation')
def _handle_error(self, sender, message, error, **kwargs):
try:
result = self._results.pop(bytes(message.id))
except KeyError:
return
result.set_exception(error)
| {
"content_hash": "e88177aa31841562071b11bc7d8f6b48",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 77,
"avg_line_length": 28.916666666666668,
"alnum_prop": 0.5694524495677233,
"repo_name": "schandrika/volttron",
"id": "df33fc820f836a2f133077510d0d6f39c950305f",
"size": "4636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "volttron/platform/vip/agent/subsystems/peerlist.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "33023"
},
{
"name": "HTML",
"bytes": "61489"
},
{
"name": "JavaScript",
"bytes": "497583"
},
{
"name": "Python",
"bytes": "3090478"
},
{
"name": "Shell",
"bytes": "41093"
}
],
"symlink_target": ""
} |
from jsonmodels.models import Base
from .channel import Channel
class Device(Base):
""" Contains info about a device and it's channels. """
def __init__(self, **kwargs):
"""
Initializes a Device object by looping through the
keywords in kwargs and setting them as attributes.
:param kwargs: Dictionary containing a device.
"""
for keyword in ["id", "naturalId", "name",
"thingId", "channels", "signatures"]:
if keyword == "channels" and kwargs[keyword] is not None:
kwargs[keyword] = [Channel(**channel_info)
for channel_info in kwargs[keyword]]
setattr(self, keyword, kwargs[keyword])
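# Hypothetical usage sketch (not part of the original module): every field value below
# is invented, and `channels` is left as None so no Channel objects need to be built.
def _example_device():
    return Device(
        id="d1",
        naturalId="1234abcd",
        name="Living room lamp",
        thingId="t1",
        channels=None,
        signatures={},
    )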
| {
"content_hash": "e911946ce224ffc06a1f7af2135c032d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 71,
"avg_line_length": 39.1578947368421,
"alnum_prop": 0.5806451612903226,
"repo_name": "keerts/pyninjasphere",
"id": "4aab4aec66dcbdcbda6ed1d64c795c8a60461e88",
"size": "744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyninjasphere/logic/device.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32469"
}
],
"symlink_target": ""
} |
def convert_to_list_of_dicts(header_row, data_rows):
return [dict(zip(header_row, data_row)) for data_row in data_rows]
def convert_to_table_structure(list_of_dicts):
header_row = sorted(list_of_dicts[0].keys())
data_rows = []
for dictionary in list_of_dicts:
data_row = []
for key in header_row:
data_row.append(dictionary.get(key))
data_rows.append(data_row)
return header_row, data_rows
def split_dict(data):
headers = []
values = []
for key, value in data.items():
headers.append(key)
values.append(value)
return headers, values
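# Worked example (not part of the original module) showing how the helpers above
# round-trip between a header-row/data-rows table and a list of dicts; the sample
# values are made up.
def _example_round_trip():
    header = ["id", "email"]
    rows = [[1, "a@example.com"], [2, "b@example.com"]]
    as_dicts = convert_to_list_of_dicts(header, rows)
    # -> [{'id': 1, 'email': 'a@example.com'}, {'id': 2, 'email': 'b@example.com'}]
    new_header, new_rows = convert_to_table_structure(as_dicts)
    # new_header is sorted alphabetically: ['email', 'id']
    return new_header, new_rows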
| {
"content_hash": "bd0458af0db981e09faa0cbabbff897a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 70,
"avg_line_length": 22.535714285714285,
"alnum_prop": 0.6196513470681458,
"repo_name": "spothero/py-responsys",
"id": "fab44b942fd671a0887a8d44ea9469c9509850ec",
"size": "631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "responsys_client/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25913"
}
],
"symlink_target": ""
} |
from enable.savage.svg.svg_extras import *
| {
"content_hash": "9bb1aabee64870c3ae0adfe0e4cf66e6",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 42,
"avg_line_length": 43,
"alnum_prop": 0.7906976744186046,
"repo_name": "enthought/etsproxy",
"id": "860d5f1b59b72a173b024147a416dc1bc2746d1f",
"size": "58",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/savage/svg/svg_extras.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
} |
from starthinker.util.google_api import API_DCM
from starthinker.util.cm import get_profile_for_api
def bulkdozer_test(config, task):
print('testing bulkdozer')
if 'verify' in task['traffic']:
is_admin, profile_id = get_profile_for_api(
config, task['auth'], task['traffic']['account_id'])
for entity in task['traffic']['verify']:
service = getattr(
API_DCM(config, task['auth'], internal=is_admin), entity['type'])
cm_entity = service().get(profileId=profile_id, id=entity['id']).execute()
values = entity['values']
for key in values:
if values[key] != cm_entity[key]:
raise ValueError('%s %s expected to be %s, was %s' % (entity['type'], key, values[key], cm_entity[key]))
| {
"content_hash": "cd4a0386d16bbcbf24c018680b887a21",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 114,
"avg_line_length": 35.857142857142854,
"alnum_prop": 0.6347941567065073,
"repo_name": "google/starthinker",
"id": "e8c7ac0877242387e133884e44c492ad242412d4",
"size": "1495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "starthinker/task/traffic/check.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "89775"
},
{
"name": "Jupyter Notebook",
"bytes": "1088964"
},
{
"name": "Python",
"bytes": "2356647"
},
{
"name": "Shell",
"bytes": "89492"
}
],
"symlink_target": ""
} |
"""
Show vector field flow
"""
from __future__ import division
from vispy import app, scene, visuals, gloo
from vispy.util import ptime
import numpy as np
class VectorFieldVisual(visuals.Visual):
vertex = """
uniform sampler2D field;
attribute vec2 index;
uniform vec2 shape;
uniform vec2 field_shape;
uniform float spacing;
varying float dist; // distance along path for this vertex
varying vec2 ij;
uniform sampler2D offset;
uniform float seg_len;
uniform int n_iter; // iterations to integrate along field per vertex
uniform vec2 attractor;
varying vec4 base_color;
uniform sampler2D color;
void main() {
// distance along one line
dist = index.y * seg_len;
vec2 local;
ij = vec2(mod(index.x, shape.x), floor(index.x / shape.x));
// *off* is a random offset to the starting location, which prevents
// the appearance of combs in the field
vec2 off = texture2D(offset, ij / shape).xy - 0.5;
local = spacing * (ij + off);
vec2 uv;
vec2 dir;
vec2 da;
int index_y = int(index.y);
for( int i=0; i<index.y; i+=1 ) {
for ( int j=0; j<n_iter; j += 1 ) {
uv = local / field_shape;
dir = texture2D(field, uv).xy;
// add influence of variable attractor (mouse)
da = attractor - local;
float al = 0.1 * length(da);
da /= 0.5 * (1 + al*al);
dir += da;
// maybe pick a more accurate integration method?
local += seg_len * dir / n_iter;
}
}
base_color = texture2D(color, uv);
gl_Position = $transform(vec4(local, 0, 1));
}
"""
fragment = """
uniform float time;
uniform float speed;
varying float dist;
varying vec2 ij;
uniform sampler2D offset;
uniform vec2 shape;
uniform float nseg;
uniform float seg_len;
varying vec4 base_color;
void main() {
float totlen = nseg * seg_len;
float phase = texture2D(offset, ij / shape).b;
float alpha;
// vary alpha along the length of the line to give the appearance of
// motion
alpha = mod((dist / totlen) + phase - time * speed, 1);
// add a cosine envelope to fade in and out smoothly at the ends
alpha *= (1 - cos(2 * 3.141592 * dist / totlen)) * 0.5;
gl_FragColor = vec4(base_color.rgb, base_color.a * alpha);
}
"""
def __init__(self, field, spacing=10, segments=3, seg_len=0.5,
color=(1, 1, 1, 0.3)):
self._time = 0.0
self._last_time = ptime.time()
rows = int(field.shape[0] / spacing)
cols = int(field.shape[1] / spacing)
index = np.empty((rows * cols, int(segments) * 2, 2), dtype=np.float32)
# encodes starting position within vector field
index[:, :, 0] = np.arange(rows * cols)[:, np.newaxis]
# encodes distance along length of line
index[:, ::2, 1] = np.arange(segments)[np.newaxis, :]
index[:, 1::2, 1] = np.arange(segments)[np.newaxis, :] + 1
self._index = gloo.VertexBuffer(index)
if not isinstance(color, np.ndarray):
color = np.array([[list(color)]], dtype='float32')
self._color = gloo.Texture2D(color)
offset = np.random.uniform(256, size=(rows, cols, 3)).astype(np.ubyte)
self._offset = gloo.Texture2D(offset, format='rgb')
self._field = gloo.Texture2D(field, format='rg',
internalformat='rg32f',
interpolation='linear')
self._field_shape = field.shape[:2]
visuals.Visual.__init__(self, vcode=self.vertex, fcode=self.fragment)
self.timer = app.Timer(interval='auto', connect=self.update_time,
start=False)
self.freeze()
self.shared_program['field'] = self._field
self.shared_program['field_shape'] = self._field.shape[:2]
self.shared_program['shape'] = (rows, cols)
self.shared_program['index'] = self._index
self.shared_program['spacing'] = spacing
self.shared_program['t'] = self._time
self.shared_program['offset'] = self._offset
self.shared_program['speed'] = 1
self.shared_program['color'] = self._color
self.shared_program['seg_len'] = seg_len
self.shared_program['nseg'] = segments
self.shared_program['n_iter'] = 1
self.shared_program['attractor'] = (0, 0)
self.shared_program['time'] = 0
self._draw_mode = 'lines'
self.set_gl_state('translucent', depth_test=False)
self.timer.start()
def _prepare_transforms(self, view):
view.view_program.vert['transform'] = view.get_transform()
def _prepare_draw(self, view):
pass
def _compute_bounds(self, axis, view):
if axis > 1:
return (0, 0)
return (0, self._field_shape[axis])
def update_time(self, ev):
t = ptime.time()
self._time += t - self._last_time
self._last_time = t
self.shared_program['time'] = self._time
self.update()
VectorField = scene.visuals.create_visual_node(VectorFieldVisual)
def fn(y, x):
dx = x-50
dy = y-30
hyp = (dx**2 + dy**2)**0.5 + 0.01
return np.array([100 * dy / hyp**1.7, -100 * dx / hyp**1.8])
field = np.fromfunction(fn, (100, 100)).transpose(1, 2, 0).astype('float32')
field[..., 0] += 10 * np.cos(np.linspace(0, 2 * 3.1415, 100))
color = np.zeros((100, 100, 4), dtype='float32')
color[..., :2] = (field + 5) / 10.
color[..., 2] = 0.5
color[..., 3] = 0.5
canvas = scene.SceneCanvas(keys='interactive', show=True)
view = canvas.central_widget.add_view(camera='panzoom')
vfield = VectorField(field[..., :2], spacing=0.5, segments=30, seg_len=0.05,
parent=view.scene, color=color)
view.camera.set_range()
@canvas.connect
def on_mouse_move(event):
if 3 in event.buttons:
tr = canvas.scene.node_transform(vfield)
vfield.shared_program['attractor'] = tr.map(event.pos)[:2]
if __name__ == '__main__':
app.run()
| {
"content_hash": "270fdba0e73c20b7fe23255cdec26c71",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 79,
"avg_line_length": 33.442708333333336,
"alnum_prop": 0.5530291231895343,
"repo_name": "Eric89GXL/vispy",
"id": "471e56885940a820587c2418ebdcecff6a96018c",
"size": "6741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/demo/scene/flow_lines.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143081"
},
{
"name": "GLSL",
"bytes": "195460"
},
{
"name": "JavaScript",
"bytes": "5007"
},
{
"name": "Makefile",
"bytes": "1638"
},
{
"name": "PowerShell",
"bytes": "4078"
},
{
"name": "Python",
"bytes": "2461885"
}
],
"symlink_target": ""
} |
from pymongo import Connection
connection = Connection('localhost', 27017)
db = connection.gobblerbot
| {
"content_hash": "fc7e73314bac5151e938d4f6b902cece",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 43,
"avg_line_length": 20.8,
"alnum_prop": 0.7980769230769231,
"repo_name": "alxlit/gobblerbot",
"id": "eb6dcaa58bc2d807dd5ca2e2bb13aecdb960e031",
"size": "127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot/db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "4197"
},
{
"name": "Python",
"bytes": "15675"
}
],
"symlink_target": ""
} |
"""Keras backend API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import json
import os
import threading
import weakref
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_module
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradients_module
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
py_all = all
py_sum = sum
# INTERNAL UTILS
# The internal graph maintained by Keras and used by the symbolic Keras APIs
# while executing eagerly (such as the functional API for model-building).
_GRAPH = None
# This is a thread local object that will hold the default internal TF session
# used by Keras. It can be set manually via `set_session(sess)`.
_SESSION = threading.local()
# This dictionary holds a mapping {graph: learning_phase}.
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
_GRAPH_LEARNING_PHASES = weakref.WeakKeyDictionary()
# _DUMMY_EAGER_GRAPH is used as a key in _GRAPH_LEARNING_PHASES.
# We keep a separate reference to it to make sure it does not get removed from
# _GRAPH_LEARNING_PHASES. We use a dummy class instead of something like a
# string because strings are not weakly-referencable.
class _DummyEagerGraph(object):
pass
_DUMMY_EAGER_GRAPH = _DummyEagerGraph()
# This boolean flag can be set to True to leave variable initialization
# up to the user.
# Change its value via `manual_variable_initialization(value)`.
_MANUAL_VAR_INIT = False
# The type of float to use throughout a session.
_FLOATX = 'float32'
# Epsilon fuzz factor used throughout the codebase.
_EPSILON = 1e-7
# Default image data format, one of "channels_last", "channels_first".
_IMAGE_DATA_FORMAT = 'channels_last'
# This list holds the available devices.
# It is populated when `_get_available_gpus()` is called for the first time.
# We assume our devices don't change henceforth.
_LOCAL_DEVICES = None
# This dictionary holds a mapping between a graph and variables to initialize
# in the graph.
_GRAPH_VARIABLES = weakref.WeakKeyDictionary()
# This dictionary holds a mapping between a graph and TF optimizers created in
# the graph.
_GRAPH_TF_OPTIMIZERS = weakref.WeakKeyDictionary()
@tf_export('keras.backend.backend')
def backend():
"""Publicly accessible method for determining the current backend.
Only exists for API compatibility with multi-backend Keras.
Returns:
The string "tensorflow".
"""
return 'tensorflow'
@tf_export('keras.backend.epsilon')
def epsilon():
"""Returns the value of the fuzz factor used in numeric expressions.
Returns:
A float.
Example:
```python
>>> keras.backend.epsilon()
1e-07
```
"""
return _EPSILON
@tf_export('keras.backend.set_epsilon')
def set_epsilon(value):
"""Sets the value of the fuzz factor used in numeric expressions.
Arguments:
value: float. New value of epsilon.
Example:
```python
>>> from keras import backend as K
>>> K.epsilon()
1e-07
>>> K.set_epsilon(1e-05)
>>> K.epsilon()
1e-05
```
"""
global _EPSILON
_EPSILON = value
@tf_export('keras.backend.floatx')
def floatx():
"""Returns the default float type, as a string.
E.g. 'float16', 'float32', 'float64'.
Returns:
String, the current default float type.
Example:
```python
>>> keras.backend.floatx()
'float32'
```
"""
return _FLOATX
@tf_export('keras.backend.set_floatx')
def set_floatx(value):
"""Sets the default float type.
Arguments:
value: String; 'float16', 'float32', or 'float64'.
Example:
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> K.set_floatx('float16')
>>> K.floatx()
'float16'
```
Raises:
ValueError: In case of invalid value.
"""
global _FLOATX
if value not in {'float16', 'float32', 'float64'}:
raise ValueError('Unknown floatx type: ' + str(value))
_FLOATX = str(value)
@tf_export('keras.backend.cast_to_floatx')
def cast_to_floatx(x):
"""Cast a Numpy array to the default Keras float type.
Arguments:
x: Numpy array.
Returns:
The same Numpy array, cast to its new type.
Example:
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> arr = numpy.array([1.0, 2.0], dtype='float64')
>>> arr.dtype
dtype('float64')
>>> new_arr = K.cast_to_floatx(arr)
>>> new_arr
array([ 1., 2.], dtype=float32)
>>> new_arr.dtype
dtype('float32')
```
"""
return np.asarray(x, dtype=_FLOATX)
@tf_export('keras.backend.image_data_format')
def image_data_format():
"""Returns the default image data format convention.
Returns:
A string, either `'channels_first'` or `'channels_last'`
Example:
```python
>>> keras.backend.image_data_format()
'channels_first'
```
"""
return _IMAGE_DATA_FORMAT
@tf_export('keras.backend.set_image_data_format')
def set_image_data_format(data_format):
"""Sets the value of the image data format convention.
Arguments:
data_format: string. `'channels_first'` or `'channels_last'`.
Example:
```python
>>> from keras import backend as K
>>> K.image_data_format()
'channels_first'
>>> K.set_image_data_format('channels_last')
>>> K.image_data_format()
'channels_last'
```
Raises:
ValueError: In case of invalid `data_format` value.
"""
global _IMAGE_DATA_FORMAT
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('Unknown data_format: ' + str(data_format))
_IMAGE_DATA_FORMAT = str(data_format)
# A global dictionary mapping graph objects to an index of counters used
# for various layer names in each graph.
# Allows to give unique autogenerated names to layers, in a graph-specific way.
PER_GRAPH_LAYER_NAME_UIDS = weakref.WeakKeyDictionary()
@tf_export('keras.backend.get_uid')
def get_uid(prefix=''):
"""Associates a string prefix with an integer counter in a TensorFlow graph.
Arguments:
prefix: String prefix to index.
Returns:
Unique integer ID.
Example:
```
>>> get_uid('dense')
1
>>> get_uid('dense')
2
```
"""
graph = get_graph()
if graph not in PER_GRAPH_LAYER_NAME_UIDS:
PER_GRAPH_LAYER_NAME_UIDS[graph] = collections.defaultdict(int)
layer_name_uids = PER_GRAPH_LAYER_NAME_UIDS[graph]
layer_name_uids[prefix] += 1
return layer_name_uids[prefix]
@tf_export('keras.backend.reset_uids')
def reset_uids():
"""Resets graph identifiers.
"""
per_graph_layer_name_uids = PER_GRAPH_LAYER_NAME_UIDS
keys = list(per_graph_layer_name_uids.keys())
for key in keys:
del per_graph_layer_name_uids[key]
@tf_export('keras.backend.clear_session')
def clear_session():
"""Destroys the current TF graph and creates a new one.
Useful to avoid clutter from old models / layers.
"""
global _SESSION
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
global _GRAPH_VARIABLES # pylint: disable=global-variable-not-assigned
global _GRAPH_TF_OPTIMIZERS # pylint: disable=global-variable-not-assigned
ops.reset_default_graph()
reset_uids()
_SESSION.session = None
graph = get_graph()
with graph.as_default():
phase = array_ops.placeholder_with_default(
False, shape=(), name='keras_learning_phase')
_GRAPH_LEARNING_PHASES = {}
_GRAPH_LEARNING_PHASES[graph] = phase
_GRAPH_VARIABLES.pop(graph, None)
_GRAPH_TF_OPTIMIZERS.pop(graph, None)
@tf_export('keras.backend.manual_variable_initialization')
def manual_variable_initialization(value):
"""Sets the manual variable initialization flag.
This boolean flag determines whether
variables should be initialized
as they are instantiated (default), or if
the user should handle the initialization
(e.g. via `tf.initialize_all_variables()`).
Arguments:
value: Python boolean.
"""
global _MANUAL_VAR_INIT
_MANUAL_VAR_INIT = value
@tf_export('keras.backend.learning_phase')
def learning_phase():
"""Returns the learning phase flag.
The learning phase flag is a bool tensor (0 = test, 1 = train)
to be passed as input to any Keras function
that uses a different behavior at train time and test time.
Returns:
Learning phase (scalar integer tensor or Python integer).
"""
if context.executing_eagerly():
if _DUMMY_EAGER_GRAPH not in _GRAPH_LEARNING_PHASES:
# Fallback to inference mode as default.
return 0
return _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
return symbolic_learning_phase()
def symbolic_learning_phase():
graph = get_graph()
with graph.as_default():
if graph not in _GRAPH_LEARNING_PHASES:
phase = array_ops.placeholder_with_default(
False, shape=(), name='keras_learning_phase')
_GRAPH_LEARNING_PHASES[graph] = phase
return _GRAPH_LEARNING_PHASES[graph]
@tf_export('keras.backend.set_learning_phase')
def set_learning_phase(value):
"""Sets the learning phase to a fixed value.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
with ops.init_scope():
if context.executing_eagerly():
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
else:
_GRAPH_LEARNING_PHASES[get_graph()] = value
@tf_contextlib.contextmanager
def learning_phase_scope(value):
"""Provides a scope within which the learning phase is equal to `value`.
The learning phase gets restored to its original value upon exiting the scope.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Yields:
The provided value.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
previous_value = learning_phase()
try:
set_learning_phase(value)
yield value
finally:
# Restore learning phase to initial value.
with ops.init_scope():
if context.executing_eagerly():
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_value
else:
_GRAPH_LEARNING_PHASES[get_graph()] = previous_value
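# Usage sketch (illustrative): the scope temporarily pins the learning phase,
# e.g. to force training-time behavior such as active dropout, and restores
# the previous value on exit.
#
#   with K.learning_phase_scope(1):
#     assert K.learning_phase() == 1
#   # outside the scope, the original phase (symbolic or integer) is restored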
def _get_session():
"""Returns the session object for the current thread."""
global _SESSION
default_session = ops.get_default_session()
if default_session is not None:
session = default_session
else:
if getattr(_SESSION, 'session', None) is None:
_SESSION.session = session_module.Session(
config=get_default_session_config())
session = _SESSION.session
return session
@tf_export(v1=['keras.backend.get_session'])
def get_session():
"""Returns the TF session to be used by the backend.
If a default TensorFlow session is available, we will return it.
Else, we will return the global Keras session.
If no global Keras session exists at this point:
we will create a new global session.
Note that you can manually set the global session
via `K.set_session(sess)`.
Returns:
A TensorFlow session.
"""
session = _get_session()
if not _MANUAL_VAR_INIT:
with session.graph.as_default():
_initialize_variables(session)
return session
def get_graph():
if context.executing_eagerly():
global _GRAPH
if _GRAPH is None:
_GRAPH = func_graph.FuncGraph('keras_graph')
return _GRAPH
else:
return ops.get_default_graph()
@tf_export('keras.backend.set_session')
def set_session(session):
"""Sets the global TensorFlow session.
Arguments:
session: A TF Session.
"""
global _SESSION
_SESSION.session = session
def get_default_session_config():
if not os.environ.get('OMP_NUM_THREADS'):
config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
num_thread = int(os.environ.get('OMP_NUM_THREADS'))
config = config_pb2.ConfigProto(
intra_op_parallelism_threads=num_thread, allow_soft_placement=True)
return config
# DEVICE MANIPULATION
class _TfDeviceCaptureOp(object):
"""Class for capturing the TF device scope."""
def __init__(self):
self.device = None
def _set_device(self, device):
"""This method captures TF's explicit device scope setting."""
self.device = device
def _get_current_tf_device():
"""Return explicit device of current context, otherwise returns `None`.
Returns:
If the current device scope is explicitly set, it returns a string with
the device (`CPU` or `GPU`). If the scope is not explicitly set, it will
return `None`.
"""
graph = get_graph()
op = _TfDeviceCaptureOp()
graph._apply_device_functions(op)
return op.device
def _is_current_explicit_device(device_type):
"""Check if the current device is explicitly set on the device type specified.
Arguments:
device_type: A string containing `GPU` or `CPU` (case-insensitive).
Returns:
A boolean indicating if the current device scope is explicitly set on the
device type.
Raises:
ValueError: If the `device_type` string indicates an unsupported device.
"""
device_type = device_type.upper()
if device_type not in ['CPU', 'GPU']:
raise ValueError('`device_type` should be either "CPU" or "GPU".')
device = _get_current_tf_device()
return device is not None and device.device_type == device_type.upper()
def _get_available_gpus():
"""Get a list of available gpu devices (formatted as strings).
Returns:
A list of available GPU devices.
"""
global _LOCAL_DEVICES
if _LOCAL_DEVICES is None:
_LOCAL_DEVICES = get_session().list_devices()
return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU']
def _has_nchw_support():
"""Check whether the current scope supports NCHW ops.
  TensorFlow does not support NCHW on CPU. Therefore we check whether we are
  not explicitly placed on the CPU and whether GPUs are available; in that
  case soft placement will put the ops on a GPU device.
Returns:
bool: if the current scope device placement would support nchw
"""
explicitly_on_cpu = _is_current_explicit_device('CPU')
gpus_available = bool(_get_available_gpus())
return not explicitly_on_cpu and gpus_available
# VARIABLE MANIPULATION
def _to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
Arguments:
x: An object to be converted (numpy array, list, tensors).
dtype: The destination type.
Returns:
A tensor.
"""
return ops.convert_to_tensor(x, dtype=dtype)
@tf_export('keras.backend.is_sparse')
def is_sparse(tensor):
"""Returns whether a tensor is a sparse tensor.
Arguments:
tensor: A tensor instance.
Returns:
A boolean.
Example:
```python
>>> from keras import backend as K
>>> a = K.placeholder((2, 2), sparse=False)
>>> print(K.is_sparse(a))
False
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
```
"""
return isinstance(tensor, sparse_tensor.SparseTensor)
@tf_export('keras.backend.to_dense')
def to_dense(tensor):
"""Converts a sparse tensor into a dense tensor and returns it.
Arguments:
tensor: A tensor instance (potentially sparse).
Returns:
A dense tensor.
Examples:
```python
>>> from keras import backend as K
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
>>> c = K.to_dense(b)
>>> print(K.is_sparse(c))
False
```
"""
if is_sparse(tensor):
return sparse_ops.sparse_tensor_to_dense(tensor)
else:
return tensor
name_scope = ops.name_scope
@tf_export('keras.backend.variable')
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
Arguments:
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
Returns:
A variable instance (with Keras metadata included).
Examples:
```python
>>> import numpy as np
>>> from keras import backend as K
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val, dtype='float64', name='example_var')
>>> K.dtype(kvar)
'float64'
>>> print(kvar)
example_var
>>> kvar.eval()
array([[ 1., 2.],
[ 3., 4.]])
```
"""
if dtype is None:
dtype = floatx()
if hasattr(value, 'tocoo'):
sparse_coo = value.tocoo()
indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(
sparse_coo.col, 1)), 1)
v = sparse_tensor.SparseTensor(
indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)
v._keras_shape = sparse_coo.shape
return v
v = resource_variable_ops.ResourceVariable(
value,
dtype=dtypes_module.as_dtype(dtype),
name=name,
constraint=constraint)
if isinstance(value, np.ndarray):
v._keras_shape = value.shape
elif hasattr(value, 'shape'):
v._keras_shape = int_shape(value)
track_variable(v)
return v
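# Sketch of the sparse branch above (assumes SciPy is installed; `scipy` is not
# imported by this module): any value exposing `.tocoo()` is converted to a
# `SparseTensor` rather than a `ResourceVariable`.
#
#   import scipy.sparse
#   sp = scipy.sparse.eye(3, format='csr')
#   v = K.variable(sp)   # returns a SparseTensor
#   K.is_sparse(v)       # -> True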
def track_tf_optimizer(tf_optimizer):
"""Tracks the given TF optimizer for initialization of its variables."""
if context.executing_eagerly():
return
graph = get_graph()
optimizers = _GRAPH_TF_OPTIMIZERS.setdefault(graph, weakref.WeakSet())
optimizers.add(tf_optimizer)
def track_variable(v):
"""Tracks the given variable for initialization."""
if context.executing_eagerly():
return
graph = v.graph if hasattr(v, 'graph') else get_graph()
if graph not in _GRAPH_VARIABLES:
_GRAPH_VARIABLES[graph] = weakref.WeakSet()
_GRAPH_VARIABLES[graph].add(v)
def _get_variables(graph=None):
"""Returns variables corresponding to the given graph for initialization."""
assert not context.executing_eagerly()
variables = _GRAPH_VARIABLES.setdefault(graph, weakref.WeakSet())
for opt in _GRAPH_TF_OPTIMIZERS.get(graph, set()):
variables.update(opt.optimizer.variables())
return variables
def _initialize_variables(session):
"""Utility to initialize uninitialized variables on the fly."""
variables = _get_variables(get_graph())
candidate_vars = []
for v in variables:
if not getattr(v, '_keras_initialized', False):
candidate_vars.append(v)
if candidate_vars:
# This step is expensive, so we only run it on variables not already
# marked as initialized.
is_initialized = session.run(
[variables_module.is_variable_initialized(v) for v in candidate_vars])
uninitialized_vars = []
for flag, v in zip(is_initialized, candidate_vars):
if not flag:
uninitialized_vars.append(v)
v._keras_initialized = True
if uninitialized_vars:
session.run(variables_module.variables_initializer(uninitialized_vars))
@tf_export('keras.backend.constant')
def constant(value, dtype=None, shape=None, name=None):
"""Creates a constant tensor.
Arguments:
value: A constant value (or list)
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
Returns:
A Constant Tensor.
"""
if dtype is None:
dtype = floatx()
return constant_op.constant(value, dtype=dtype, shape=shape, name=name)
def is_keras_tensor(x):
"""Returns whether `x` is a Keras tensor.
A "Keras tensor" is a tensor that was returned by a Keras layer,
(`Layer` class) or by `Input`.
Arguments:
x: A candidate tensor.
Returns:
A boolean: Whether the argument is a Keras tensor.
Raises:
ValueError: In case `x` is not a symbolic tensor.
Examples:
```python
>>> import tensorflow as tf
>>> import numpy
>>> from keras import backend as K
>>> from keras.layers import Input, Dense
>>> np_var = numpy.array([1, 2])
>>> K.is_keras_tensor(np_var) # A numpy array is not a symbolic tensor.
ValueError
>>> k_var = tf.placeholder('float32', shape=(1,1))
>>> K.is_keras_tensor(k_var) # A variable indirectly created outside of
keras is not a Keras tensor.
False
>>> keras_var = K.variable(np_var)
>>> K.is_keras_tensor(keras_var) # A variable created with the keras
backend is not a Keras tensor.
False
>>> keras_placeholder = K.placeholder(shape=(2, 4, 5))
>>> K.is_keras_tensor(keras_placeholder) # A placeholder is not a Keras
tensor.
False
>>> keras_input = Input([10])
>>> K.is_keras_tensor(keras_input) # An Input is a Keras tensor.
True
>>> keras_layer_output = Dense(10)(keras_input)
>>> K.is_keras_tensor(keras_layer_output) # Any Keras layer output is a
Keras tensor.
True
```
"""
if not isinstance(x, (ops.Tensor,
variables_module.Variable,
sparse_tensor.SparseTensor)):
raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) +
'`. Expected a symbolic tensor instance.')
return hasattr(x, '_keras_history')
@tf_export('keras.backend.placeholder')
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
"""Instantiates a placeholder tensor and returns it.
Arguments:
shape: Shape of the placeholder
(integer tuple, may include `None` entries).
ndim: Number of axes of the tensor.
At least one of {`shape`, `ndim`} must be specified.
If both are specified, `shape` is used.
dtype: Placeholder type.
sparse: Boolean, whether the placeholder should have a sparse type.
name: Optional name string for the placeholder.
Raises:
ValueError: If called with eager execution.
Returns:
Tensor instance (with Keras metadata included).
Examples:
```python
>>> from keras import backend as K
>>> input_ph = K.placeholder(shape=(2, 4, 5))
>>> input_ph
<tf.Tensor 'Placeholder_4:0' shape=(2, 4, 5) dtype=float32>
```
"""
if dtype is None:
dtype = floatx()
if not shape:
if ndim:
shape = tuple([None for _ in range(ndim)])
with get_graph().as_default():
if sparse:
x = array_ops.sparse_placeholder(dtype, shape=shape, name=name)
else:
x = array_ops.placeholder(dtype, shape=shape, name=name)
return x
def is_placeholder(x):
"""Returns whether `x` is a placeholder.
Arguments:
x: A candidate placeholder.
Returns:
Boolean.
"""
try:
return x.op.type == 'Placeholder'
except AttributeError:
return False
@tf_export('keras.backend.shape')
def shape(x):
"""Returns the symbolic shape of a tensor or variable.
Arguments:
x: A tensor or variable.
Returns:
A symbolic shape (which is itself a tensor).
Examples:
```python
# TensorFlow example
>>> from keras import backend as K
>>> tf_session = K.get_session()
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> input = keras.backend.placeholder(shape=(2, 4, 5))
>>> K.shape(kvar)
<tf.Tensor 'Shape_8:0' shape=(2,) dtype=int32>
>>> K.shape(input)
<tf.Tensor 'Shape_9:0' shape=(3,) dtype=int32>
# To get integer shape (Instead, you can use K.int_shape(x))
>>> K.shape(kvar).eval(session=tf_session)
array([2, 2], dtype=int32)
>>> K.shape(input).eval(session=tf_session)
array([2, 4, 5], dtype=int32)
```
"""
return array_ops.shape(x)
@tf_export('keras.backend.int_shape')
def int_shape(x):
"""Returns the shape of tensor or variable as a tuple of int or None entries.
Arguments:
x: Tensor or variable.
Returns:
A tuple of integers (or None entries).
Examples:
```python
>>> from keras import backend as K
>>> input = K.placeholder(shape=(2, 4, 5))
>>> K.int_shape(input)
(2, 4, 5)
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.int_shape(kvar)
(2, 2)
```
"""
try:
shape = x.shape
if not isinstance(shape, tuple):
shape = tuple(shape.as_list())
return shape
except ValueError:
return None
@tf_export('keras.backend.ndim')
def ndim(x):
"""Returns the number of axes in a tensor, as an integer.
Arguments:
x: Tensor or variable.
Returns:
Integer (scalar), number of axes.
Examples:
```python
>>> from keras import backend as K
>>> input = K.placeholder(shape=(2, 4, 5))
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.ndim(input)
3
>>> K.ndim(kvar)
2
```
"""
dims = x.shape._dims
if dims is not None:
return len(dims)
return None
@tf_export('keras.backend.dtype')
def dtype(x):
"""Returns the dtype of a Keras tensor or variable, as a string.
Arguments:
x: Tensor or variable.
Returns:
String, dtype of `x`.
Examples:
```python
>>> from keras import backend as K
>>> K.dtype(K.placeholder(shape=(2,4,5)))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float32'))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float64'))
'float64'
# Keras variable
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]))
>>> K.dtype(kvar)
'float32_ref'
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.dtype(kvar)
'float32_ref'
```
"""
return x.dtype.base_dtype.name
@tf_export('keras.backend.eval')
def eval(x):
"""Evaluates the value of a variable.
Arguments:
x: A variable.
Returns:
A Numpy array.
Examples:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.eval(kvar)
array([[ 1., 2.],
[ 3., 4.]], dtype=float32)
```
"""
return get_value(to_dense(x))
@tf_export('keras.backend.zeros')
def zeros(shape, dtype=None, name=None):
"""Instantiates an all-zeros variable and returns it.
Arguments:
shape: Tuple of integers, shape of returned Keras variable
dtype: String, data type of returned Keras variable
name: String, name of returned Keras variable
Returns:
A variable (including Keras metadata), filled with `0.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.zeros((3,4))
>>> K.eval(kvar)
array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]], dtype=float32)
```
"""
with ops.init_scope():
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.zeros(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
track_variable(v)
return v
@tf_export('keras.backend.ones')
def ones(shape, dtype=None, name=None):
"""Instantiates an all-ones variable and returns it.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, filled with `1.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.ones((3,4))
>>> K.eval(kvar)
array([[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.]], dtype=float32)
```
"""
with ops.init_scope():
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
track_variable(v)
return v
@tf_export('keras.backend.eye')
def eye(size, dtype=None, name=None):
"""Instantiate an identity matrix and returns it.
Arguments:
size: Integer, number of rows/columns.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, an identity matrix.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.eye(3)
>>> K.eval(kvar)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
return variable(linalg_ops.eye(size, dtype=tf_dtype), dtype, name)
@tf_export('keras.backend.zeros_like')
def zeros_like(x, dtype=None, name=None):
"""Instantiates an all-zeros variable of the same shape as another tensor.
Arguments:
x: Keras variable or Keras tensor.
dtype: String, dtype of returned Keras variable.
None uses the dtype of x.
name: String, name for the variable to create.
Returns:
A Keras variable with the shape of x filled with zeros.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_zeros = K.zeros_like(kvar)
>>> K.eval(kvar_zeros)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
"""
return array_ops.zeros_like(x, dtype=dtype, name=name)
@tf_export('keras.backend.ones_like')
def ones_like(x, dtype=None, name=None):
"""Instantiates an all-ones variable of the same shape as another tensor.
Arguments:
x: Keras variable or tensor.
dtype: String, dtype of returned Keras variable.
None uses the dtype of x.
name: String, name for the variable to create.
Returns:
A Keras variable with the shape of x filled with ones.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_ones = K.ones_like(kvar)
>>> K.eval(kvar_ones)
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
```
"""
return array_ops.ones_like(x, dtype=dtype, name=name)
def identity(x, name=None):
"""Returns a tensor with the same content as the input tensor.
Arguments:
x: The input tensor.
name: String, name for the variable to create.
Returns:
A tensor of the same shape, type and content.
"""
return array_ops.identity(x, name=name)
@tf_export('keras.backend.random_uniform_variable')
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
"""Instantiates a variable with values drawn from a uniform distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
low: Float, lower boundary of the output interval.
high: Float, upper boundary of the output interval.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
```python
# TensorFlow example
>>> kvar = K.random_uniform_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab40b10>
>>> K.eval(kvar)
array([[ 0.10940075, 0.10047495, 0.476143 ],
[ 0.66137183, 0.00869417, 0.89220798]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_uniform_initializer(
low, high, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@tf_export('keras.backend.random_normal_variable')
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
seed=None):
"""Instantiates a variable with values drawn from a normal distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
mean: Float, mean of the normal distribution.
scale: Float, standard deviation of the normal distribution.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
```python
# TensorFlow example
>>> kvar = K.random_normal_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab12dd0>
>>> K.eval(kvar)
array([[ 1.19591331, 0.68685907, -0.63814116],
[ 0.92629528, 0.28055015, 1.70484698]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_normal_initializer(
mean, scale, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@tf_export('keras.backend.count_params')
def count_params(x):
"""Returns the static number of elements in a variable or tensor.
Arguments:
x: Variable or tensor.
Returns:
Integer, the number of scalars in `x`.
Example:
```python
>>> kvar = K.zeros((2,3))
>>> K.count_params(kvar)
6
>>> K.eval(kvar)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
"""
return np.prod(x.shape.as_list())
@tf_export('keras.backend.cast')
def cast(x, dtype):
"""Casts a tensor to a different dtype and returns it.
You can cast a Keras variable but it still returns a Keras tensor.
Arguments:
x: Keras tensor (or variable).
dtype: String, either (`'float16'`, `'float32'`, or `'float64'`).
Returns:
Keras tensor with dtype `dtype`.
Example:
```python
>>> from keras import backend as K
>>> input = K.placeholder((2, 3), dtype='float32')
>>> input
<tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32>
# It doesn't work in-place as below.
>>> K.cast(input, dtype='float16')
<tf.Tensor 'Cast_1:0' shape=(2, 3) dtype=float16>
>>> input
<tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32>
# you need to assign it.
>>> input = K.cast(input, dtype='float16')
>>> input
<tf.Tensor 'Cast_2:0' shape=(2, 3) dtype=float16>
```
"""
return math_ops.cast(x, dtype)
# UPDATES OPS
@tf_export('keras.backend.update')
def update(x, new_x):
  """Update the value of `x` to `new_x`.
  Arguments:
      x: A Variable.
      new_x: A tensor of same shape as `x`.
  Returns:
      The variable `x` updated.
  """
  return state_ops.assign(x, new_x)
@tf_export('keras.backend.update_add')
def update_add(x, increment):
"""Update the value of `x` by adding `increment`.
Arguments:
x: A Variable.
increment: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign_add(x, increment)
@tf_export('keras.backend.update_sub')
def update_sub(x, decrement):
"""Update the value of `x` by subtracting `decrement`.
Arguments:
x: A Variable.
decrement: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign_sub(x, decrement)
@tf_export('keras.backend.moving_average_update')
def moving_average_update(x, value, momentum):
"""Compute the moving average of a variable.
Arguments:
x: A Variable.
value: A tensor with the same shape as `variable`.
momentum: The moving average momentum.
Returns:
An Operation to update the variable.
"""
# `training` is higher-up than the Keras backend in the abstraction hierarchy.
# In particular, `training` depends on layers, and thus on Keras.
# moving_averages, being low-level ops, should not be part of the training
# module.
from tensorflow.python.training import moving_averages # pylint: disable=g-import-not-at-top
return moving_averages.assign_moving_average(
x, value, momentum, zero_debias=True)
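# Hedged sketch of a typical use (names below are hypothetical): keep a
# non-trainable statistic variable and move it towards a freshly computed
# batch statistic once per training step.
#
#   moving_mean = K.variable(np.zeros((10,)), name='moving_mean')
#   batch_mean = K.mean(some_batch_tensor, axis=0)   # hypothetical tensor
#   update_op = K.moving_average_update(moving_mean, batch_mean, momentum=0.99)
#   # `update_op` is then executed each step, e.g. via a layer's update ops.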
# LINEAR ALGEBRA
@tf_export('keras.backend.dot')
def dot(x, y):
"""Multiplies 2 tensors (and/or variables) and returns a *tensor*.
  When attempting to multiply an nD tensor
  with an nD tensor, it reproduces the Theano behavior.
(e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`)
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor, dot product of `x` and `y`.
Examples:
```python
# dot product between tensors
>>> x = K.placeholder(shape=(2, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(2, 4) dtype=float32>
```
```python
# dot product between tensors
>>> x = K.placeholder(shape=(32, 28, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(32, 28, 4) dtype=float32>
```
```python
# Theano-like behavior example
>>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = K.ones((4, 3, 5))
>>> xy = K.dot(x, y)
>>> K.int_shape(xy)
(2, 4, 5)
```
"""
if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
x_shape = []
for i, s in zip(int_shape(x), array_ops.unstack(array_ops.shape(x))):
if i is not None:
x_shape.append(i)
else:
x_shape.append(s)
x_shape = tuple(x_shape)
y_shape = []
for i, s in zip(int_shape(y), array_ops.unstack(array_ops.shape(y))):
if i is not None:
y_shape.append(i)
else:
y_shape.append(s)
y_shape = tuple(y_shape)
y_permute_dim = list(range(ndim(y)))
y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
xt = array_ops.reshape(x, [-1, x_shape[-1]])
yt = array_ops.reshape(
array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
return array_ops.reshape(
math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
if is_sparse(x):
out = sparse_ops.sparse_tensor_dense_matmul(x, y)
else:
out = math_ops.matmul(x, y)
return out
@tf_export('keras.backend.batch_dot')
def batch_dot(x, y, axes=None):
"""Batchwise dot product.
`batch_dot` is used to compute dot product of `x` and `y` when
`x` and `y` are data in batch, i.e. in a shape of
`(batch_size, :)`.
  `batch_dot` results in a tensor or variable with fewer dimensions
than the input. If the number of dimensions is reduced to 1,
we use `expand_dims` to make sure that ndim is at least 2.
Arguments:
x: Keras tensor or variable with `ndim >= 2`.
y: Keras tensor or variable with `ndim >= 2`.
axes: list of (or single) int with target dimensions.
The lengths of `axes[0]` and `axes[1]` should be the same.
Returns:
A tensor with shape equal to the concatenation of `x`'s shape
(less the dimension that was summed over) and `y`'s shape
(less the batch dimension and the dimension that was summed over).
If the final rank is 1, we reshape it to `(batch_size, 1)`.
Examples:
Assume `x = [[1, 2], [3, 4]]` and `y = [[5, 6], [7, 8]]`
  `batch_dot(x, y, axes=1) = [[17], [53]]` which is the main diagonal
of `x.dot(y.T)`, although we never have to calculate the off-diagonal
elements.
Shape inference:
Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
If `axes` is (1, 2), to find the output shape of resultant tensor,
loop through each dimension in `x`'s shape and `y`'s shape:
* `x.shape[0]` : 100 : append to output shape
* `x.shape[1]` : 20 : do not append to output shape,
      dimension 1 of `x` has been summed over. (`axes[0]` = 1)
* `y.shape[0]` : 100 : do not append to output shape,
always ignore first dimension of `y`
* `y.shape[1]` : 30 : append to output shape
* `y.shape[2]` : 20 : do not append to output shape,
      dimension 2 of `y` has been summed over. (`axes[1]` = 2)
`output_shape` = `(100, 30)`
```python
>>> x_batch = K.ones(shape=(32, 20, 1))
>>> y_batch = K.ones(shape=(32, 30, 20))
>>> xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=[1, 2])
>>> K.int_shape(xy_batch_dot)
(32, 1, 30)
```
"""
if isinstance(axes, int):
axes = (axes, axes)
x_ndim = ndim(x)
y_ndim = ndim(y)
if axes is None:
# behaves like tf.batch_matmul as default
axes = [x_ndim - 1, y_ndim - 2]
if x_ndim > y_ndim:
diff = x_ndim - y_ndim
y = array_ops.reshape(y,
array_ops.concat(
[array_ops.shape(y), [1] * (diff)], axis=0))
elif y_ndim > x_ndim:
diff = y_ndim - x_ndim
x = array_ops.reshape(x,
array_ops.concat(
[array_ops.shape(x), [1] * (diff)], axis=0))
else:
diff = 0
if ndim(x) == 2 and ndim(y) == 2:
if axes[0] == axes[1]:
out = math_ops.reduce_sum(math_ops.multiply(x, y), axes[0])
else:
out = math_ops.reduce_sum(
math_ops.multiply(array_ops.transpose(x, [1, 0]), y), axes[1])
else:
adj_x = None if axes[0] == ndim(x) - 1 else True
adj_y = True if axes[1] == ndim(y) - 1 else None
out = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
if diff:
if x_ndim > y_ndim:
idx = x_ndim + y_ndim - 3
else:
idx = x_ndim - 1
out = array_ops.squeeze(out, list(range(idx, idx + diff)))
if ndim(out) == 1:
out = expand_dims(out, 1)
return out
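# Worked check of the 2D case in the docstring above (illustrative): with
# x = [[1, 2], [3, 4]] and y = [[5, 6], [7, 8]], summing over axis 1 yields the
# diagonal of x.dot(y.T), reshaped to (batch_size, 1).
#
#   x = K.variable(np.array([[1., 2.], [3., 4.]]))
#   y = K.variable(np.array([[5., 6.], [7., 8.]]))
#   K.eval(K.batch_dot(x, y, axes=1))   # -> [[17.], [53.]]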
@tf_export('keras.backend.transpose')
def transpose(x):
"""Transposes a tensor and returns it.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
Examples:
```python
>>> var = K.variable([[1, 2, 3], [4, 5, 6]])
>>> K.eval(var)
array([[ 1., 2., 3.],
[ 4., 5., 6.]], dtype=float32)
>>> var_transposed = K.transpose(var)
>>> K.eval(var_transposed)
array([[ 1., 4.],
[ 2., 5.],
[ 3., 6.]], dtype=float32)
```
```python
>>> input = K.placeholder((2, 3))
>>> input
<tf.Tensor 'Placeholder_11:0' shape=(2, 3) dtype=float32>
>>> input_transposed = K.transpose(input)
>>> input_transposed
<tf.Tensor 'transpose_4:0' shape=(3, 2) dtype=float32>
```
"""
return array_ops.transpose(x)
@tf_export('keras.backend.gather')
def gather(reference, indices):
"""Retrieves the elements of indices `indices` in the tensor `reference`.
Arguments:
reference: A tensor.
indices: An integer tensor of indices.
Returns:
A tensor of same type as `reference`.
"""
return array_ops.gather(reference, indices)
# ELEMENT-WISE OPERATIONS
@tf_export('keras.backend.max')
def max(x, axis=None, keepdims=False):
"""Maximum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find maximum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with maximum values of `x`.
"""
return math_ops.reduce_max(x, axis, keepdims)
@tf_export('keras.backend.min')
def min(x, axis=None, keepdims=False):
"""Minimum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find minimum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
      A tensor with minimum values of `x`.
"""
return math_ops.reduce_min(x, axis, keepdims)
@tf_export('keras.backend.sum')
def sum(x, axis=None, keepdims=False):
"""Sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to sum over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with sum of `x`.
"""
return math_ops.reduce_sum(x, axis, keepdims)
@tf_export('keras.backend.prod')
def prod(x, axis=None, keepdims=False):
"""Multiplies the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the product of elements of `x`.
"""
return math_ops.reduce_prod(x, axis, keepdims)
def cumsum(x, axis=0):
"""Cumulative sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the sum.
Returns:
A tensor of the cumulative sum of values of `x` along `axis`.
"""
return math_ops.cumsum(x, axis=axis)
def cumprod(x, axis=0):
"""Cumulative product of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
Returns:
A tensor of the cumulative product of values of `x` along `axis`.
"""
return math_ops.cumprod(x, axis=axis)
@tf_export('keras.backend.var')
def var(x, axis=None, keepdims=False):
"""Variance of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the variance.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the variance of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_variance(x, axis=axis, keepdims=keepdims)
@tf_export('keras.backend.std')
def std(x, axis=None, keepdims=False):
"""Standard deviation of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the standard deviation.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the standard deviation of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_std(x, axis=axis, keepdims=keepdims)
@tf_export('keras.backend.mean')
def mean(x, axis=None, keepdims=False):
"""Mean of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
      axis: A list of integers. Axes to compute the mean.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1 for each entry in `axis`. If `keepdims` is `True`,
the reduced dimensions are retained with length 1.
Returns:
A tensor with the mean of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_mean(x, axis, keepdims)
@tf_export('keras.backend.any')
def any(x, axis=None, keepdims=False):
"""Bitwise reduction (logical OR).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
      keepdims: whether to drop or broadcast the reduction axes.
  Returns:
      A bool tensor.
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_any(x, axis, keepdims)
@tf_export('keras.backend.all')
def all(x, axis=None, keepdims=False):
"""Bitwise reduction (logical AND).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
      keepdims: whether to drop or broadcast the reduction axes.
  Returns:
      A bool tensor.
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_all(x, axis, keepdims)
@tf_export('keras.backend.argmax')
def argmax(x, axis=-1):
"""Returns the index of the maximum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
"""
return math_ops.argmax(x, axis)
@tf_export('keras.backend.argmin')
def argmin(x, axis=-1):
"""Returns the index of the minimum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
"""
return math_ops.argmin(x, axis)
@tf_export('keras.backend.square')
def square(x):
"""Element-wise square.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.square(x)
@tf_export('keras.backend.abs')
def abs(x):
"""Element-wise absolute value.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.abs(x)
@tf_export('keras.backend.sqrt')
def sqrt(x):
"""Element-wise square root.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
zero = _to_tensor(0., x.dtype.base_dtype)
inf = _to_tensor(np.inf, x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, inf)
return math_ops.sqrt(x)
@tf_export('keras.backend.exp')
def exp(x):
"""Element-wise exponential.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.exp(x)
@tf_export('keras.backend.log')
def log(x):
"""Element-wise log.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.log(x)
def logsumexp(x, axis=None, keepdims=False):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
This function is more numerically stable than log(sum(exp(x))).
It avoids overflows caused by taking the exp of large inputs and
underflows caused by taking the log of small inputs.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to reduce over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`, the reduced dimension is
retained with length 1.
Returns:
The reduced tensor.
"""
return math_ops.reduce_logsumexp(x, axis, keepdims)
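# Numerical sketch (illustrative): the result matches the naive
# log(sum(exp(x))) for moderate inputs but stays finite for large ones.
#
#   x = K.variable(np.array([1000., 1000.]))
#   K.eval(K.log(K.sum(K.exp(x))))   # -> inf (exp overflows in float32)
#   K.eval(K.logsumexp(x))           # -> ~1000.693, i.e. 1000 + log(2)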
@tf_export('keras.backend.round')
def round(x):
"""Element-wise rounding to the closest integer.
In case of tie, the rounding mode used is "half to even".
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.round(x)
@tf_export('keras.backend.sign')
def sign(x):
"""Element-wise sign.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sign(x)
@tf_export('keras.backend.pow')
def pow(x, a):
"""Element-wise exponentiation.
Arguments:
x: Tensor or variable.
a: Python integer.
Returns:
A tensor.
"""
return math_ops.pow(x, a)
@tf_export('keras.backend.clip')
def clip(x, min_value, max_value):
"""Element-wise value clipping.
Arguments:
x: Tensor or variable.
min_value: Python float or integer.
max_value: Python float or integer.
Returns:
A tensor.
"""
if max_value is not None and max_value < min_value:
max_value = min_value
if max_value is None:
max_value = np.inf
min_value = _to_tensor(min_value, x.dtype.base_dtype)
max_value = _to_tensor(max_value, x.dtype.base_dtype)
return clip_ops.clip_by_value(x, min_value, max_value)
@tf_export('keras.backend.equal')
def equal(x, y):
"""Element-wise equality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.equal(x, y)
@tf_export('keras.backend.not_equal')
def not_equal(x, y):
"""Element-wise inequality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.not_equal(x, y)
@tf_export('keras.backend.greater')
def greater(x, y):
"""Element-wise truth value of (x > y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater(x, y)
@tf_export('keras.backend.greater_equal')
def greater_equal(x, y):
"""Element-wise truth value of (x >= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater_equal(x, y)
@tf_export('keras.backend.less')
def less(x, y):
"""Element-wise truth value of (x < y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less(x, y)
@tf_export('keras.backend.less_equal')
def less_equal(x, y):
"""Element-wise truth value of (x <= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less_equal(x, y)
@tf_export('keras.backend.maximum')
def maximum(x, y):
"""Element-wise maximum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.maximum(x, y)
@tf_export('keras.backend.minimum')
def minimum(x, y):
"""Element-wise minimum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.minimum(x, y)
@tf_export('keras.backend.sin')
def sin(x):
"""Computes sin of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sin(x)
@tf_export('keras.backend.cos')
def cos(x):
"""Computes cos of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.cos(x)
def _regular_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
return normed, mean, var
def _broadcast_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused, broadcast version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
target_shape = []
for axis in range(ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
else:
target_shape.append(array_ops.shape(x)[axis])
target_shape = array_ops.stack(target_shape)
broadcast_mean = array_ops.reshape(mean, target_shape)
broadcast_var = array_ops.reshape(var, target_shape)
if gamma is None:
broadcast_gamma = None
else:
broadcast_gamma = array_ops.reshape(gamma, target_shape)
if beta is None:
broadcast_beta = None
else:
broadcast_beta = array_ops.reshape(beta, target_shape)
normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
broadcast_beta, broadcast_gamma, epsilon)
return normed, mean, var
def _fused_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
if list(reduction_axes) == [0, 1, 2]:
normalization_axis = 3
tf_data_format = 'NHWC'
else:
normalization_axis = 1
tf_data_format = 'NCHW'
if gamma is None:
gamma = constant_op.constant(
1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
if beta is None:
beta = constant_op.constant(
0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
return nn.fused_batch_norm(
x, gamma, beta, epsilon=epsilon, data_format=tf_data_format)
@tf_export('keras.backend.normalize_batch_in_training')
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
"""Computes mean and std for batch then apply batch_normalization on batch.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:
if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
return _fused_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
return _regular_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
@tf_export('keras.backend.batch_normalization')
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
"""Applies batch normalization on x given mean, var, beta and gamma.
I.e. returns:
`output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta`
Arguments:
x: Input tensor or variable.
mean: Mean of batch.
var: Variance of batch.
beta: Tensor with which to center the input.
gamma: Tensor by which to scale the input.
axis: Integer, the axis that should be normalized.
(typically the features axis).
epsilon: Fuzz factor.
Returns:
A tensor.
"""
if ndim(x) == 4:
# The CPU implementation of `fused_batch_norm` only supports NHWC
if axis == 1 or axis == -3:
tf_data_format = 'NCHW'
elif axis == 3 or axis == -1:
tf_data_format = 'NHWC'
else:
tf_data_format = None
if (tf_data_format == 'NHWC' or
tf_data_format == 'NCHW' and _has_nchw_support()):
# The mean / var / beta / gamma tensors may be broadcasted
# so they may have extra axes of size 1, which should be squeezed.
if ndim(mean) > 1:
mean = array_ops.reshape(mean, [-1])
if ndim(var) > 1:
var = array_ops.reshape(var, [-1])
if beta is None:
beta = zeros_like(mean)
elif ndim(beta) > 1:
beta = array_ops.reshape(beta, [-1])
if gamma is None:
gamma = ones_like(mean)
elif ndim(gamma) > 1:
gamma = array_ops.reshape(gamma, [-1])
y, _, _ = nn.fused_batch_norm(
x,
gamma,
beta,
epsilon=epsilon,
mean=mean,
variance=var,
data_format=tf_data_format,
is_training=False
)
return y
return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
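# Relationship sketch (hedged, shapes are illustrative): for a 2D input,
# `normalize_batch_in_training` computes the batch statistics and applies them
# in one call; the two-step form with `batch_normalization` is equivalent.
#
#   x = K.placeholder(shape=(None, 16))
#   gamma, beta = K.ones((16,)), K.zeros((16,))
#   normed, mean, var = K.normalize_batch_in_training(x, gamma, beta, [0])
#   # ...which matches:
#   same = K.batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3)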
# SHAPE OPERATIONS
@tf_export('keras.backend.concatenate')
def concatenate(tensors, axis=-1):
"""Concatenates a list of tensors alongside the specified axis.
Arguments:
tensors: list of tensors to concatenate.
axis: concatenation axis.
Returns:
A tensor.
"""
if axis < 0:
rank = ndim(tensors[0])
if rank:
axis %= rank
else:
axis = 0
if py_all(is_sparse(x) for x in tensors):
return sparse_ops.sparse_concat(axis, tensors)
else:
return array_ops.concat([to_dense(x) for x in tensors], axis)
@tf_export('keras.backend.reshape')
def reshape(x, shape):
"""Reshapes a tensor to the specified shape.
Arguments:
x: Tensor or variable.
shape: Target shape tuple.
Returns:
A tensor.
"""
return array_ops.reshape(x, shape)
@tf_export('keras.backend.permute_dimensions')
def permute_dimensions(x, pattern):
"""Permutes axes in a tensor.
Arguments:
x: Tensor or variable.
pattern: A tuple of
dimension indices, e.g. `(0, 2, 1)`.
Returns:
A tensor.
"""
return array_ops.transpose(x, perm=pattern)
@tf_export('keras.backend.resize_images')
def resize_images(x, height_factor, width_factor, data_format,
interpolation='nearest'):
"""Resizes the images contained in a 4D tensor.
Arguments:
x: Tensor or variable to resize.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
interpolation: A string, one of `nearest` or `bilinear`.
Returns:
A tensor.
Raises:
ValueError: in case of incorrect value for
`data_format` or `interpolation`.
"""
if data_format == 'channels_first':
rows, cols = 2, 3
elif data_format == 'channels_last':
rows, cols = 1, 2
else:
raise ValueError('Invalid `data_format` argument: %s' % (data_format,))
original_shape = int_shape(x)
new_shape = array_ops.shape(x)[rows:cols + 1]
new_shape *= constant_op.constant(
np.array([height_factor, width_factor], dtype='int32'))
if data_format == 'channels_first':
x = permute_dimensions(x, [0, 2, 3, 1])
if interpolation == 'nearest':
x = image_ops.resize_nearest_neighbor(x, new_shape)
elif interpolation == 'bilinear':
x = image_ops.resize_bilinear(x, new_shape)
else:
raise ValueError('interpolation should be one '
'of "nearest" or "bilinear".')
if data_format == 'channels_first':
x = permute_dimensions(x, [0, 3, 1, 2])
if original_shape[rows] is None:
new_height = None
else:
new_height = original_shape[rows] * height_factor
if original_shape[cols] is None:
new_width = None
else:
new_width = original_shape[cols] * width_factor
if data_format == 'channels_first':
output_shape = (None, None, new_height, new_width)
else:
output_shape = (None, new_height, new_width, None)
x.set_shape(output_shape)
return x
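# Shape sketch (illustrative): with `channels_last` data, a factor of 2 in each
# spatial dimension doubles rows and columns and keeps batch/channels.
#
#   x = K.placeholder(shape=(None, 8, 8, 3))
#   y = K.resize_images(x, 2, 2, data_format='channels_last')
#   K.int_shape(y)   # -> (None, 16, 16, 3)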
@tf_export('keras.backend.resize_volumes')
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
"""Resizes the volume contained in a 5D tensor.
Arguments:
x: Tensor or variable to resize.
depth_factor: Positive integer.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
Returns:
A tensor.
Raises:
ValueError: if `data_format` is neither
      `channels_last` nor `channels_first`.
"""
if data_format == 'channels_first':
output = repeat_elements(x, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise ValueError('Invalid data_format: ' + str(data_format))
@tf_export('keras.backend.repeat_elements')
def repeat_elements(x, rep, axis):
"""Repeats the elements of a tensor along an axis, like `np.repeat`.
If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
will have shape `(s1, s2 * rep, s3)`.
Arguments:
x: Tensor or variable.
rep: Python integer, number of times to repeat.
axis: Axis along which to repeat.
Returns:
A tensor.
"""
x_shape = x.shape.as_list()
# For static axis
if x_shape[axis] is not None:
# slices along the repeat axis
splits = array_ops.split(value=x,
num_or_size_splits=x_shape[axis],
axis=axis)
# repeat each slice the given number of reps
x_rep = [s for s in splits for _ in range(rep)]
return concatenate(x_rep, axis)
# Here we use tf.tile to mimic behavior of np.repeat so that
# we can handle dynamic shapes (that include None).
# To do that, we need an auxiliary axis to repeat elements along
# it and then merge them along the desired axis.
# Repeating
auxiliary_axis = axis + 1
x_shape = array_ops.shape(x)
x_rep = array_ops.expand_dims(x, axis=auxiliary_axis)
reps = np.ones(len(x.shape) + 1)
reps[auxiliary_axis] = rep
x_rep = array_ops.tile(x_rep, reps)
# Merging
reps = np.delete(reps, auxiliary_axis)
reps[axis] = rep
  reps = constant_op.constant(reps, dtype='int32')
x_shape *= reps
x_rep = array_ops.reshape(x_rep, x_shape)
# Fix shape representation
x_shape = x.shape.as_list()
x_rep.set_shape(x_shape)
x_rep._keras_shape = tuple(x_shape)
return x_rep
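# Shape sketch of the two paths above (illustrative): a statically known axis
# takes the split/concat path; a dynamic (None) axis falls back to the
# tile-based path and the repeated axis stays None in the static shape.
#
#   x = K.placeholder(shape=(None, 4, 5))
#   K.int_shape(K.repeat_elements(x, 2, axis=1))   # -> (None, 8, 5)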
@tf_export('keras.backend.repeat')
def repeat(x, n):
"""Repeats a 2D tensor.
  If `x` has shape `(samples, dim)` and `n` is `2`,
the output will have shape `(samples, 2, dim)`.
Arguments:
x: Tensor or variable.
n: Python integer, number of times to repeat.
Returns:
A tensor.
"""
assert ndim(x) == 2
x = array_ops.expand_dims(x, 1)
pattern = array_ops.stack([1, n, 1])
return array_ops.tile(x, pattern)
@tf_export('keras.backend.arange')
def arange(start, stop=None, step=1, dtype='int32'):
"""Creates a 1D tensor containing a sequence of integers.
The function arguments use the same convention as
Theano's arange: if only one argument is provided,
it is in fact the "stop" argument and "start" is 0.
The default type of the returned tensor is `'int32'` to
match TensorFlow's default.
Arguments:
start: Start value.
stop: Stop value.
step: Difference between two successive values.
dtype: Integer dtype to use.
Returns:
An integer tensor.
"""
# Match the behavior of numpy and Theano by returning an empty sequence.
if stop is None and start < 0:
start = 0
result = math_ops.range(start, limit=stop, delta=step, name='arange')
if dtype != 'int32':
result = cast(result, dtype)
return result
@tf_export('keras.backend.tile')
def tile(x, n):
"""Creates a tensor by tiling `x` by `n`.
Arguments:
x: A tensor or variable
      n: A list of integers. The length must be the same as the number of
dimensions in `x`.
Returns:
A tiled tensor.
"""
if isinstance(n, int):
n = [n]
return array_ops.tile(x, n)
@tf_export('keras.backend.flatten')
def flatten(x):
"""Flatten a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor, reshaped into 1-D
"""
return array_ops.reshape(x, [-1])
@tf_export('keras.backend.batch_flatten')
def batch_flatten(x):
"""Turn a nD tensor into a 2D tensor with same 0th dimension.
In other words, it flattens each data samples of a batch.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
x = array_ops.reshape(x, array_ops.stack([-1, prod(shape(x)[1:])]))
return x
@tf_export('keras.backend.expand_dims')
def expand_dims(x, axis=-1):
"""Adds a 1-sized dimension at index "axis".
Arguments:
x: A tensor or variable.
axis: Position where to add a new axis.
Returns:
A tensor with expanded dimensions.
"""
return array_ops.expand_dims(x, axis)
@tf_export('keras.backend.squeeze')
def squeeze(x, axis):
"""Removes a 1-dimension from the tensor at index "axis".
Arguments:
x: A tensor or variable.
axis: Axis to drop.
Returns:
A tensor with the same data as `x` but reduced dimensions.
"""
return array_ops.squeeze(x, [axis])
@tf_export('keras.backend.temporal_padding')
def temporal_padding(x, padding=(1, 1)):
"""Pads the middle dimension of a 3D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 integers, how many zeros to
add at the start and end of dim 1.
Returns:
A padded 3D tensor.
"""
assert len(padding) == 2
pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]
return array_ops.pad(x, pattern)
@tf_export('keras.backend.spatial_2d_padding')
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
"""Pads the 2nd and 3rd dimensions of a 4D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 4D tensor.
Raises:
ValueError: if `data_format` is neither
      `channels_last` nor `channels_first`.
"""
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
else:
pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
return array_ops.pad(x, pattern)
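# Shape sketch (illustrative): the default padding adds one row and one column
# of zeros on each side of the two spatial dimensions.
#
#   x = K.placeholder(shape=(None, 28, 28, 3))
#   K.int_shape(K.spatial_2d_padding(x, data_format='channels_last'))
#   # -> (None, 30, 30, 3)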
@tf_export('keras.backend.spatial_3d_padding')
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
"""Pads 5D tensor with zeros along the depth, height, width dimensions.
Pads these dimensions with respectively
"padding[0]", "padding[1]" and "padding[2]" zeros left and right.
For 'channels_last' data_format,
the 2nd, 3rd and 4th dimension will be padded.
For 'channels_first' data_format,
the 3rd, 4th and 5th dimension will be padded.
Arguments:
x: Tensor or variable.
padding: Tuple of 3 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 5D tensor.
Raises:
ValueError: if `data_format` is neither
      `channels_last` nor `channels_first`.
"""
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]]
else:
pattern = [[0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0],
padding[2][1]], [0, 0]]
return array_ops.pad(x, pattern)
@tf_export('keras.backend.stack')
def stack(x, axis=0):
"""Stacks a list of rank `R` tensors into a rank `R+1` tensor.
Arguments:
x: List of tensors.
axis: Axis along which to perform stacking.
Returns:
A tensor.
"""
return array_ops.stack(x, axis=axis)
@tf_export('keras.backend.one_hot')
def one_hot(indices, num_classes):
"""Computes the one-hot representation of an integer tensor.
Arguments:
indices: nD integer tensor of shape
`(batch_size, dim1, dim2, ... dim(n-1))`
num_classes: Integer, number of classes to consider.
  Returns:
      The (n + 1)D one-hot representation of the input,
      with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`.
"""
return array_ops.one_hot(indices, depth=num_classes, axis=-1)
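# Worked sketch (illustrative): class indices become a trailing axis of size
# `num_classes` containing a single 1 per entry.
#
#   indices = K.constant(np.array([0, 2]), dtype='int32')
#   K.eval(K.one_hot(indices, 3))
#   # -> [[1., 0., 0.],
#   #     [0., 0., 1.]]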
@tf_export('keras.backend.reverse')
def reverse(x, axes):
"""Reverse a tensor along the specified axes.
Arguments:
x: Tensor to reverse.
axes: Integer or iterable of integers.
Axes to reverse.
Returns:
A tensor.
"""
if isinstance(axes, int):
axes = [axes]
return array_ops.reverse(x, axes)
# VALUE MANIPULATION
@tf_export('keras.backend.get_value')
def get_value(x):
"""Returns the value of a variable.
Arguments:
x: input variable.
Returns:
A Numpy array.
Raises:
RuntimeError: If this method is called inside defun.
"""
if context.executing_eagerly():
return x.numpy()
elif ops.inside_function():
raise RuntimeError('Cannot get value inside Tensorflow graph function.')
return x.eval(session=get_session())
@tf_export('keras.backend.batch_get_value')
def batch_get_value(tensors):
"""Returns the value of more than one tensor variable.
Arguments:
tensors: list of ops to run.
Returns:
A list of Numpy arrays.
Raises:
RuntimeError: If this method is called inside defun.
"""
if context.executing_eagerly():
return [x.numpy() for x in tensors]
elif ops.inside_function(): # pylint: disable=protected-access
raise RuntimeError('Cannot get value inside Tensorflow graph function.')
if tensors:
return get_session().run(tensors)
else:
return []
@tf_export('keras.backend.set_value')
def set_value(x, value):
"""Sets the value of a variable, from a Numpy array.
Arguments:
x: Tensor to set to a new value.
value: Value to set the tensor to, as a Numpy array
(of the same shape).
"""
value = np.asarray(value, dtype=dtype(x))
if ops.executing_eagerly_outside_functions():
x.assign(value)
else:
with get_graph().as_default():
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
get_session().run(assign_op, feed_dict={assign_placeholder: value})
@tf_export('keras.backend.batch_set_value')
def batch_set_value(tuples):
"""Sets the values of many tensor variables at once.
Arguments:
tuples: a list of tuples `(tensor, value)`.
`value` should be a Numpy array.
"""
if ops.executing_eagerly_outside_functions():
for x, value in tuples:
x.assign(np.asarray(value, dtype=dtype(x)))
else:
with get_graph().as_default():
if tuples:
assign_ops = []
feed_dict = {}
for x, value in tuples:
value = np.asarray(value, dtype=dtype(x))
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
assign_placeholder = array_ops.placeholder(tf_dtype,
shape=value.shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
assign_ops.append(assign_op)
feed_dict[assign_placeholder] = value
get_session().run(assign_ops, feed_dict=feed_dict)
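# Example (sketch; the helper is hypothetical and assumes a usable default
# session in graph mode, or eager execution): round-tripping variable values
# through `set_value` and `get_value`.
def _example_set_and_get_value():
  v = variable(np.zeros((2, 2)))
  set_value(v, np.ones((2, 2)))
  return get_value(v)  # -> Numpy array of ones with shape (2, 2)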
@tf_export('keras.backend.print_tensor')
def print_tensor(x, message=''):
"""Prints `message` and the tensor value when evaluated.
Note that `print_tensor` returns a new tensor identical to `x`
which should be used in the following code. Otherwise the
print operation is not taken into account during evaluation.
Example:
```python
>>> x = K.print_tensor(x, message="x is: ")
```
Arguments:
x: Tensor to print.
message: Message to print jointly with the tensor.
Returns:
The same tensor `x`, unchanged.
"""
return logging_ops.Print(x, [x], message)
# GRAPH MANIPULATION
class GraphExecutionFunction(object):
"""Runs a computation graph.
  It is possible to pass arguments to `tf.Session.run()` via `session_kwargs`.
  In particular, additional operations can be passed via the `fetches`
  argument, and additional tensor substitutions via the `feed_dict` argument.
  Note that the given substitutions are merged with the substitutions from
  `inputs`. Even though `feed_dict` is passed once in the constructor (called
  in `model.compile()`), we can modify the values in the dictionary. Through
  this `feed_dict` we can provide additional substitutions besides the Keras
  inputs.
Arguments:
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
name: A name to help users identify what this function does.
session_kwargs: Arguments to `tf.Session.run()`:
`fetches`, `feed_dict`, `options`, `run_metadata`.
"""
def __init__(self, inputs, outputs, updates=None, name=None,
**session_kwargs):
updates = updates or []
if not isinstance(inputs, (list, tuple)):
raise TypeError('`inputs` to a Keras backend function '
'should be a list or tuple.')
if not isinstance(outputs, (list, tuple)):
raise TypeError('`outputs` of a Keras backend function '
'should be a list or tuple.')
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a Keras backend function '
'should be a list or tuple.')
self.inputs = list(inputs)
self.outputs = list(outputs)
with ops.control_dependencies(self.outputs):
updates_ops = []
for update in updates:
if isinstance(update, tuple):
p, new_p = update
updates_ops.append(state_ops.assign(p, new_p))
else:
# assumed already an op
updates_ops.append(update)
self.updates_op = control_flow_ops.group(*updates_ops)
self.name = name
# additional tensor substitutions
self.feed_dict = session_kwargs.pop('feed_dict', None)
# additional operations
self.fetches = session_kwargs.pop('fetches', [])
if not isinstance(self.fetches, list):
self.fetches = [self.fetches]
self.run_options = session_kwargs.pop('options', None)
self.run_metadata = session_kwargs.pop('run_metadata', None)
# The main use case of `fetches` being passed to a model is the ability
    # to run custom updates.
# This requires us to wrap fetches in `identity` ops.
self.fetches = [array_ops.identity(x) for x in self.fetches]
self.session_kwargs = session_kwargs
# This mapping keeps track of the function that should receive the
# output from a fetch in `fetches`: { fetch: function(fetch_output) }
# A Callback can use this to register a function with access to the
# output values for a fetch it added.
self.fetch_callbacks = dict()
if session_kwargs:
raise ValueError('Some keys in session_kwargs are not supported at this '
'time: %s' % (session_kwargs.keys(),))
self._callable_fn = None
self._feed_arrays = None
self._feed_symbols = None
self._symbol_vals = None
self._fetches = None
self._session = None
def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session):
"""Generates a callable that runs the graph.
Arguments:
feed_arrays: List of input tensors to be fed Numpy arrays at runtime.
feed_symbols: List of input tensors to be fed symbolic tensors at runtime.
symbol_vals: List of symbolic tensors to be fed to `feed_symbols`.
session: Session to use to generate the callable.
Returns:
Function that runs the graph according to the above options.
"""
# Prepare callable options.
callable_opts = config_pb2.CallableOptions()
# Handle external-data feed.
for x in feed_arrays:
callable_opts.feed.append(x.name)
if self.feed_dict:
for key in sorted(self.feed_dict.keys()):
callable_opts.feed.append(key.name)
# Handle symbolic feed.
for x, y in zip(feed_symbols, symbol_vals):
connection = callable_opts.tensor_connection.add()
if x.dtype != y.dtype:
y = math_ops.cast(y, dtype=x.dtype)
from_tensor = ops._as_graph_element(y)
if from_tensor is None:
from_tensor = y
connection.from_tensor = from_tensor.name # Data tensor
connection.to_tensor = x.name # Placeholder
# Handle fetches.
for x in self.outputs + self.fetches:
callable_opts.fetch.append(x.name)
# Handle updates.
callable_opts.target.append(self.updates_op.name)
# Handle run_options.
if self.run_options:
callable_opts.run_options.CopyFrom(self.run_options)
# Create callable.
callable_fn = session._make_callable_from_options(callable_opts)
# Cache parameters corresponding to the generated callable, so that
# we can detect future mismatches and refresh the callable.
self._callable_fn = callable_fn
self._feed_arrays = feed_arrays
self._feed_symbols = feed_symbols
self._symbol_vals = symbol_vals
self._fetches = list(self.fetches)
self._session = session
def _call_fetch_callbacks(self, fetches_output):
for fetch, output in zip(self._fetches, fetches_output):
if fetch in self.fetch_callbacks:
self.fetch_callbacks[fetch](output)
def __call__(self, inputs):
if not isinstance(inputs, (list, tuple)):
raise TypeError('`inputs` should be a list or tuple.')
session = get_session()
feed_arrays = []
array_vals = []
feed_symbols = []
symbol_vals = []
for tensor, value in zip(self.inputs, inputs):
if value is None:
continue
if is_sparse(tensor):
sparse_coo = value.tocoo()
indices = np.concatenate((np.expand_dims(sparse_coo.row, 1),
np.expand_dims(sparse_coo.col, 1)), 1)
value = (indices, sparse_coo.data, sparse_coo.shape)
if tensor_util.is_tensor(value):
# Case: feeding symbolic tensor.
feed_symbols.append(tensor)
symbol_vals.append(value)
else:
# Case: feeding Numpy array.
feed_arrays.append(tensor)
# We need to do array conversion and type casting at this level, since
# `callable_fn` only supports exact matches.
tensor_type = dtypes_module.as_dtype(tensor.dtype)
array_vals.append(np.asarray(value,
dtype=tensor_type.as_numpy_dtype))
if self.feed_dict:
for key in sorted(self.feed_dict.keys()):
array_vals.append(
np.asarray(self.feed_dict[key], dtype=key.dtype.base_dtype.name))
# Refresh callable if anything has changed.
if (self._callable_fn is None or feed_arrays != self._feed_arrays or
symbol_vals != self._symbol_vals or
feed_symbols != self._feed_symbols or self.fetches != self._fetches or
session != self._session):
self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)
fetched = self._callable_fn(*array_vals,
run_metadata=self.run_metadata)
self._call_fetch_callbacks(fetched[-len(self._fetches):])
return fetched[:len(self.outputs)]
class EagerExecutionFunction(object):
"""Helper class for constructing a TF graph function from the Keras graph.
Arguments:
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
name: A name to help users identify what this function does.
session_kwargs: Unsupported.
"""
def __init__(self, inputs, outputs, updates=None, name=None):
updates = updates or []
if not isinstance(inputs, (list, tuple)):
raise TypeError('`inputs` to a Keras backend function '
'should be a list or tuple.')
if not isinstance(outputs, (list, tuple)):
raise TypeError('`outputs` of a Keras backend function '
'should be a list or tuple.')
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a Keras backend function '
'should be a list or tuple.')
self.inputs = list(inputs)
self.outputs = list(outputs)
self.name = name
graph = get_graph()
# Consolidate updates
with graph.as_default():
with ops.control_dependencies(self.outputs):
# In general, updates should be run after the outputs have been
# computed. However, we can only ensure this when we create
# the updates here (i.e. when updates are passed as tuples).
# We cannot modify the control dependencies of preexisting update ops.
updates_ops = []
for update in updates:
# For legacy reasons it is allowed to pass an update as a tuple
# `(variable, new_value)` (this maps to an assign op).
if isinstance(update, tuple):
p, new_p = update
updates_ops.append(state_ops.assign(p, new_p))
else:
# Assumed already an op -- we cannot control its execution order.
updates_ops.append(update)
      # We set the update ops to run at the end by making self.outputs[0]
      # depend on them.
if updates and not self.outputs:
# Edge case; never happens in practice
raise ValueError('Cannot create a Keras backend function with updates'
' but no outputs during eager execution.')
with ops.control_dependencies(updates_ops):
self.outputs[0] = array_ops.identity(self.outputs[0])
# Prepare graph function
# TODO(fchollet): can we restrict `captures` to variables actually used in
# the relevant subgraph?
graph.inputs = self.inputs + list(graph.captures.values())
graph.outputs = self.outputs
graph_fn = eager_function.Function(graph)
graph_fn._num_positional_args = len(self.inputs)
graph_fn._arg_keywords = []
self._graph_fn = graph_fn
# Handle placeholders with default
# (treated as required placeholder by graph functions)
self._placeholder_default_values = {}
with graph.as_default():
for x in self.inputs:
if x.op.type == 'PlaceholderWithDefault':
self._placeholder_default_values[x] = tensor_util.constant_value(
x.op.inputs[0])
def __call__(self, inputs):
converted_inputs = []
for tensor, value in zip(self.inputs, inputs):
if value is None:
# Assume `value` is a placeholder with default
value = self._placeholder_default_values.get(tensor, None)
if value is None:
raise ValueError(
'You must feed a value for placeholder %s' % (tensor,))
value = ops.convert_to_tensor(value, dtype=tensor.dtype)
if value.dtype != tensor.dtype:
# Temporary workaround due to `convert_to_tensor` not casting floats.
# See b/119637405
value = math_ops.cast(value, tensor.dtype)
converted_inputs.append(value)
outputs = self._graph_fn(*converted_inputs)
return [x.numpy() for x in outputs]
@tf_export('keras.backend.function')
def function(inputs, outputs, updates=None, name=None, **kwargs):
"""Instantiates a Keras function.
Arguments:
inputs: List of placeholder tensors.
outputs: List of output tensors.
updates: List of update ops.
name: String, name of function.
**kwargs: Passed to `tf.Session.run`.
Returns:
    A callable that, given a list of input values, returns the corresponding
    output values as Numpy arrays.
Raises:
ValueError: if invalid kwargs are passed in or if in eager execution.
"""
if context.executing_eagerly():
if kwargs:
      raise ValueError('Session keyword arguments are not supported during '
'eager execution. You passed: %s' % (kwargs,))
return EagerExecutionFunction(inputs, outputs, updates=updates, name=name)
if kwargs:
for key in kwargs:
if (key not in tf_inspect.getfullargspec(session_module.Session.run)[0]
and key not in ['inputs', 'outputs', 'updates', 'name']):
msg = ('Invalid argument "%s" passed to K.function with TensorFlow '
'backend') % key
raise ValueError(msg)
return GraphExecutionFunction(inputs, outputs, updates=updates, **kwargs)
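# Example (sketch; the helper is hypothetical and assumes graph, i.e.
# non-eager, execution so that `placeholder` is available): compiling a tiny
# backend function that doubles its input.
def _example_backend_function():
  x = placeholder(shape=(None, 3))
  f = function([x], [x * 2.0])
  return f([np.ones((2, 3), dtype='float32')])[0]  # array of 2.0s, shape (2, 3)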
@tf_export('keras.backend.gradients')
def gradients(loss, variables):
"""Returns the gradients of `loss` w.r.t. `variables`.
Arguments:
loss: Scalar tensor to minimize.
variables: List of variables.
Returns:
A gradients tensor.
"""
return gradients_module.gradients(
loss, variables, colocate_gradients_with_ops=True)
@tf_export('keras.backend.stop_gradient')
def stop_gradient(variables):
"""Returns `variables` but with zero gradient w.r.t. every other variable.
Arguments:
variables: Tensor or list of tensors to consider constant with respect
to any other variable.
Returns:
A single tensor or a list of tensors (depending on the passed argument)
that has no gradient with respect to any other variable.
"""
if isinstance(variables, (list, tuple)):
    return [array_ops.stop_gradient(v) for v in variables]
return array_ops.stop_gradient(variables)
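# Example (sketch; the helper is hypothetical): gradients do not flow through
# a tensor wrapped in `stop_gradient`.
def _example_stop_gradient():
  w = variable(np.ones((3,), dtype='float32'))
  loss = mean(w * stop_gradient(w))  # second factor is treated as a constant
  return gradients(loss, [w])  # d(loss)/dw == stop_gradient(w) / 3, elementwise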
# CONTROL FLOW
@tf_export('keras.backend.rnn')
def rnn(step_function,
inputs,
initial_states,
go_backwards=False,
mask=None,
constants=None,
unroll=False,
input_length=None,
time_major=False,
zero_output_for_mask=False):
"""Iterates over the time dimension of a tensor.
Arguments:
step_function: RNN step function.
Args;
input; Tensor with shape `(samples, ...)` (no time dimension),
representing input for the batch of samples at a certain
time step.
states; List of tensors.
Returns;
output; Tensor with shape `(samples, output_dim)`
(no time dimension).
new_states; List of tensors, same length and shapes
as 'states'. The first state in the list must be the
output tensor at the previous timestep.
inputs: Tensor of temporal data of shape `(samples, time, ...)`
        (at least 3D), or nested tensors, each of which has shape
`(samples, time, ...)`.
initial_states: Tensor with shape `(samples, state_size)`
(no time dimension), containing the initial values for the states used
in the step function. In the case that state_size is in a nested
shape, the shape of initial_states will also follow the nested
structure.
go_backwards: Boolean. If True, do the iteration over the time
dimension in reverse order and return the reversed sequence.
mask: Binary tensor with shape `(samples, time, 1)`,
with a zero for every element that is masked.
constants: List of constant values passed at each step.
unroll: Whether to unroll the RNN or to use a symbolic `while_loop`.
input_length: If specified, assume time dimension is of this length.
time_major: Boolean. If true, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
zero_output_for_mask: Boolean. If True, the output for masked timestep
will be zeros, whereas in the False case, output from previous
timestep is returned.
Returns:
A tuple, `(last_output, outputs, new_states)`.
last_output: the latest output of the rnn, of shape `(samples, ...)`
outputs: tensor with shape `(samples, time, ...)` where each
entry `outputs[s, t]` is the output of the step function
at time `t` for sample `s`.
new_states: list of tensors, latest states returned by
the step function, of shape `(samples, ...)`.
Raises:
ValueError: if input dimension is less than 3.
ValueError: if `unroll` is `True` but input timestep is not a fixed
number.
ValueError: if `mask` is provided (not `None`) but states is not provided
(`len(states)` == 0).
"""
def swap_batch_timestep(input_t):
# Swap the batch and timestep dim for the incoming tensor.
axes = list(range(len(input_t.shape)))
axes[0], axes[1] = 1, 0
return array_ops.transpose(input_t, axes)
if not time_major:
inputs = nest.map_structure(swap_batch_timestep, inputs)
flatted_inputs = nest.flatten(inputs)
time_steps = flatted_inputs[0].shape[0]
batch = flatted_inputs[0].shape[1]
time_steps_t = array_ops.shape(flatted_inputs[0])[0]
for input_ in flatted_inputs:
input_.get_shape().with_rank_at_least(3)
if mask is not None:
if mask.dtype != dtypes_module.bool:
mask = math_ops.cast(mask, dtypes_module.bool)
if len(mask.shape) == 2:
mask = expand_dims(mask)
if not time_major:
mask = swap_batch_timestep(mask)
if constants is None:
constants = []
# tf.where needs its condition tensor to be the same shape as its two
# result tensors, but in our case the condition (mask) tensor is
# (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.
# So we need to broadcast the mask to match the shape of inputs.
# That's what the tile call does, it just repeats the mask along its
# second dimension n times.
def _expand_mask(mask_t, input_t, fixed_dim=1):
assert not nest.is_sequence(mask_t)
assert not nest.is_sequence(input_t)
rank_diff = len(input_t.shape) - len(mask_t.shape)
for _ in range(rank_diff):
mask_t = array_ops.expand_dims(mask_t, -1)
multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:]
return array_ops.tile(mask_t, multiples)
if unroll:
if not time_steps:
raise ValueError('Unrolling requires a fixed number of timesteps.')
states = initial_states
successive_states = []
successive_outputs = []
    # Process the input tensors. The input tensor needs to be split on the
    # time_step dim, and reversed if go_backwards is True. In the case of
    # nested input, the input is flattened and then transformed individually.
    # The result of this will be a tuple of lists; each item in the tuple is a
    # list of tensors with shape (batch, feature).
def _process_single_input_t(input_t):
input_t = array_ops.unstack(input_t) # unstack for time_step dim
if go_backwards:
input_t.reverse()
return input_t
if nest.is_sequence(inputs):
processed_input = nest.map_structure(_process_single_input_t, inputs)
else:
processed_input = (_process_single_input_t(inputs),)
def _get_input_tensor(time):
inp = [t_[time] for t_ in processed_input]
return nest.pack_sequence_as(inputs, inp)
if mask is not None:
mask_list = array_ops.unstack(mask)
if go_backwards:
mask_list.reverse()
for i in range(time_steps):
inp = _get_input_tensor(i)
mask_t = mask_list[i]
output, new_states = step_function(inp, states + constants)
tiled_mask_t = _expand_mask(mask_t, output)
if not successive_outputs:
prev_output = zeros_like(output)
else:
prev_output = successive_outputs[-1]
output = array_ops.where(tiled_mask_t, output, prev_output)
return_states = []
for state, new_state in zip(states, new_states):
# (see earlier comment for tile explanation)
tiled_mask_t = _expand_mask(mask_t, new_state)
return_states.append(array_ops.where(tiled_mask_t, new_state, state))
states = return_states
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
if zero_output_for_mask:
last_output = array_ops.where(
_expand_mask(mask_list[-1], last_output),
last_output,
zeros_like(last_output))
outputs = array_ops.where(
_expand_mask(mask, outputs, fixed_dim=2),
outputs,
zeros_like(outputs))
else:
for i in range(time_steps):
inp = _get_input_tensor(i)
output, states = step_function(inp, states + constants)
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
else:
states = tuple(initial_states)
    # Create the input tensor array. If the input is a nested tensor, it will
    # be flattened first, and one tensor array will be created per flattened
    # tensor.
input_ta = tuple(
tensor_array_ops.TensorArray(
dtype=inp.dtype,
size=time_steps_t,
tensor_array_name='input_ta_%s' % i)
for i, inp in enumerate(flatted_inputs))
input_ta = tuple(
ta.unstack(input_) if not go_backwards else ta
.unstack(reverse(input_, 0))
for ta, input_ in zip(input_ta, flatted_inputs))
    # Get the time(0) input and compute the output for that; the output will
    # be used to determine the dtype of the output tensor array. Don't read
    # from input_ta because TensorArray's clear_after_read defaults to True.
input_time_zero = nest.pack_sequence_as(inputs,
[inp[0] for inp in flatted_inputs])
# output_time_zero is used to determine the cell output shape and its dtype.
# the value is discarded.
output_time_zero, _ = step_function(input_time_zero,
initial_states + constants)
output_ta = tuple(
tensor_array_ops.TensorArray(
dtype=out.dtype,
size=time_steps_t,
tensor_array_name='output_ta_%s' % i)
for i, out in enumerate(nest.flatten(output_time_zero)))
time = constant_op.constant(0, dtype='int32', name='time')
while_loop_kwargs = {
'cond': lambda time, *_: time < time_steps_t,
'maximum_iterations': input_length,
'parallel_iterations': 32,
'swap_memory': True,
}
if mask is not None:
if not states:
raise ValueError('No initial states provided! '
'When using masking in an RNN, you should '
'provide initial states '
'(and your step function should return '
'as its first state at time `t` '
'the output at time `t-1`).')
if go_backwards:
mask = reverse(mask, 0)
mask_ta = tensor_array_ops.TensorArray(
dtype=dtypes_module.bool,
size=time_steps_t,
tensor_array_name='mask_ta')
mask_ta = mask_ta.unstack(mask)
      # Mask for the T output will be based on the output of T - 1. In the case
      # T = 0, a zero-filled tensor will be used.
flat_zero_output = tuple(array_ops.zeros_like(o)
for o in nest.flatten(output_time_zero))
def _step(time, output_ta_t, prev_output, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
prev_output: tuple of outputs from time - 1.
*states: List of states.
Returns:
Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
# maybe set shape.
current_input = nest.pack_sequence_as(inputs, current_input)
mask_t = mask_ta.read(time)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
# mask output
flat_output = nest.flatten(output)
flat_mask_output = (flat_zero_output if zero_output_for_mask
else nest.flatten(prev_output))
tiled_mask_t = tuple(_expand_mask(mask_t, o) for o in flat_output)
flat_new_output = tuple(
array_ops.where(m, o, zo) for m, o, zo in zip(
tiled_mask_t, flat_output, flat_mask_output))
# mask states
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
new_state.set_shape(state.shape)
tiled_mask_t = tuple(_expand_mask(mask_t, s) for s in flat_state)
flat_final_state = tuple(
array_ops.where(m, s, ps)
for m, s, ps in zip(tiled_mask_t, flat_new_state, flat_state))
new_states = nest.pack_sequence_as(new_states, flat_final_state)
output_ta_t = tuple(
ta.write(time, out)
for ta, out in zip(output_ta_t, flat_new_output))
return (time + 1, output_ta_t,
tuple(flat_new_output)) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta, flat_zero_output) + states,
**while_loop_kwargs)
# Skip final_outputs[2] which is the output for final timestep.
new_states = final_outputs[3:]
else:
def _step(time, output_ta_t, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
*states: List of states.
Returns:
        Tuple: `(time + 1, output_ta_t) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
current_input = nest.pack_sequence_as(inputs, current_input)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
new_state.set_shape(state.shape)
flat_output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, flat_output))
new_states = nest.pack_sequence_as(initial_states, flat_new_state)
return (time + 1, output_ta_t) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta) + states,
**while_loop_kwargs)
new_states = final_outputs[2:]
output_ta = final_outputs[1]
outputs = tuple(o.stack() for o in output_ta)
last_output = tuple(o[-1] for o in outputs)
outputs = nest.pack_sequence_as(output_time_zero, outputs)
last_output = nest.pack_sequence_as(output_time_zero, last_output)
# static shape inference
def set_shape(output_):
shape = output_.shape.as_list()
shape[0] = time_steps
shape[1] = batch
output_.set_shape(shape)
return output_
outputs = nest.map_structure(set_shape, outputs)
if not time_major:
outputs = nest.map_structure(swap_batch_timestep, outputs)
return last_output, outputs, new_states
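# Example (sketch; the helper is hypothetical): a minimal step function that
# keeps a running sum over the time dimension. Expects `inputs` of shape
# (samples, time, dim) in the default batch-major layout.
def _example_cumulative_sum_rnn(inputs):
  def step(inp, states):
    new_state = states[0] + inp
    return new_state, [new_state]
  initial_states = [zeros_like(inputs[:, 0, :])]
  last_output, outputs, new_states = rnn(step, inputs, initial_states)
  return outputs  # outputs[s, t] is the sum of inputs[s, :t + 1]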
@tf_export('keras.backend.switch')
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value.
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
Arguments:
condition: tensor (`int` or `bool`).
then_expression: either a tensor, or a callable that returns a tensor.
else_expression: either a tensor, or a callable that returns a tensor.
Returns:
The selected tensor.
Raises:
ValueError: If rank of `condition` is greater than rank of expressions.
"""
if condition.dtype != dtypes_module.bool:
condition = math_ops.cast(condition, 'bool')
cond_ndim = ndim(condition)
if not cond_ndim:
if not callable(then_expression):
def then_expression_fn():
return then_expression
else:
then_expression_fn = then_expression
if not callable(else_expression):
def else_expression_fn():
return else_expression
else:
else_expression_fn = else_expression
x = control_flow_ops.cond(condition, then_expression_fn, else_expression_fn)
else:
# tf.where needs its condition tensor
# to be the same shape as its two
# result tensors
if callable(then_expression):
then_expression = then_expression()
if callable(else_expression):
else_expression = else_expression()
expr_ndim = ndim(then_expression)
if cond_ndim > expr_ndim:
raise ValueError('Rank of `condition` should be less than or'
' equal to rank of `then_expression` and '
'`else_expression`. ndim(condition)=' + str(cond_ndim) +
', ndim(then_expression)'
'=' + str(expr_ndim))
if cond_ndim > 1:
ndim_diff = expr_ndim - cond_ndim
cond_shape = array_ops.concat(
[array_ops.shape(condition), [1] * ndim_diff], axis=0)
condition = array_ops.reshape(condition, cond_shape)
expr_shape = array_ops.shape(then_expression)
shape_diff = expr_shape - cond_shape
tile_shape = array_ops.where(shape_diff > 0, expr_shape,
array_ops.ones_like(expr_shape))
condition = array_ops.tile(condition, tile_shape)
x = array_ops.where(condition, then_expression, else_expression)
return x
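# Example (sketch; the helper is hypothetical): choosing between two branches
# with a scalar boolean tensor.
def _example_switch(x):
  use_double = constant(True, dtype='bool')
  return switch(use_double, lambda: x * 2.0, lambda: x)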
@tf_export('keras.backend.in_train_phase')
def in_train_phase(x, alt, training=None):
"""Selects `x` in train phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
Arguments:
x: What to return in train phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
Returns:
Either `x` or `alt` based on the `training` flag.
    The `training` flag defaults to `K.learning_phase()`.
"""
if training is None:
training = learning_phase()
if training == 1 or training is True:
if callable(x):
return x()
else:
return x
elif training == 0 or training is False:
if callable(alt):
return alt()
else:
return alt
# else: assume learning phase is a placeholder tensor.
x = switch(training, x, alt)
return x
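# Example (sketch; the helper is hypothetical): scale activations only while
# training; at test time the input is returned unchanged.
def _example_in_train_phase(x):
  return in_train_phase(lambda: x * 0.5, lambda: x)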
@tf_export('keras.backend.in_test_phase')
def in_test_phase(x, alt, training=None):
"""Selects `x` in test phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
Arguments:
x: What to return in test phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
Returns:
Either `x` or `alt` based on `K.learning_phase`.
"""
return in_train_phase(alt, x, training=training)
# NN OPERATIONS
@tf_export('keras.backend.relu')
def relu(x, alpha=0., max_value=None, threshold=0):
"""Rectified linear unit.
With default values, it returns element-wise `max(x, 0)`.
Otherwise, it follows:
`f(x) = max_value` for `x >= max_value`,
`f(x) = x` for `threshold <= x < max_value`,
`f(x) = alpha * (x - threshold)` otherwise.
Arguments:
x: A tensor or variable.
alpha: A scalar, slope of negative section (default=`0.`).
max_value: float. Saturation threshold.
threshold: float. Threshold value for thresholded activation.
Returns:
A tensor.
"""
if alpha != 0.:
if max_value is None and threshold == 0:
return nn.leaky_relu(x, alpha=alpha)
if threshold != 0:
negative_part = nn.relu(-x + threshold)
else:
negative_part = nn.relu(-x)
clip_max = max_value is not None
if threshold != 0:
# computes x for x > threshold else 0
x = x * math_ops.cast(math_ops.greater(x, threshold), floatx())
elif max_value == 6:
# if no threshold, then can use nn.relu6 native TF op for performance
x = nn.relu6(x)
clip_max = False
else:
x = nn.relu(x)
if clip_max:
max_value = _to_tensor(max_value, x.dtype.base_dtype)
zero = _to_tensor(0., x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, max_value)
if alpha != 0.:
alpha = _to_tensor(alpha, x.dtype.base_dtype)
x -= alpha * negative_part
return x
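# Example (sketch; the helper is hypothetical): the three regimes described in
# the docstring, evaluated on a small constant tensor.
def _example_relu_regimes():
  x = constant([-2.0, 1.0, 10.0])
  # alpha * (x - threshold) below the threshold, identity in between,
  # saturation at max_value: approximately [-0.25, 1.0, 6.0].
  return relu(x, alpha=0.1, max_value=6.0, threshold=0.5)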
@tf_export('keras.backend.elu')
def elu(x, alpha=1.):
"""Exponential linear unit.
Arguments:
x: A tensor or variable to compute the activation function for.
alpha: A scalar, slope of negative section.
Returns:
A tensor.
"""
res = nn.elu(x)
if alpha == 1:
return res
else:
return array_ops.where(x > 0, res, alpha * res)
@tf_export('keras.backend.softmax')
def softmax(x, axis=-1):
"""Softmax of a tensor.
Arguments:
x: A tensor or variable.
axis: The dimension softmax would be performed on.
The default is -1 which indicates the last dimension.
Returns:
A tensor.
"""
return nn.softmax(x, axis=axis)
@tf_export('keras.backend.softplus')
def softplus(x):
"""Softplus of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softplus(x)
@tf_export('keras.backend.softsign')
def softsign(x):
"""Softsign of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softsign(x)
@tf_export('keras.backend.categorical_crossentropy')
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor of the same shape as `output`.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
        format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
"""
rank = len(output.shape)
axis = axis % rank
# Note: nn.softmax_cross_entropy_with_logits_v2
# expects logits, Keras expects probabilities.
if not from_logits:
# scale preds so that the class probas of each sample sum to 1
output = output / math_ops.reduce_sum(output, axis, True)
# manual computation of crossentropy
epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
return -math_ops.reduce_sum(target * math_ops.log(output), axis)
else:
return nn.softmax_cross_entropy_with_logits_v2(labels=target, logits=output)
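# Example (sketch; the helper is hypothetical): crossentropy between one-hot
# targets and softmax probabilities for a batch of two samples.
def _example_categorical_crossentropy():
  target = constant([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
  output = constant([[0.9, 0.05, 0.05], [0.1, 0.8, 0.1]])
  return categorical_crossentropy(target, output)  # ~[-log(0.9), -log(0.8)]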
@tf_export('keras.backend.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy with integer targets.
Arguments:
target: An integer tensor.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
        format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
"""
rank = len(output.shape)
axis = axis % rank
if axis != rank - 1:
permutation = list(range(axis)) + list(range(axis + 1, rank)) + [axis]
output = array_ops.transpose(output, perm=permutation)
# Note: nn.sparse_softmax_cross_entropy_with_logits
# expects logits, Keras expects probabilities.
if not from_logits:
epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
output = math_ops.log(output)
output_shape = output.shape
targets = cast(flatten(target), 'int64')
logits = array_ops.reshape(output, [-1, int(output_shape[-1])])
res = nn.sparse_softmax_cross_entropy_with_logits(
labels=targets, logits=logits)
if len(output_shape) >= 3:
# If our output includes timesteps or spatial dimensions we need to reshape
return array_ops.reshape(res, array_ops.shape(output)[:-1])
else:
return res
@tf_export('keras.backend.binary_crossentropy')
def binary_crossentropy(target, output, from_logits=False):
"""Binary crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor with the same shape as `output`.
output: A tensor.
from_logits: Whether `output` is expected to be a logits tensor.
By default, we consider that `output`
encodes a probability distribution.
Returns:
A tensor.
"""
# Note: nn.sigmoid_cross_entropy_with_logits
# expects logits, Keras expects probabilities.
if not from_logits:
# transform back to logits
epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
output = math_ops.log(output / (1 - output))
return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
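# Example (sketch; the helper is hypothetical): elementwise binary
# crossentropy between probabilities and 0/1 targets.
def _example_binary_crossentropy():
  target = constant([1.0, 0.0])
  output = constant([0.9, 0.2])
  return binary_crossentropy(target, output)  # ~[-log(0.9), -log(1 - 0.2)]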
@tf_export('keras.backend.sigmoid')
def sigmoid(x):
"""Element-wise sigmoid.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.sigmoid(x)
@tf_export('keras.backend.hard_sigmoid')
def hard_sigmoid(x):
"""Segment-wise linear approximation of sigmoid.
Faster than sigmoid.
Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
x = (0.2 * x) + 0.5
zero = _to_tensor(0., x.dtype.base_dtype)
one = _to_tensor(1., x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, one)
return x
@tf_export('keras.backend.tanh')
def tanh(x):
"""Element-wise tanh.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.tanh(x)
@tf_export('keras.backend.dropout')
def dropout(x, level, noise_shape=None, seed=None):
"""Sets entries in `x` to zero at random, while scaling the entire tensor.
Arguments:
x: tensor
level: fraction of the entries in the tensor
that will be set to 0.
noise_shape: shape for randomly generated keep/drop flags,
must be broadcastable to the shape of `x`
seed: random seed to ensure determinism.
Returns:
A tensor.
"""
retain_prob = 1. - level
if seed is None:
seed = np.random.randint(10e6)
# the dummy 1. works around a TF bug
# (float32_ref vs. float32 incompatibility)
return nn.dropout(x * 1., retain_prob, noise_shape, seed=seed)
@tf_export('keras.backend.l2_normalize')
def l2_normalize(x, axis=None):
"""Normalizes a tensor wrt the L2 norm alongside the specified axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform normalization.
Returns:
A tensor.
"""
return nn.l2_normalize(x, axis=axis)
@tf_export('keras.backend.in_top_k')
def in_top_k(predictions, targets, k):
"""Returns whether the `targets` are in the top `k` `predictions`.
Arguments:
predictions: A tensor of shape `(batch_size, classes)` and type `float32`.
targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
k: An `int`, number of top elements to consider.
Returns:
A 1D tensor of length `batch_size` and type `bool`.
`output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`
values of `predictions[i]`.
"""
return nn.in_top_k(predictions, targets, k)
# CONVOLUTIONS
def _preprocess_conv1d_input(x, data_format):
"""Transpose and cast the input before the conv1d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor.
"""
tf_data_format = 'NWC' # to pass TF Conv2dNative operations
if data_format == 'channels_first':
if not _has_nchw_support():
x = array_ops.transpose(x, (0, 2, 1)) # NCW -> NWC
else:
tf_data_format = 'NCW'
return x, tf_data_format
def _preprocess_conv2d_input(x, data_format, force_transpose=False):
"""Transpose and cast the input before the conv2d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
force_transpose: Boolean. If True, the input will always be transposed
from NCHW to NHWC if `data_format` is `"channels_first"`.
If False, the transposition only occurs on CPU (GPU ops are
assumed to support NCHW).
Returns:
A tensor.
"""
tf_data_format = 'NHWC'
if data_format == 'channels_first':
if not _has_nchw_support() or force_transpose:
x = array_ops.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC
else:
tf_data_format = 'NCHW'
return x, tf_data_format
def _preprocess_conv3d_input(x, data_format):
"""Transpose and cast the input before the conv3d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor.
"""
tf_data_format = 'NDHWC'
if data_format == 'channels_first':
if not _has_nchw_support():
x = array_ops.transpose(x, (0, 2, 3, 4, 1))
else:
tf_data_format = 'NCDHW'
return x, tf_data_format
def _preprocess_padding(padding):
"""Convert keras' padding to TensorFlow's padding.
Arguments:
    padding: string, one of 'same', 'valid'.
Returns:
a string, one of 'SAME', 'VALID'.
Raises:
    ValueError: if `padding` is invalid.
"""
if padding == 'same':
padding = 'SAME'
elif padding == 'valid':
padding = 'VALID'
else:
raise ValueError('Invalid padding: ' + str(padding))
return padding
@tf_export('keras.backend.conv1d')
def conv1d(x,
kernel,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1):
"""1D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: stride integer.
padding: string, `"same"`, `"causal"` or `"valid"`.
data_format: string, one of "channels_last", "channels_first".
dilation_rate: integer dilate rate.
Returns:
A tensor, result of 1D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` or
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
kernel_shape = kernel.shape.as_list()
if padding == 'causal':
# causal (dilated) convolution:
left_pad = dilation_rate * (kernel_shape[0] - 1)
x = temporal_padding(x, (left_pad, 0))
padding = 'valid'
padding = _preprocess_padding(padding)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=(dilation_rate,),
strides=(strides,),
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NWC':
x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
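# Example (sketch; the helper is hypothetical and assumes the channel
# dimension of `x` is statically known): a causal 1D convolution that only
# looks at the current and previous timesteps.
def _example_causal_conv1d(x):
  # x: (batch, time, channels_in); kernel: (width, channels_in, channels_out).
  kernel = ones((2, int_shape(x)[-1], 4))
  return conv1d(x, kernel, strides=1, padding='causal', dilation_rate=1)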
@tf_export('keras.backend.conv2d')
def conv2d(x,
kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow data format
for inputs/kernels/outputs.
dilation_rate: tuple of 2 integers.
Returns:
A tensor, result of 2D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` or
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@tf_export('keras.backend.conv2d_transpose')
def conv2d_transpose(x,
kernel,
output_shape,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D deconvolution (i.e.
transposed convolution).
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow/CNTK data format
for inputs/kernels/outputs.
dilation_rate: Tuple of 2 integers.
Returns:
A tensor, result of transposed 2D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` or
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(output_shape, (tuple, list)):
output_shape = array_ops.stack(output_shape)
# `atrous_conv2d_transpose` only supports NHWC format, even on GPU.
if data_format == 'channels_first' and dilation_rate != (1, 1):
force_transpose = True
else:
force_transpose = False
x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
output_shape = (output_shape[0], output_shape[2], output_shape[3],
output_shape[1])
if output_shape[0] is None:
output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
output_shape = array_ops.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
if dilation_rate == (1, 1):
x = nn.conv2d_transpose(x, kernel, output_shape, strides,
padding=padding,
data_format=tf_data_format)
else:
assert dilation_rate[0] == dilation_rate[1]
x = nn.atrous_conv2d_transpose(
x,
kernel,
output_shape,
rate=dilation_rate[0],
padding=padding)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
def separable_conv1d(x,
depthwise_kernel,
pointwise_kernel,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1):
"""1D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: stride integer.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: integer dilation rate.
Returns:
Output tensor.
Raises:
ValueError: if `data_format` is neither `channels_last` or
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(strides, int):
strides = (strides,)
if isinstance(dilation_rate, int):
dilation_rate = (dilation_rate,)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == 'NWC':
spatial_start_dim = 1
strides = (1,) + strides * 2 + (1,)
else:
spatial_start_dim = 2
strides = (1, 1) + strides * 2
x = array_ops.expand_dims(x, spatial_start_dim)
depthwise_kernel = array_ops.expand_dims(depthwise_kernel, 0)
pointwise_kernel = array_ops.expand_dims(pointwise_kernel, 0)
dilation_rate = (1,) + dilation_rate
x = nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
x = array_ops.squeeze(x, [spatial_start_dim])
if data_format == 'channels_first' and tf_data_format == 'NWC':
x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
@tf_export('keras.backend.separable_conv2d')
def separable_conv2d(x,
depthwise_kernel,
pointwise_kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
dilation rates for the separable convolution.
Returns:
Output tensor.
Raises:
ValueError: if `data_format` is neither `channels_last` or
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if len(strides) != 2:
raise ValueError('`strides` must be a tuple of 2 integers.')
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
def depthwise_conv2d(x,
depthwise_kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
        dilation rates for the depthwise convolution.
Returns:
Output tensor.
Raises:
ValueError: if `data_format` is neither `channels_last` or
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.depthwise_conv2d(
x,
depthwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@tf_export('keras.backend.conv3d')
def conv3d(x,
kernel,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1)):
"""3D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow/CNTK data format
for inputs/kernels/outputs.
dilation_rate: tuple of 3 integers.
Returns:
A tensor, result of 3D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` or
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
def conv3d_transpose(x,
kernel,
output_shape,
strides=(1, 1, 1),
padding='valid',
data_format=None):
"""3D deconvolution (i.e.
transposed convolution).
Arguments:
x: input tensor.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, "same" or "valid".
data_format: string, `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow/CNTK data format
for inputs/kernels/outputs.
Returns:
A tensor, result of transposed 3D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` or
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(output_shape, (tuple, list)):
output_shape = array_ops.stack(output_shape)
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
output_shape = (output_shape[0], output_shape[2], output_shape[3],
output_shape[4], output_shape[1])
if output_shape[0] is None:
output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
output_shape = array_ops.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == 'NDHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.conv3d_transpose(
x,
kernel,
output_shape,
strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
@tf_export('keras.backend.pool2d')
def pool2d(x,
pool_size,
strides=(1, 1),
padding='valid',
data_format=None,
pool_mode='max'):
"""2D Pooling.
Arguments:
x: Tensor or variable.
pool_size: tuple of 2 integers.
strides: tuple of 2 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 2D pooling.
Raises:
ValueError: if `data_format` is neither `"channels_last"` or
`"channels_first"`.
ValueError: if `pool_mode` is neither `"max"` or `"avg"`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if len(pool_size) != 2:
raise ValueError('`pool_size` must be a tuple of 2 integers.')
if len(strides) != 2:
raise ValueError('`strides` must be a tuple of 2 integers.')
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == 'max':
x = nn.max_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
elif pool_mode == 'avg':
x = nn.avg_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
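# Example (sketch; the helper is hypothetical): 2x2 max pooling that halves
# the spatial dimensions of a channels_last image batch.
def _example_max_pool2d(x):
  # x: (batch, rows, cols, channels) -> (batch, rows // 2, cols // 2, channels)
  return pool2d(x, pool_size=(2, 2), strides=(2, 2), padding='valid',
                data_format='channels_last', pool_mode='max')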
@tf_export('keras.backend.pool3d')
def pool3d(x,
pool_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
pool_mode='max'):
"""3D Pooling.
Arguments:
x: Tensor or variable.
pool_size: tuple of 3 integers.
strides: tuple of 3 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 3D pooling.
Raises:
ValueError: if `data_format` is neither `"channels_last"` or
`"channels_first"`.
ValueError: if `pool_mode` is neither `"max"` or `"avg"`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NDHWC':
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == 'max':
x = nn.max_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
elif pool_mode == 'avg':
x = nn.avg_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
def local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply N-D convolution with un-shared weights.
Arguments:
inputs: (N+2)-D tensor with shape
(batch_size, channels_in, d_in1, ..., d_inN)
if data_format='channels_first', or
(batch_size, d_in1, ..., d_inN, channels_in)
if data_format='channels_last'.
kernel: the unshared weight for N-D convolution,
with shape (output_items, feature_dim, channels_out), where
feature_dim = np.prod(kernel_size) * channels_in,
output_items = np.prod(output_shape).
kernel_size: a tuple of N integers, specifying the
spatial dimensions of the N-D convolution window.
strides: a tuple of N integers, specifying the strides
of the convolution along the spatial dimensions.
output_shape: a tuple of (d_out1, ..., d_outN) specifying the spatial
dimensionality of the output.
data_format: string, "channels_first" or "channels_last".
Returns:
An (N+2)-D tensor with shape:
(batch_size, channels_out) + output_shape
if data_format='channels_first', or:
(batch_size,) + output_shape + (channels_out,)
if data_format='channels_last'.
Raises:
ValueError: if `data_format` is neither
`channels_last` nor `channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
kernel_shape = int_shape(kernel)
feature_dim = kernel_shape[1]
channels_out = kernel_shape[-1]
ndims = len(output_shape)
spatial_dimensions = list(range(ndims))
xs = []
output_axes_ticks = [range(axis_max) for axis_max in output_shape]
for position in itertools.product(*output_axes_ticks):
slices = [slice(None)]
if data_format == 'channels_first':
slices.append(slice(None))
slices.extend([slice(position[d] * strides[d],
position[d] * strides[d] + kernel_size[d])
for d in spatial_dimensions])
if data_format == 'channels_last':
slices.append(slice(None))
xs.append(reshape(inputs[slices], (1, -1, feature_dim)))
x_aggregate = concatenate(xs, axis=0)
output = batch_dot(x_aggregate, kernel)
output = reshape(output, output_shape + (-1, channels_out))
if data_format == 'channels_first':
permutation = [ndims, ndims + 1] + spatial_dimensions
else:
permutation = [ndims] + spatial_dimensions + [ndims + 1]
return permute_dimensions(output, permutation)
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
"""Apply 1D conv with un-shared weights.
Arguments:
inputs: 3D tensor with shape:
(batch_size, steps, input_dim)
if data_format is "channels_last" or
(batch_size, input_dim, steps)
if data_format is "channels_first".
kernel: the unshared weight for convolution,
with shape (output_length, feature_dim, filters).
kernel_size: a tuple of a single integer,
specifying the length of the 1D convolution window.
strides: a tuple of a single integer,
specifying the stride length of the convolution.
data_format: the data format, channels_first or channels_last.
Returns:
      A 3D tensor with shape:
      (batch_size, output_length, filters)
      if data_format='channels_last'
      or a 3D tensor with shape:
      (batch_size, filters, output_length)
      if data_format='channels_first'.
"""
output_shape = (kernel.shape[0],)
return local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format)
def local_conv2d(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply 2D conv with un-shared weights.
Arguments:
      inputs: 4D tensor with shape:
          (batch_size, channels_in, rows, cols)
          if data_format='channels_first'
          or 4D tensor with shape:
          (batch_size, rows, cols, channels_in)
          if data_format='channels_last'.
kernel: the unshared weight for convolution,
with shape (output_items, feature_dim, filters).
kernel_size: a tuple of 2 integers, specifying the
width and height of the 2D convolution window.
strides: a tuple of 2 integers, specifying the strides
of the convolution along the width and height.
output_shape: a tuple with (output_row, output_col).
data_format: the data format, channels_first or channels_last.
Returns:
A 4D tensor with shape:
(batch_size, filters, new_rows, new_cols)
if data_format='channels_first'
or 4D tensor with shape:
(batch_size, new_rows, new_cols, filters)
if data_format='channels_last'.
"""
return local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format)
@tf_export('keras.backend.bias_add')
def bias_add(x, bias, data_format=None):
"""Adds a bias vector to a tensor.
Arguments:
x: Tensor or variable.
bias: Bias tensor to add.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
Output tensor.
Raises:
ValueError: In one of the two cases below:
1. invalid `data_format` argument.
2. invalid bias shape.
                   The bias should be either a vector or
                   a tensor with ndim(x) - 1 dimensions.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
bias_shape = int_shape(bias)
if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
raise ValueError(
'Unexpected bias dimensions %d, expect to be 1 or %d dimensions' %
        (len(bias_shape), ndim(x) - 1))
# pylint: disable=g-no-augmented-assignment
if ndim(x) == 5:
if data_format == 'channels_first':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, bias_shape[0], 1, 1, 1))
else:
x = x + reshape(bias, (1, bias_shape[3]) + bias_shape[:3])
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, 1, 1, bias_shape[0]))
else:
x = x + reshape(bias, (1,) + bias_shape)
elif ndim(x) == 4:
if data_format == 'channels_first':
if len(bias_shape) == 1:
if _has_nchw_support():
x = nn.bias_add(x, bias, data_format='NCHW')
else:
x = x + reshape(bias, (1, bias_shape[0], 1, 1))
else:
x = x + reshape(bias, (1, bias_shape[2]) + bias_shape[:2])
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = nn.bias_add(x, bias, data_format='NHWC')
else:
x = x + reshape(bias, (1,) + bias_shape)
elif ndim(x) == 3:
if data_format == 'channels_first':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, bias_shape[0], 1))
else:
x = x + reshape(bias, (1, bias_shape[1], bias_shape[0]))
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, 1, bias_shape[0]))
else:
x = x + reshape(bias, (1,) + bias_shape)
else:
x = nn.bias_add(x, bias)
# pylint: enable=g-no-augmented-assignment
return x
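# Editor's note: a NumPy-only sketch of the broadcasting pattern `bias_add`
# relies on for 4D inputs.  The shapes below are hypothetical; the point is
# that a length-C bias reshaped to (1, C, 1, 1) broadcasts over a
# channels_first batch, while (1, 1, 1, C) matches channels_last.  Not called
# anywhere in this file.
def _bias_add_broadcast_sketch():
  x_nchw = np.zeros((2, 3, 4, 5))  # (batch, channels, rows, cols)
  x_nhwc = np.zeros((2, 4, 5, 3))  # (batch, rows, cols, channels)
  bias = np.arange(3, dtype=np.float64)  # one bias value per channel
  y_nchw = x_nchw + bias.reshape((1, 3, 1, 1))
  y_nhwc = x_nhwc + bias.reshape((1, 1, 1, 3))
  return y_nchw.shape, y_nhwc.shape  # ((2, 3, 4, 5), (2, 4, 5, 3))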
# RANDOMNESS
@tf_export('keras.backend.random_normal')
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with normal distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
mean: A float, mean of the normal distribution to draw samples.
stddev: A float, standard deviation of the normal distribution
to draw samples.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
@tf_export('keras.backend.random_uniform')
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
"""Returns a tensor with uniform distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
minval: A float, lower boundary of the uniform distribution
to draw samples.
maxval: A float, upper boundary of the uniform distribution
to draw samples.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.random_uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
@tf_export('keras.backend.random_binomial')
def random_binomial(shape, p=0.0, dtype=None, seed=None):
"""Returns a tensor with random binomial distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
p: A float, `0. <= p <= 1`, probability of binomial distribution.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return array_ops.where(
random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p,
array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))
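# Editor's note: a NumPy sketch of the sampling trick used by
# `random_binomial` above -- draw uniforms and threshold them at `p`, so each
# entry is 1 with probability p and 0 otherwise.  Shape, p and seed are
# arbitrary; the helper is illustrative and not called anywhere in this file.
def _random_binomial_sketch(shape=(2, 3), p=0.25, seed=0):
  rng = np.random.RandomState(seed)
  return np.where(rng.uniform(size=shape) <= p, 1.0, 0.0)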
@tf_export('keras.backend.truncated_normal')
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with truncated random normal distribution of values.
The generated values follow a normal distribution
with specified mean and standard deviation,
except that values whose magnitude is more than
two standard deviations from the mean are dropped and re-picked.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
mean: Mean of the values.
stddev: Standard deviation of the values.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.truncated_normal(
shape, mean, stddev, dtype=dtype, seed=seed)
# CTC
# TensorFlow has a native implementation, but it uses sparse tensors
# and therefore requires a wrapper for Keras. The functions below convert
# dense to sparse tensors and also wraps up the beam search code that is
# in TensorFlow's CTC implementation
@tf_export('keras.backend.ctc_label_dense_to_sparse')
def ctc_label_dense_to_sparse(labels, label_lengths):
"""Converts CTC labels from dense to sparse.
Arguments:
labels: dense CTC labels.
label_lengths: length of the labels.
Returns:
A sparse tensor representation of the labels.
"""
label_shape = array_ops.shape(labels)
num_batches_tns = array_ops.stack([label_shape[0]])
max_num_labels_tns = array_ops.stack([label_shape[1]])
def range_less_than(_, current_input):
return array_ops.expand_dims(
math_ops.range(label_shape[1]), 0) < array_ops.fill(
max_num_labels_tns, current_input)
init = math_ops.cast(
array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)
dense_mask = functional_ops.scan(
range_less_than, label_lengths, initializer=init, parallel_iterations=1)
dense_mask = dense_mask[:, 0, :]
label_array = array_ops.reshape(
array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns),
label_shape)
label_ind = array_ops.boolean_mask(label_array, dense_mask)
batch_array = array_ops.transpose(
array_ops.reshape(
array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns),
reverse(label_shape, 0)))
batch_ind = array_ops.boolean_mask(batch_array, dense_mask)
indices = array_ops.transpose(
array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))
vals_sparse = array_ops.gather_nd(labels, indices)
return sparse_tensor.SparseTensor(
math_ops.to_int64(indices), vals_sparse, math_ops.to_int64(label_shape))
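# Editor's note: a NumPy sketch of what `ctc_label_dense_to_sparse` computes,
# shown on a tiny hypothetical batch.  For each row only the first
# `label_lengths[i]` entries are kept, emitted as (batch, position) indices
# plus the matching values -- the same (indices, values, dense_shape) triple a
# SparseTensor carries.  Illustrative only; not called anywhere in this file.
def _ctc_dense_to_sparse_sketch():
  labels = np.array([[1, 2, 0, 0],
                     [3, 4, 5, 0]])
  label_lengths = np.array([2, 3])
  indices, values = [], []
  for b, length in enumerate(label_lengths):
    for t in range(length):
      indices.append((b, t))
      values.append(labels[b, t])
  dense_shape = labels.shape  # (2, 4)
  return indices, values, dense_shape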
@tf_export('keras.backend.ctc_batch_cost')
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
"""Runs CTC loss algorithm on each batch element.
Arguments:
y_true: tensor `(samples, max_string_length)`
containing the truth labels.
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, 1)` containing the sequence length for
each batch item in `y_pred`.
label_length: tensor `(samples, 1)` containing the sequence length for
each batch item in `y_true`.
Returns:
Tensor with shape (samples,1) containing the
CTC loss of each element.
"""
label_length = math_ops.to_int32(array_ops.squeeze(label_length, axis=-1))
input_length = math_ops.to_int32(array_ops.squeeze(input_length, axis=-1))
sparse_labels = math_ops.to_int32(
ctc_label_dense_to_sparse(y_true, label_length))
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
return array_ops.expand_dims(
ctc.ctc_loss(
inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1)
@tf_export('keras.backend.ctc_decode')
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
"""Decodes the output of a softmax.
Can use either greedy search (also known as best path)
or a constrained dictionary search.
Arguments:
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, )` containing the sequence length for
each batch item in `y_pred`.
greedy: perform much faster best-path search if `true`.
This does not use a dictionary.
beam_width: if `greedy` is `false`: a beam search decoder will be used
with a beam of this width.
top_paths: if `greedy` is `false`,
how many of the most probable paths will be returned.
Returns:
Tuple:
List: if `greedy` is `true`, returns a list of one element that
contains the decoded sequence.
If `false`, returns the `top_paths` most probable
decoded sequences.
Important: blank labels are returned as `-1`.
Tensor `(top_paths, )` that contains
the log probability of each decoded sequence.
"""
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
input_length = math_ops.to_int32(input_length)
if greedy:
(decoded, log_prob) = ctc.ctc_greedy_decoder(
inputs=y_pred, sequence_length=input_length)
else:
(decoded, log_prob) = ctc.ctc_beam_search_decoder(
inputs=y_pred,
sequence_length=input_length,
beam_width=beam_width,
top_paths=top_paths)
decoded_dense = [
sparse_ops.sparse_to_dense(
st.indices, st.dense_shape, st.values, default_value=-1)
for st in decoded
]
return (decoded_dense, log_prob)
# HIGH ORDER FUNCTIONS
@tf_export('keras.backend.map_fn')
def map_fn(fn, elems, name=None, dtype=None):
"""Map the function fn over the elements elems and return the outputs.
Arguments:
fn: Callable that will be called upon each element in elems
elems: tensor
name: A string name for the map node in the graph
dtype: Output data type.
Returns:
Tensor with dtype `dtype`.
"""
return functional_ops.map_fn(fn, elems, name=name, dtype=dtype)
@tf_export('keras.backend.foldl')
def foldl(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from left to right.
Arguments:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[0]` in case of None)
name: A string name for the foldl node in the graph
Returns:
Tensor with same type and shape as `initializer`.
"""
return functional_ops.foldl(fn, elems, initializer=initializer, name=name)
@tf_export('keras.backend.foldr')
def foldr(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from right to left.
Arguments:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[-1]` in case of None)
name: A string name for the foldr node in the graph
Returns:
Same type and shape as initializer
"""
return functional_ops.foldr(fn, elems, initializer=initializer, name=name)
# Load Keras default configuration from config file if present.
# Set Keras base dir path given KERAS_HOME env variable, if applicable.
# Otherwise either ~/.keras or /tmp.
if 'KERAS_HOME' in os.environ:
_keras_dir = os.environ.get('KERAS_HOME')
else:
_keras_base_dir = os.path.expanduser('~')
_keras_dir = os.path.join(_keras_base_dir, '.keras')
_config_path = os.path.expanduser(os.path.join(_keras_dir, 'keras.json'))
if os.path.exists(_config_path):
try:
_config = json.load(open(_config_path))
except ValueError:
_config = {}
_floatx = _config.get('floatx', floatx())
assert _floatx in {'float16', 'float32', 'float64'}
_epsilon = _config.get('epsilon', epsilon())
assert isinstance(_epsilon, float)
_image_data_format = _config.get('image_data_format', image_data_format())
assert _image_data_format in {'channels_last', 'channels_first'}
set_floatx(_floatx)
set_epsilon(_epsilon)
set_image_data_format(_image_data_format)
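# Editor's note: for reference, a keras.json consumed by the block above
# typically looks like the following (values shown are common defaults, not
# requirements):
#
#     {
#         "floatx": "float32",
#         "epsilon": 1e-07,
#         "backend": "tensorflow",
#         "image_data_format": "channels_last"
#     }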
# Save config file.
if not os.path.exists(_keras_dir):
try:
os.makedirs(_keras_dir)
except OSError:
    # Ignore permission-denied errors and potential race conditions
    # in multi-threaded environments.
pass
if not os.path.exists(_config_path):
_config = {
'floatx': floatx(),
'epsilon': epsilon(),
'backend': 'tensorflow',
'image_data_format': image_data_format()
}
try:
with open(_config_path, 'w') as f:
f.write(json.dumps(_config, indent=4))
except IOError:
    # Ignore permission-denied errors.
pass
| {
"content_hash": "dff3f7bc8d74ff3abe96a2e2e8122a05",
"timestamp": "",
"source": "github",
"line_count": 5221,
"max_line_length": 95,
"avg_line_length": 30.37751388622869,
"alnum_prop": 0.6332620853588565,
"repo_name": "hehongliang/tensorflow",
"id": "c7654642d0178abca7cd89f6cd629060f8cefa87",
"size": "159400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/backend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "194748"
},
{
"name": "C++",
"bytes": "26947133"
},
{
"name": "CMake",
"bytes": "174938"
},
{
"name": "Go",
"bytes": "908627"
},
{
"name": "Java",
"bytes": "323804"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37293"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Protocol Buffer",
"bytes": "249901"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "22872386"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "336334"
}
],
"symlink_target": ""
} |
"""
Written by Tong He and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
"""
import os
import time
import random
import argparse
import numpy as np
from pathlib import PurePath
import torch
import torch.utils.data
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import MultiStepLR
from config import config
from CBIG_model_pytorch import fnn_3l, fnn_2l, CBIG_dataset
from CBIG_mics import mics_infer_metric, mics_log
def train(args):
'''main function for FNN network and sex prediction
Args:
args: args from command line
Returns:
None
'''
t_overall = time.time()
print('\nCBIG FNN for UK Biobank and sex with argument: ' + str(args))
# set all the seed
seed = args.seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# set gpu number
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# load data
npz = PurePath(args.path_data, 'data_fnn_sex.npz').as_posix()
npz = np.load(npz)
train_x = npz['train_x']
train_y = npz['train_y']
valid_x = npz['valid_x']
valid_y = npz['valid_y']
test_x = npz['test_x']
test_y = npz['test_y']
# load dataset for PyTorch
dset_train = CBIG_dataset(train_x, train_y, for_sex=True)
trainloader = DataLoader(
dset_train, batch_size=args.batch_size, shuffle=True, num_workers=8)
dset_valid = CBIG_dataset(valid_x, valid_y, for_sex=True)
validLoader = DataLoader(
dset_valid, batch_size=args.batch_size, shuffle=True, num_workers=8)
dset_test = CBIG_dataset(test_x, test_y, for_sex=True)
testLoader = DataLoader(
dset_test, batch_size=args.batch_size, shuffle=False, num_workers=8)
runs = args.runs # numbers of ensemble runs
epochs = args.epochs # numbers of epochs per run
# initialization of result record
tra_los_record = np.zeros((runs, epochs))
val_los_record = np.zeros((runs, epochs))
tes_los_record = np.zeros((runs, epochs))
tra_auc_record = np.zeros((runs, epochs))
val_auc_record = np.zeros((runs, epochs))
tes_auc_record = np.zeros((runs, epochs))
tes_res_record = np.zeros((runs, epochs, test_x.shape[0], 2))
final_original = None
# Code running - with multiple ensemble runs
for run in range(runs):
# initialization of network
if args.n_layer == 2:
net = fnn_2l(
train_x.shape[1], args.n_l1, args.dropout, for_sex=True)
elif args.n_layer == 3:
net = fnn_3l(
train_x.shape[1],
args.n_l1,
args.n_l2,
args.dropout,
for_sex=True)
else:
assert False, "Only support 2 or 3 layers."
net.to(device)
# other components of network
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(
net.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
scheduler = MultiStepLR(
optimizer,
milestones=[args.scheduler_decrease, args.scheduler_decrease * 2],
gamma=0.1)
# start epoch training
for epoch in range(epochs):
scheduler.step()
# training
train_loss = 0.0
net.train(True)
for (x, y) in trainloader:
x, y = x.to(device), y.to(device)
optimizer.zero_grad()
outputs = net(x)
loss = criterion(outputs, y)
loss.backward()
optimizer.step()
train_loss += loss.item()
tra_los_record[run, epoch] = train_loss / trainloader.__len__()
net.train(False)
            # evaluate on the training set
auc, _ = mics_infer_metric(trainloader, net, criterion, device)
tra_auc_record[run, epoch] = auc
# validation
auc, loss = mics_infer_metric(validLoader, net, criterion, device)
val_auc_record[run, epoch] = auc
val_los_record[run, epoch] = loss
# test
auc, loss, real, pred = mics_infer_metric(
testLoader, net, criterion, device, need_value=True)
if final_original is not None:
assert np.array_equal(final_original, real)
else:
final_original = real
tes_res_record[run, epoch, :, :] = pred
tes_auc_record[run, epoch] = auc
tes_los_record[run, epoch] = loss
log_args = {
'tra_los_record': tra_los_record,
'val_los_record': val_los_record,
'tes_los_record': tes_los_record,
'tra_auc_record': tra_auc_record,
'val_auc_record': val_auc_record,
'tes_auc_record': tes_auc_record,
'tes_res_record': tes_res_record,
'final_original': final_original
}
mics_log('fnn', args.out_path, index=args.index, **log_args)
print("time spent: {:.4f}".format(time.time() - t_overall))
return
def get_args():
'''function to get args from command line and return the args
Returns:
argparse.ArgumentParser: args that could be used by other function
'''
parser = argparse.ArgumentParser()
# general parameters
parser.add_argument('--path_data', type=str, default=config.UKBB_INTER_DIR)
parser.add_argument('--out_path', '-o', type=str, default=config.OUT_PATH)
parser.add_argument('--seed', type=int, default=config.RAMDOM_SEED)
parser.add_argument(
'--batch_size', type=int, default=config.UKBB_BATCH_SIZE)
parser.add_argument('--epochs', type=int, default=config.UKBB_EPOCHS)
parser.add_argument('--runs', type=int, default=config.UKBB_RUNS)
parser.add_argument('--gpu', type=int, default=0)
# hyperparameter
parser.add_argument('--index', type=int, default=None)
parser.add_argument('--lr', type=float, default=1e-2)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=0.01)
parser.add_argument('--scheduler_decrease', type=int, default=75)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--n_layer', type=int, default=2)
parser.add_argument('--n_l1', type=int, default=8)
parser.add_argument('--n_l2', type=int, default=32)
return parser.parse_args()
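# Editor's note: a hypothetical command line for this script, assembled from
# the arguments defined in get_args() above (the numbers and paths are
# placeholders, not recommendations):
#
#     python CBIG_ukbb_fnn_sex.py --gpu 0 --epochs 100 --runs 5 \
#         --lr 1e-2 --dropout 0.5 --n_layer 2 --n_l1 8 \
#         --path_data /path/to/ukbb_data --out_path /path/to/output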
if __name__ == '__main__':
train(get_args())
| {
"content_hash": "26c83e715f2530ec2eb1080c7dc503a4",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 79,
"avg_line_length": 33.353233830845774,
"alnum_prop": 0.6027744630071599,
"repo_name": "ThomasYeoLab/CBIG",
"id": "bcb3cbf38a988b428f6dde7829a9bc2dad6ba615",
"size": "6751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stable_projects/predict_phenotypes/He2019_KRDNN/cbig/He2019/CBIG_ukbb_fnn_sex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35378"
},
{
"name": "C",
"bytes": "2076236"
},
{
"name": "C++",
"bytes": "1461097"
},
{
"name": "CSS",
"bytes": "6852"
},
{
"name": "Fortran",
"bytes": "598090"
},
{
"name": "HTML",
"bytes": "287918"
},
{
"name": "Jupyter Notebook",
"bytes": "569200"
},
{
"name": "MATLAB",
"bytes": "10013692"
},
{
"name": "Makefile",
"bytes": "7902"
},
{
"name": "Objective-C",
"bytes": "77"
},
{
"name": "PostScript",
"bytes": "8416"
},
{
"name": "Python",
"bytes": "2499129"
},
{
"name": "R",
"bytes": "33929"
},
{
"name": "Shell",
"bytes": "1923688"
},
{
"name": "TeX",
"bytes": "8993"
},
{
"name": "Vim Script",
"bytes": "2859"
},
{
"name": "XSLT",
"bytes": "19506"
}
],
"symlink_target": ""
} |
import base64
import binascii
import fcntl
import itertools
import os
import stat
import struct
import sys
import tempfile
import textwrap
import time
import math
try:
from Crypto.Hash import RIPEMD
except ImportError:
RIPEMD = 'RIPEMD'
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from Crypto.Cipher import AES
from Crypto.Cipher import DES3
from Crypto.Cipher import Blowfish
from Crypto.Cipher import CAST
from Crypto.PublicKey import RSA
# We know pycrypto imports sha directly
import warnings
warnings.filterwarnings("ignore", r"the sha module is deprecated",
DeprecationWarning, "^.*Crypto\.Hash\.SHA.*$")
from Crypto.PublicKey import DSA
from Crypto.PublicKey import pubkey
from conary import constants
from conary.lib import util, digestlib
# key types defined in RFC 2440 page 49
PK_ALGO_RSA = 1
PK_ALGO_RSA_ENCRYPT_ONLY = 2 # deprecated
PK_ALGO_RSA_SIGN_ONLY = 3 # deprecated
PK_ALGO_ELGAMAL_ENCRYPT_ONLY = 16
PK_ALGO_DSA = 17
PK_ALGO_ELLIPTIC_CURVE = 18
PK_ALGO_ECDSA = 19
PK_ALGO_ELGAMAL = 20
PK_ALGO_ALL_RSA = (PK_ALGO_RSA, PK_ALGO_RSA_ENCRYPT_ONLY,
PK_ALGO_RSA_SIGN_ONLY)
PK_ALGO_ALL_ELGAMAL = (PK_ALGO_ELGAMAL_ENCRYPT_ONLY, PK_ALGO_ELGAMAL)
# packet tags are defined in RFC 2440 - 4.3. Packet Tags
PKT_RESERVED = 0 # a packet type must not have this value
PKT_PUB_SESSION_KEY = 1 # Public-Key Encrypted Session Key Packet
PKT_SIG = 2 # Signature Packet
PKT_SYM_SESSION_KEY = 3 # Symmetric-Key Encrypted Session Key Packet
PKT_ONE_PASS_SIG = 4 # One-Pass Signature Packet
PKT_SECRET_KEY = 5 # Secret Key Packet
PKT_PUBLIC_KEY = 6 # Public Key Packet
PKT_SECRET_SUBKEY = 7 # Secret Subkey Packet
PKT_COMPRESSED_DATA = 8 # Compressed Data Packet
PKT_SYM_ENCRYPTED_DATA = 9 # Symmetrically Encrypted Data Packet
PKT_MARKER = 10 # Marker Packet
PKT_LITERAL_DATA = 11 # Literal Data Packet
PKT_TRUST = 12 # Trust Packet
PKT_USERID = 13 # User ID Packet
PKT_PUBLIC_SUBKEY = 14 # Public Subkey Packet
# Additions from http://tools.ietf.org/html/draft-ietf-openpgp-rfc2440bis-22
PKT_USER_ATTRIBUTE = 17 # User Attribute Packet
PKT_DATA_PACKET = 18 # Sym. Encrypted and Integrity Protected Data Packet
PKT_MOD_DETECTION = 19 # Modification Detection Code Packet
PKT_PRIVATE1 = 60 # 60 to 63 -- Private or Experimental Values
PKT_PRIVATE2 = 61
PKT_PRIVATE3 = 62
PKT_PRIVATE4 = 63
PKT_ALL_SECRET = (PKT_SECRET_KEY, PKT_SECRET_SUBKEY)
PKT_ALL_PUBLIC = (PKT_PUBLIC_KEY, PKT_PUBLIC_SUBKEY)
PKT_ALL_KEYS = PKT_ALL_SECRET + PKT_ALL_PUBLIC
PKT_MAIN_KEYS = (PKT_SECRET_KEY, PKT_PUBLIC_KEY)
PKT_SUB_KEYS = (PKT_SECRET_SUBKEY, PKT_PUBLIC_SUBKEY)
PKT_ALL_USER = set([PKT_USERID, PKT_USER_ATTRIBUTE])
# 5.2.1 Signature Types
SIG_TYPE_BINARY_DOC = 0x00
SIG_TYPE_TEXT_DOC = 0x01
SIG_TYPE_STANDALONE = 0x02
SIG_TYPE_CERT_0 = 0x10
SIG_TYPE_CERT_1 = 0x11
SIG_TYPE_CERT_2 = 0x12
SIG_TYPE_CERT_3 = 0x13
SIG_TYPE_SUBKEY_BIND = 0x18
SIG_TYPE_PRKEY_BIND = 0x19
SIG_TYPE_DIRECT_KEY = 0x1F
SIG_TYPE_KEY_REVOC = 0x20
SIG_TYPE_SUBKEY_REVOC = 0x28
SIG_TYPE_CERT_REVOC = 0x30
SIG_TYPE_TIMESTAMP = 0x40
SIG_TYPE_THIRD_PARTY_CONFIRM = 0x50
SIG_CERTS = (SIG_TYPE_CERT_0, SIG_TYPE_CERT_1,
SIG_TYPE_CERT_2, SIG_TYPE_CERT_3, )
SIG_KEY_REVOCS = (SIG_TYPE_KEY_REVOC, SIG_TYPE_SUBKEY_REVOC)
# 5.2.3.1 Signature Subpacket Types
SIG_SUBPKT_CREATION = 2
SIG_SUBPKT_SIG_EXPIRE = 3
SIG_SUBPKT_EXPORTABLE = 4
SIG_SUBPKT_TRUST = 5
SIG_SUBPKT_REGEX = 6
SIG_SUBPKT_REVOCABLE = 7
SIG_SUBPKT_KEY_EXPIRE = 9
SIG_SUBPKT_PLACEHOLDER = 10
SIG_SUBPKT_PREF_SYM_ALGS = 11
SIG_SUBPKT_REVOC_KEY = 12
SIG_SUBPKT_ISSUER_KEYID = 16
SIG_SUBPKT_NOTATION_DATA = 20
SIG_SUBPKT_PREF_HASH_ALGS = 21
SIG_SUBPKT_PREF_COMP_ALGS = 22
SIG_SUBPKT_KEYSRVR_PREFS = 23
SIG_SUBPKT_PREF_KEYSRVR = 24
SIG_SUBPKT_PRIM_UID = 25
SIG_SUBPKT_POLICY_URL = 26
SIG_SUBPKT_KEY_FLAGS = 27
SIG_SUBPKT_SIGNERS_UID = 28
SIG_SUBPKT_REVOC_REASON = 29
SIG_SUBPKT_FEATURES = 30
SIG_SUBPKT_SIG_TARGET = 31
SIG_SUBPKT_EMBEDDED_SIG = 32
SIG_SUBPKT_INTERNAL_0 = 100
SIG_SUBPKT_INTERNAL_1 = 101
SIG_SUBPKT_INTERNAL_2 = 102
SIG_SUBPKT_INTERNAL_3 = 103
SIG_SUBPKT_INTERNAL_4 = 104
SIG_SUBPKT_INTERNAL_5 = 105
SIG_SUBPKT_INTERNAL_6 = 106
SIG_SUBPKT_INTERNAL_7 = 107
SIG_SUBPKT_INTERNAL_8 = 108
SIG_SUBPKT_INTERNAL_9 = 109
SIG_SUBPKT_INTERNAL_A = 110
# 3.6.2.1. Secret key encryption
ENCRYPTION_TYPE_UNENCRYPTED = 0x00
ENCRYPTION_TYPE_S2K_SPECIFIED = 0xff
# GPG man page hints at existence of "sha cehcksum" and claims it
# will be part of "the new forthcoming extended openpgp specs"
# for now: experimentally determined to be 0xFE
ENCRYPTION_TYPE_SHA1_CHECK = 0xfe
S2K_TYPE_SIMPLE = 0x00
S2K_TYPE_SALTED = 0x01
S2K_TYPE_ITER_SALTED = 0x03
OLD_PKT_LEN_ONE_OCTET = 0
OLD_PKT_LEN_TWO_OCTET = 1
OLD_PKT_LEN_FOUR_OCTET = 2
# User Attribute Subpackets (5.12)
USR_ATTR_SUBPKT_IMG = 1
# trust levels
TRUST_UNTRUSTED = 0
TRUST_MARGINAL = 4
TRUST_FULL = 5
TRUST_ULTIMATE = 6
TRUST_TRUSTED = 120
# trust packet headers
TRP_VERSION = chr(1)
TRP_KEY = chr(12)
TRP_USERID = chr(13)
TRUST_PACKET_LENGTH = 40
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
class PGPError(Exception):
pass
class InvalidPacketError(PGPError):
pass
class KeyringError(PGPError):
pass
class MalformedKeyRing(PGPError):
def __str__(self):
return self.error
def __init__(self, reason="Malformed Key Ring"):
self.error = "Malformed Key Ring: %s" %reason
class UnsupportedEncryptionAlgorithm(PGPError):
def __init__(self, alg):
self.alg = alg
def __str__(self):
return "Unsupported encryption algorithm code %s" % self.alg
class UnsupportedHashAlgorithm(PGPError):
def __init__(self, alg):
self.alg = alg
def __str__(self):
return "Unsupported hash algorithm code %s" % self.alg
class IncompatibleKey(PGPError):
def __str__(self):
return self.error
def __init__(self, reason="Incompatible Key"):
self.error = "Incompatible Key: %s" %reason
class InvalidKey(PGPError):
def __str__(self):
return self.error
def __init__(self, reason="Invalid Key"):
self.error = "Invalid Key: %s" %reason
class KeyNotFound(PGPError):
def __str__(self):
return self.error
def __init__(self, keyId, reason=None):
if keyId:
self.error = "OpenPGP key not found for key ID %s" %keyId
if isinstance(keyId, list):
self.keys = keyId
else:
self.keys = [keyId]
else:
self.error = "No OpenPGP keys found"
if reason:
self.error += ': %s' %reason
class BadPassPhrase(PGPError):
def __str__(self):
return self.error
def __init__(self, reason="Bad passphrase"):
self.error = reason
class BadSelfSignature(PGPError):
def __str__(self):
return "Key %s failed self signature check" % self.keyId
def __init__(self, keyId):
self.keyId = keyId
class InvalidBodyError(PGPError):
pass
class ShortReadError(InvalidBodyError):
def __init__(self, expected, actual):
self.expected = expected
self.actual = actual
class MergeError(PGPError):
pass
class SignatureError(PGPError):
pass
def getKeyId(keyRing):
pkt = PGP_Message.newPacketFromStream(keyRing, start = -1)
assert pkt is not None
return pkt.getKeyId()
def getKeyFromString(keyId, data):
msg = PGP_Message(util.ExtendedStringIO(data))
return msg.getKeyByKeyId(keyId)
def seekKeyById(keyId, keyRing):
if isinstance(keyRing, str):
try:
keyRing = util.ExtendedFile(keyRing, buffering = False)
except (IOError, OSError), e:
# if we can't read/find the key, it's not there.
return False
msg = PGP_Message(keyRing)
try:
return msg.iterByKeyId(keyId).next()
except StopIteration:
return False
def exportKey(keyId, keyRing, armored=False):
"""Export the key from the keyring, performing the necessary locking
@param keyId: the key ID
@type keyId: str
@param keyRing: the keyring from where the key is to be extracted
@type keyRing: file or path
@param armored: If True, exports the key in a Radix-64 encoding (armor)
@type armored: bool
@rtype: stream
@return: the key in a stream
"""
if isinstance(keyRing, str):
try:
keyRing = util.ExtendedFile(keyRing, buffering = False)
except (IOError, OSError), e:
# if we can't read/find the key, it's not there.
raise KeyNotFound(keyId)
msg = PGP_Message(keyRing)
key = msg.getKeyByKeyId(keyId)
# if the key we requested was a subkey, use the main key
if isinstance(key, PGP_SubKey):
key = key.getParentPacket()
sio = util.ExtendedStringIO()
key.writeAll(sio)
if armored:
sio.seek(0)
keyData = sio.read()
sio.truncate(0)
armorKeyData(keyData, sio)
sio.seek(0)
return sio
def armorKeyData(keyData, stream):
"""
Write the Radix-64 encoded version of the key data.
@param keyData: key data
@type keyData: str
@param stream: A stream open in write mode
@type stream: file
"""
assert(isinstance(keyData, str))
assert(hasattr(stream, "write"))
crc = CRC24(keyData).base64digest()
stream.write("-----BEGIN PGP PUBLIC KEY BLOCK-----\n")
stream.write("Version: Conary ")
stream.write(constants.version)
stream.write("\n\n")
for line in textwrap.wrap(base64.b64encode(keyData), 72):
stream.write(line)
stream.write('\n')
# Add the CRC
stream.write('=')
stream.write(crc)
stream.write("\n-----END PGP PUBLIC KEY BLOCK-----")
return stream
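# Editor's note: for reference, the armor produced by armorKeyData() above has
# the following overall shape (the base64 body and CRC shown are placeholders,
# not real values):
#
#   -----BEGIN PGP PUBLIC KEY BLOCK-----
#   Version: Conary <version>
#
#   mQENBF...base64 lines wrapped at 72 characters...
#   =AbCd
#   -----END PGP PUBLIC KEY BLOCK-----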
def verifySelfSignatures(keyId, stream):
msg = PGP_Message(stream, start = 0)
pkt = msg.getKeyByKeyId(keyId)
return pkt.verifySelfSignatures()
def fingerprintToInternalKeyId(fingerprint):
if len(fingerprint) == 0:
return ''
fp = fingerprint[-16:]
return binascii.unhexlify(fp)
def binSeqToString(sequence):
"""sequence is a sequence of unsigned chars.
Return the string with a corresponding char for each item"""
return "".join([ chr(x) for x in sequence ])
def stringToAscii(sequence):
"""sequence is a sequence of characters.
Return the string with the hex representation for each character"""
return "".join("%02x" % ord(c) for c in sequence).upper()
def simpleS2K(passPhrase, hashAlg, keySize):
# RFC 2440 3.6.1.1.
r = ''
iteration = 0
keyLength = ((keySize + 7) // 8)
while len(r) < keyLength:
d = hashAlg(chr(0) * iteration)
d.update(passPhrase)
r += d.digest()
iteration += 1
return r[:keyLength]
def saltedS2K(passPhrase, hashAlg, keySize, salt):
# RFC 2440 3.6.1.2.
r = ''
iteration = 0
keyLength = ((keySize + 7) // 8)
while(len(r) < keyLength):
d = hashAlg()
buf = chr(0) * iteration
buf += salt + passPhrase
d.update(buf)
r += d.digest()
iteration += 1
return r[:keyLength]
def iteratedS2K(passPhrase, hashAlg, keySize, salt, count):
# RFC 2440 3.6.1.3.
    r = ''
    iteration = 0
    count = (16 + (count & 15)) << ((count >> 4) + 6)
buf = salt + passPhrase
keyLength = (keySize + 7) // 8
while(len(r) < keyLength):
d = hashAlg()
d.update(iteration * chr(0))
total = 0
while (count - total) > len(buf):
d.update(buf)
total += len(buf)
if total:
d.update(buf[:count-total])
else:
d.update(buf)
r += d.digest()
iteration += 1
return r[:keyLength]
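# Editor's note: a small sketch of how the coded count octet used by
# iteratedS2K() above expands into the number of octets actually hashed
# (RFC 2440 3.6.1.3).  The sample value in the trailing comment is arbitrary;
# the helper is illustrative only and is not called anywhere in this module.
def _decodedS2KCount(codedCount):
    """Decode a one-octet coded count into the real octet count"""
    return (16 + (codedCount & 15)) << ((codedCount >> 4) + 6)
# e.g. _decodedS2KCount(96) == (16 + 0) << (6 + 6) == 65536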
def getUserIdsFromString(keyId, data):
keyRing = util.ExtendedStringIO(data)
key = seekKeyById(keyId, keyRing)
    if not key:
return []
return list(key.getUserIds())
def getFingerprint(keyId, keyFile=''):
if keyFile == '':
if 'HOME' not in os.environ:
keyFile = None
else:
keyFile=os.environ['HOME'] + '/.gnupg/pubring.gpg'
try:
keyRing = util.ExtendedFile(keyFile, buffering = False)
except IOError:
raise KeyNotFound(keyId, "Couldn't open keyring")
keyRing.seek(0, SEEK_END)
limit = keyRing.tell()
if limit == 0:
# no keys in a zero length file
raise KeyNotFound(keyId, "Couldn't open keyring")
keyRing.seek(0, SEEK_SET)
msg = PGP_Message(keyRing)
pkt = msg.getKeyByKeyId(keyId)
return pkt.getKeyFingerprint()
def addKeys(keys, fpath):
"""Add keys to the file"""
return addPackets(keys, fpath, "getKeyFingerprint",
PGP_Message, "iterMainKeys")
def addKeyTimestampPackets(pkts, fpath):
"""Add key timestamp packets to the file"""
return addPackets(pkts, fpath, "getKeyId",
TimestampPacketDatabase, "iterTrustPackets")
def addPackets(pkts, fpath, pktIdFunc, messageFactory, streamIterFunc):
"""Add packets to the file. Return the packet IDs for the added packets"""
# This code really expects the stream to be based on a file, since we need
# a fileno() too
pktsDict = {}
for k in pkts:
pktId = getattr(k, pktIdFunc)()
if pktId in pktsDict:
pktsDict[pktId].merge(k)
else:
pktsDict[pktId] = k
if not pktsDict:
return []
tmpfd, tmpfname = tempfile.mkstemp(prefix=os.path.basename(fpath),
dir=os.path.dirname(fpath))
tempf = util.ExtendedFdopen(tmpfd)
pktIds = []
# Lock the stream
try:
stream = file(fpath, "r+")
while 1:
streamfd = stream.fileno()
fcntl.lockf(streamfd, fcntl.LOCK_EX)
# We got the lock. Did the file change?
# There is a possibility that another writer that was previously
# holding the lock replaced the file, which would mean our current
# stream is stale.
newstream = file(fpath, "r+")
oStat = os.fstat(streamfd)
nStat = os.fstat(newstream.fileno())
if (oStat.st_dev, oStat.st_ino) == (nStat.st_dev, nStat.st_ino):
# Same file descriptor, we can continue
del oStat, nStat
break
# Replace our existing stream and continue
fcntl.lockf(streamfd, fcntl.LOCK_UN)
stream = newstream
# At this point, we have an exclusive lock on the stream
msg = messageFactory(stream, start = 0)
for ipkt in getattr(msg, streamIterFunc)():
iPktId = getattr(ipkt, pktIdFunc)()
if iPktId in pktsDict:
ipkt.merge(pktsDict[iPktId])
pktIds.append(iPktId)
del pktsDict[iPktId]
ipkt.writeAll(tempf)
# Add the rest of the packets
for pkt in pkts:
pktId = getattr(pkt, pktIdFunc)()
if pktId not in pktsDict:
continue
pkt.writeAll(tempf)
del pktsDict[pktId]
pktIds.append(pktId)
# Now copy the keyring back
tempf.close()
os.rename(tmpfname, fpath)
return pktIds
finally:
fcntl.lockf(streamfd, fcntl.LOCK_UN)
def verifyRFC2440Checksum(data):
# RFC 2440 5.5.3 - Secret Key Packet Formats documents the checksum
if len(data) < 2:
return 0
checksum = [ ord(x) for x in data[-2:] ]
checksum = int2FromBytes(*checksum)
runningCount = computeRFC2440Checksum(data[:-2])
return (runningCount == checksum)
def computeRFC2440Checksum(data):
runningCount=0
for c in data:
runningCount += ord(c)
runningCount %= 65536
return runningCount
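# Editor's note: a tiny round-trip sketch for the RFC 2440 checksum helpers
# above -- compute the two checksum octets for some arbitrary data, append
# them, and confirm verifyRFC2440Checksum() accepts the result.  Illustrative
# only; not called anywhere in this module.
def _rfc2440ChecksumSketch(data='arbitrary secret key material'):
    csum = computeRFC2440Checksum(data)
    packed = data + chr((csum >> 8) & 0xFF) + chr(csum & 0xFF)
    return verifyRFC2440Checksum(packed)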
def verifySHAChecksum(data):
if len(data) < 20:
return 0
digest = computeSHAChecksum(data[:-20])
return digest == data[-20:]
def computeSHAChecksum(data):
m = digestlib.sha1()
m.update(data)
return m.digest()
def xorStr(str1, str2):
return ''.join(chr(ord(x) ^ ord(y)) for x, y in zip(str1, str2))
def countKeys(keyRing):
# counts the public and private keys in a key ring (does not count subkeys)
msg = PGP_Message(keyRing)
return len([pkt for pkt in msg.iterPackets()
if pkt.tag in (PKT_SECRET_KEY, PKT_PUBLIC_KEY)])
def getFingerprints(keyRing):
# returns the fingerprints for all keys in a key ring file
msg = PGP_Message(keyRing)
return [ x.getKeyFingerprint() for x in msg.iterKeys() ]
def readSignature(fileobj):
"""
Read a signature packet from a stream.
@param fileobj: a stream to read the key from
@type fileobj: stream
@rtype: PGP_Signature
@return: the Signature packet
@raise InvalidPacketError: if the file object does not contain a valid
packet
"""
sio = util.ExtendedStringIO()
hasData = parseAsciiArmor(fileobj, sio)
if not hasData:
raise InvalidPacketError("No data found")
msg = PGP_Message(sio, start = 0)
# We can't really have a StopIteration here, if there was no packet in the
# message we would have failed in parseAsciiArmor
try:
pkt = msg.iterPackets().next()
except ShortReadError:
raise InvalidPacketError("Error reading signature packet")
if not isinstance(pkt, PGP_Signature):
raise InvalidPacketError("Not a signature packet")
return pkt
def parseAsciiArmor(asciiData, dest):
"""
Parse an armored (Radix-64 encoded) PGP message.
@param asciiData: the Radix-64 encoded PGP message
@type asciiData: string or stream
@param dest: a stream to deposit the message into
@type dest: stream
@return: True if data was decoded, False otherwise
@rtype: bool
@raise PGPError: if the CRC does not match the message
"""
if hasattr(asciiData, 'read'):
data = asciiData
else:
data = StringIO(asciiData)
crc = None
nextLine = data.read(1)
if nextLine and (ord(nextLine[0]) & 0x80):
# This is already a binary packet
dest.write(nextLine)
PGP_BasePacket._copyStream(data, dest)
return True
try:
while(nextLine[0] != '-'):
nextLine = data.readline()
while (nextLine[0] != "\r") and (nextLine[0] != "\n"):
nextLine = data.readline()
buf = ""
nextLine = data.readline()
while(nextLine[0] != '=' and nextLine[0] != '-'):
buf = buf + nextLine
nextLine = data.readline()
if nextLine[0] == '=':
# This is the CRC
crc = nextLine.strip()[1:]
except IndexError:
return False
try:
keyData = base64.b64decode(buf)
except TypeError:
return False
if crc:
crcobj = CRC24(keyData)
ccrc = crcobj.base64digest()
if crc != ccrc:
raise PGPError("Message does not verify CRC checksum", crc, ccrc)
dest.write(keyData)
return True
def parseAsciiArmorKey(asciiData):
"""
Parse an armored (Radix-64 encoded) PGP message.
@param asciiData: the Radix-64 encoded PGP message
@type asciiData: string or stream
    @return: the unencoded PGP message, or None if the encoded message was
incorrect
@rtype: string or None
@raise PGPError: if the CRC does not match the message
"""
sio = util.ExtendedStringIO()
ret = parseAsciiArmor(asciiData, sio)
if not ret:
return None
sio.seek(0)
return sio.read()
class CRC24(object):
__slots__ = [ '_crc' ]
CRC24_INIT = 0xb704ce
CRC24_POLY = 0x1864cfb
def __init__(self, data=''):
self._crc = self.CRC24_INIT
self.update(data)
def update(self, data):
crc = self._crc
for ch in data:
crc ^= (ord(ch) << 16)
for i in range(8):
crc <<= 1
if crc & 0x1000000:
crc ^= self.CRC24_POLY
self._crc = crc
def digest(self):
r = self._crc & 0xffffff
return chr((r >> 16) & 0xff) + chr((r >> 8) & 0xff) + chr(r & 0xff)
def base64digest(self):
return base64.b64encode(self.digest())
def _crc24(stream):
if isinstance(stream, str):
stream = StringIO(stream)
crc = CRC24()
while 1:
buf = stream.read(8192)
if not buf:
break
crc.update(buf)
return crc
def crc24(stream):
return _crc24(stream).digest()
def crc24base64(stream):
return _crc24(stream).base64digest()
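# Editor's note: a short usage sketch for the CRC24 helpers above.  It
# computes the armor checksum of an in-memory string both incrementally and
# through the stream helper and returns both encodings; the input text is
# arbitrary and the helper is not called anywhere in this module.
def _crc24Sketch(data='example armor payload'):
    crcObj = CRC24()
    crcObj.update(data)
    # crc24base64() accepts either a string or a stream
    return crcObj.base64digest(), crc24base64(data)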
class PacketTypeDispatcher(object):
_registry = {}
@classmethod
def addPacketType(cls, klass):
cls._registry[klass.tag] = klass
@classmethod
def getClass(cls, tag):
return cls._registry.get(tag, PGP_Packet)
class PGP_Message(object):
__slots__ = ['_f', 'pos']
PacketDispatcherClass = PacketTypeDispatcher
def __init__(self, message, start = -1):
self._f = self._openStream(message)
self.pos = start
@classmethod
def _openStream(cls, message):
if hasattr(message, "pread"):
return message
if isinstance(message, str):
# Assume a path
return util.ExtendedFile(message, buffering = False)
# Be tolerant, accept non-Extended objects
if hasattr(message, 'fileno') and not hasattr(message, "pread"):
# Try to reopen as an ExtendedFile. We have to dup the file
# descriptor, otherwise it gets closed unexpectedly when the
# original message object gets out of scope
f = util.ExtendedFdopen(os.dup(message.fileno()))
f.seek(message.tell())
return f
raise MalformedKeyRing("Not an ExtendedFile object")
def _getPacket(self):
pkt = self.newPacketFromStream(self._f, start = self.pos)
return pkt
def iterPackets(self):
pkt = self._getPacket()
while 1:
if pkt is None:
break
yield pkt
pkt = pkt.next()
def iterTrustPackets(self):
"""Iterate over all trust packets"""
for pkt in self.iterPackets():
if isinstance(pkt, PGP_Trust):
yield pkt
def iterKeys(self):
"""Iterate over all keys"""
for pkt in self.iterMainKeys():
yield pkt
for subkey in pkt.iterSubKeys():
yield subkey
def iterMainKeys(self):
"""Iterate over main keys"""
for pkt in self.iterPackets():
if isinstance(pkt, PGP_MainKey):
try:
pkt.initSubPackets()
except InvalidBodyError:
# Skip this key
continue
yield pkt
def iterByKeyId(self, keyId):
"""Iterate over the keys with this key ID"""
for pkt in self.iterKeys():
if pkt.hasKeyId(keyId):
yield pkt
def getKeyByKeyId(self, keyId):
try:
return self.iterByKeyId(keyId).next()
except StopIteration:
raise KeyNotFound(keyId)
def seekParentKey(self, keyId):
"""Get a parent key with this keyId or with a subkey with this
keyId"""
for pkt in self.iterKeys():
if isinstance(pkt, PGP_MainKey):
if pkt.getKeyFingerprint().endswith(keyId.upper()):
# This is a main key and it has the keyId we need
return pkt
elif isinstance(pkt, PGP_SubKey):
if pkt.getKeyFingerprint().endswith(keyId.upper()):
# This is a subkey, return the main key
return pkt.getMainKey()
@classmethod
def newPacketFromStream(cls, stream, start = -1):
stream = cls._openStream(stream)
return PGP_PacketFromStream(cls).read(stream, start = start)
@classmethod
def newPacket(cls, tag, bodyStream, newStyle = False, minHeaderLen = 2):
"""Create a new Packet"""
typeDispatcher = cls.PacketDispatcherClass
klass = typeDispatcher.getClass(tag)
pkt = klass(bodyStream, newStyle = newStyle, minHeaderLen = minHeaderLen)
if not hasattr(pkt, 'tag'): # No special class for this packet
pkt.setTag(tag)
pkt._msgClass = cls
return pkt
class PGP_PacketFromStream(object):
__slots__ = ['_f', 'tag', 'headerLength', 'bodyLength', '_msgClass']
def __init__(self, msgClass):
self.tag = None
self.headerLength = self.bodyLength = 0
self._f = None
self._msgClass = msgClass
def read(self, fileobj, start = -1):
"""Create packet from stream
Return a PGP_Packet instance"""
self._f = util.SeekableNestedFile(fileobj, 1, start)
first = self._f.read(1)
if not first:
# No more packets to read from this file object
return
first = ord(first)
if not (first & 0x80):
raise InvalidPacketError("First bit not 1")
if first & 0x40:
newStyle = True
self._newHeader(first)
else:
newStyle = False
self._oldHeader(first)
if self.bodyLength is None:
# Indeterminate length; assume end of file
fileobj.seek(0, 2)
self.bodyLength = fileobj.tell() - (self._f.start + self.headerLength)
_bodyStream = util.SeekableNestedFile(self._f.file,
self.bodyLength, self._f.start + self.headerLength)
if self.bodyLength:
# Read one octet from the end
data = _bodyStream.pread(1, self.bodyLength - 1)
if not data:
raise ShortReadError(self.bodyLength, -1)
_bodyStream.seek(0)
nextStreamPos = self._f.start + self.headerLength + self.bodyLength
pkt = self._msgClass.newPacket(self.tag, _bodyStream,
newStyle = newStyle,
minHeaderLen = self.headerLength)
pkt.setNextStream(fileobj, nextStreamPos)
return pkt
def _oldHeader(self, first):
self.tag = (first & 0x3C) >> 2
lengthType = first & 0x03
if lengthType in (0, 1, 2):
headerLength = lengthType + 2
if lengthType == 2:
headerLength += 1
blLen = headerLength - 1
# Extend file
self._f.__init__(self._f.file, headerLength, self._f.start)
self._f.seek(1)
else:
if self.tag != PKT_COMPRESSED_DATA:
raise NotImplementedError("Indeterminate length not supported")
self.headerLength = 1
self.bodyLength = None
return
self.headerLength = headerLength
bbytes = PGP_BasePacket._readBin(self._f, blLen)
bodyLength = 0
for i in bbytes:
bodyLength <<= 8
bodyLength += i
self.bodyLength = bodyLength
def _newHeader(self, first):
# New style
self.tag = (first & 0x3F)
# Extend by one more byte
self._f.__init__(self._f.file, 2, self._f.start)
self._f.seek(1)
body1, = PGP_BasePacket._readBin(self._f, 1)
if body1 & 0xC0 == 0:
# 4.2.2.1. One-Octet Lengths (less than 192)
self.headerLength = 2
self.bodyLength = body1
return
if 192 <= body1 < 223:
# 4.2.2.2. Two-Octet Lengths (between 192 and 223):
self.headerLength = 3
self._f.__init__(self._f.file, self.headerLength, self._f.start)
self._f.seek(2)
body2, = PGP_BasePacket._readBin(self._f, 1)
self.bodyLength = len2bytes(body1, body2)
return
if body1 == 0xFF:
# 4.2.2.3. Five-Octet Lengths (exactly 255)
self.headerLength = 6
self._f.__init__(self._f.file, self.headerLength, self._f.start)
self._f.seek(2)
rest = PGP_BasePacket._readBin(self._f, 4)
self.bodyLength = int4FromBytes(*rest)
return
# 4.2.2.4. Partial Body Lengths
partialBodyLength = 1 << (body1 & 0x1F)
raise NotImplementedError("Patial body lengths not implemented")
class PGP_BasePacket(object):
__slots__ = ['_bodyStream', 'headerLength', 'bodyLength',
'_newStyle', '_nextStream', '_nextStreamPos',
'_parentPacket', '_msgClass']
tag = None
BUFFER_SIZE = 16384
_parentPacketTypes = set()
def __init__(self, bodyStream, newStyle = False, minHeaderLen = 2):
assert hasattr(bodyStream, 'pread')
self._newStyle = newStyle
self._bodyStream = bodyStream
self.bodyLength = self._getBodyLength()
self.headerLength = self._getHeaderLength(minHeaderLen = minHeaderLen)
# Keep a reference to the next stream we link to
self._nextStream = None
self._nextStreamPos = 0
self._parentPacket = None
self.setUp()
self.initialize()
def setNextStream(self, stream, pos):
if stream:
assert hasattr(stream, 'pread')
self._nextStream = stream
self._nextStreamPos = pos
def setParentPacket(self, pkt, clone = True):
"""Add a parent packet to this packet"""
if pkt is None:
self._parentPacket = None
return
assert pkt.tag in self._parentPacketTypes
if clone:
self._parentPacket = pkt.clone()
else:
self._parentPacket = pkt
def getParentPacket(self):
return self._parentPacket
def clone(self):
"""Produce another packet identical with this one"""
# Create new body stream sharing the same file
newBodyStream = util.SeekableNestedFile(self._bodyStream.file,
self._bodyStream.size, self._bodyStream.start)
newPkt = self._msgClass.newPacket(self.tag, newBodyStream,
newStyle = self._newStyle, minHeaderLen = self.headerLength)
newPkt.setNextStream(self._nextStream, self._nextStreamPos)
newPkt.setParentPacket(self.getParentPacket(), clone = False)
return newPkt
def setUp(self):
"""setUp is executed at object creation time."""
pass
def initialize(self):
"""initialize is executed at object creation time, after setUp(),
and generally everywhere the state has to be reset.
To be overridden by various subclasses"""
pass
def _getHeaderLength(self, minHeaderLen = 2):
# bsrepr is the body size representation
if self._newStyle:
# For new style, we can't really force the minimum header length
if self.bodyLength < 192:
return 2
if 192 <= self.bodyLength < 8384:
return 3
return 6
if minHeaderLen > 3 or self.bodyLength > 65535:
# 4-byte packet length field
bsrepr = 4
elif minHeaderLen > 2 or self.bodyLength > 255:
# 2-byte packet length field
bsrepr = 2
else:
# 1 byte packet-length field
bsrepr = 1
return bsrepr + 1
def _getBodyLength(self):
"""Determine the body length"""
pos = self._bodyStream.tell()
self._bodyStream.seek(0, SEEK_END)
blen = self._bodyStream.tell()
self._bodyStream.seek(pos, SEEK_SET)
return blen
def writeHeader(self, stream):
# Generate packet header
if self._newStyle:
return self._writeHeaderNewStyle(stream)
return self._writeHeaderOldStyle(stream)
def _writeHeaderNewStyle(self, stream):
# bit 7 is set, bit 6 is set (new packet format)
fbyte = 0xC0
# Add the tag.
fbyte |= self.tag
stream.write(chr(fbyte))
if self.headerLength == 6:
# 5-byte body length length, first byte is 255
stream.write(chr(255))
blen = self.bodyLength & 0xffffffff
self._writeBin(stream, len4ToBytes(blen))
return
if self.headerLength == 3:
# 2-byte body length length
if not (192 <= self.bodyLength < 8384):
raise InvalidPacketError("Invalid body length %s for "
"header length %s" % (self.bodyLength, self.headerLength))
self._writeBin(stream, len2ToBytes(self.bodyLength))
return
if self.headerLength == 2:
# 1-byte body length length
if not (self.bodyLength < 192):
raise InvalidPacketError("Invalid body length %s for "
"header length %s" % (self.bodyLength, self.headerLength))
stream.write(chr(self.bodyLength))
return
raise InvalidPacketError("Invalid header length %s" % self.headerLength)
def _writeHeaderOldStyle(self, stream):
# bit 7 is set, bit 6 is not set (old packet format)
fbyte = 0x80
# Add the tag, bits 5432. For old-style headers, they are represented
# on 4 bits only.
fbyte |= (0x0f & self.tag) << 2
# bsrepr is the body size representation
if self.headerLength == 5:
# 4-byte packet length field
fbyte |= 2
bsrepr = 4
elif self.headerLength == 3:
# 2-byte packet length field
fbyte |= 1
bsrepr = 2
else:
# 1 byte packet-length field (no changes to first byte needed)
bsrepr = 1
stream.write(chr(fbyte))
# prepare the size octets
for i in range(1, bsrepr + 1):
stream.write(chr((self.bodyLength >> ((bsrepr - i) << 3)) & 0xff))
def writeBody(self, stream):
self.resetBody()
self._copyStream(self._bodyStream, stream)
def write(self, stream):
self.writeHeader(stream)
self.writeBody(stream)
def writeAll(self, stream):
# Write this packet and all subpackets
self.write(stream)
for pkt in self.iterSubPackets():
pkt.write(stream)
def resetBody(self):
self._bodyStream.seek(0)
def readBody(self, bytes = -1):
"""Read bytes from stream"""
return self._bodyStream.pread(bytes, 0)
def seek(self, pos, whence = SEEK_SET):
return self._bodyStream.seek(pos, whence)
@staticmethod
def _readExact(stream, bytes):
"""Read bytes from stream, checking that enough bytes were read"""
data = stream.read(bytes)
if bytes > 0 and len(data) != bytes:
raise ShortReadError(bytes, len(data))
return data
@staticmethod
def _readBin(stream, bytes):
"""Read bytes from stream, checking that enough bytes were read.
Return a list of bytes"""
return [ ord(x) for x in PGP_BasePacket._readExact(stream, bytes) ]
def readExact(self, bytes):
"""Read bytes from stream, checking that enough bytes were read"""
return self._readExact(self._bodyStream, bytes)
def readBin(self, bytes):
"""Read bytes from stream, checking that enough bytes were read.
Return a list of bytes"""
return self._readBin(self._bodyStream, bytes)
@staticmethod
def _writeBin(stream, bytes):
"""Write the bytes in binary format"""
for b in bytes:
stream.write(chr(b))
@staticmethod
def _copyStream(src, dst):
"""Copy stream src into dst"""
while 1:
buf = src.read(PGP_BasePacket.BUFFER_SIZE)
if not buf:
break
dst.write(buf)
@staticmethod
def _updateHash(hashObj, stream):
"""Update the hash object with data from the stream"""
while 1:
buf = stream.read(PGP_BasePacket.BUFFER_SIZE)
if not buf:
break
hashObj.update(buf)
@staticmethod
def _updateHashBin(hashObj, binSeq):
"""
Update the hash object with binary octets from a sequence of octets
@param hashObj: a hash object
@param binSeq: a sequence of bytes
"""
for b in binSeq:
hashObj.update(chr(b))
@staticmethod
def checkStreamLength(stream, length):
"""Checks that the stream has exactly the length specified extra
bytes from the current position"""
pos = stream.tell()
stream.seek(0, SEEK_END)
if length != stream.tell() - pos:
raise ShortReadError(length, stream.tell() - pos)
# SeekableNestedFiles will happily pass the previous test, so be more
# devious: seek to the (end - 1), try to read one byte
# Determining the actual length is hard, but worth it
i = stream.tell() - 1
while i > pos:
stream.seek(i, SEEK_SET)
if len(stream.read(1)) == 1:
break
i -= 1
if length != stream.tell() - pos:
raise ShortReadError(length, stream.tell() - pos)
# Rewind
stream.seek(pos)
@staticmethod
def readTimestamp(stream):
"""Reads a timestamp from the stream"""
stream.seek(0)
PGP_BasePacket.checkStreamLength(stream, 4)
return len4bytes(*PGP_BasePacket._readBin(stream, 4))
def isEmpty(self):
return self.headerLength == 0
def next(self):
if self._nextStream is None:
raise StopIteration()
newPkt = self._msgClass.newPacketFromStream(self._nextStream,
self._nextStreamPos)
if newPkt is None:
raise StopIteration()
return newPkt
def getBodyStream(self):
return self._bodyStream
def _iterSubPackets(self, limitTags):
"""Iterate over the packets following this packet, until we reach a
packet of the specified type as the limit"""
pkt = self.next()
while not pkt.isEmpty() and pkt.tag not in limitTags:
yield pkt
pkt = pkt.next()
@staticmethod
def _hashSet(items):
"""Hashes the items in items through sha, and return a set of the
computed digests.
Each item is expected to be a stream"""
ret = set([])
for stream in items:
stream.seek(0)
hobj = digestlib.sha1()
PGP_BasePacket._updateHash(hobj, stream)
ret.add(hobj.digest())
return ret
class PGP_Packet(PGP_BasePacket):
"""Anonymous PGP packet"""
__slots__ = ['tag']
def setTag(self, tag):
self.tag = tag
class PGP_BaseKeySig(PGP_BasePacket):
"""Base class for keys and signatures"""
__slots__ = []
def _getMPICount(self, algType):
"""This returns the right number of MPIs for converting a private key
to a public key. Overwrite in subclasses for any other usage"""
if algType in PK_ALGO_ALL_RSA:
numMPI = 2
elif algType in PK_ALGO_ALL_ELGAMAL:
numMPI = 3
elif algType == PK_ALGO_DSA:
numMPI = 4
else:
# unhandled algorithm
raise UnsupportedEncryptionAlgorithm(algType)
return numMPI
def _readMPIs(self, stream, algType, discard = True):
"""Read the corresponding number of MPIs for the specified algorithm
type from the stream
@raise UnsupportedEncryptionAlgorithm:
"""
numMPI = self._getMPICount(algType)
return self._readCountMPIs(stream, numMPI, discard = discard)
@staticmethod
def _readCountMPIs(stream, count, discard = True):
"""Read count MPIs from the current position in stream.
@raise UnsupportedEncryptionAlgorithm:
"""
ret = []
for i in range(count):
buf = PGP_BaseKeySig._readBin(stream, 2)
mLen = (int2FromBytes(*buf) + 7) // 8
if discard:
# Skip the MPI len
PGP_BaseKeySig._readExact(stream, mLen)
ret.append(None)
else:
data = PGP_BaseKeySig._readBin(stream, mLen)
r = 0L
for i in data:
r = r * 256 + i
ret.append(r)
return ret
@staticmethod
def _writeMPI(stream, mpi):
bytes = []
while mpi != 0:
bytes.append(mpi & 0xFF)
mpi >>= 8
# Compute length in bits
if not bytes:
# Zero length
bitlen = 0
else:
# The only variable part can be the one in the most significant
# octet, which is the last
bitlen = 8 * (len(bytes) - 1) + num_bitLen(bytes[-1])
PGP_BaseKeySig._writeBin(stream, int2ToBytes(bitlen))
PGP_BaseKeySig._writeBin(stream, reversed(bytes))
def skipMPIs(self, stream, algType):
self._readMPIs(stream, algType, discard = True)
def readMPIs(self, stream, algType):
return self._readMPIs(stream, algType, discard = False)
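# Editor's note: a minimal sketch of the OpenPGP MPI wire format read and
# written by PGP_BaseKeySig above (RFC 2440 3.2): a two-octet big-endian bit
# count followed by the magnitude, most significant octet first.  It returns
# a list of octet values instead of writing to a stream.  Illustrative only;
# not called anywhere in this module.
def _encodeMPISketch(value):
    octets = []
    while value:
        octets.append(value & 0xFF)
        value >>= 8
    octets.reverse()
    bitLen = 0
    if octets:
        top = octets[0]
        bits = 0
        while top:
            bits += 1
            top >>= 1
        bitLen = 8 * (len(octets) - 1) + bits
    return [bitLen >> 8, bitLen & 0xFF] + octets
# e.g. _encodeMPISketch(511) == [0, 9, 1, 255]   (511 is a 9-bit number)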
class PGP_Signature(PGP_BaseKeySig):
__slots__ = ['version', 'sigType', 'pubKeyAlg', 'hashAlg', 'hashSig',
'mpiFile', 'signerKeyId', 'hashedFile', 'unhashedFile',
'creation', '_parsed', '_sigDigest', '_parentPacket',
'_hashedSubPackets', '_unhashedSubPackets', '_verifies',]
tag = PKT_SIG
_parentPacketTypes = set(PKT_ALL_KEYS).union(PKT_ALL_USER)
HashAlgList = {
1: digestlib.md5,
2: digestlib.sha1,
8: digestlib.sha256,
9: digestlib.sha384,
10: digestlib.sha512,
11: digestlib.sha224,
}
# hashPads from RFC2440 section 5.2.2
HashAlgPads = {
1: "\x30\x20\x30\x0C\x06\x08\x2A\x86"
"\x48\x86\xF7\x0D\x02\x05\x05\x00"
"\x04\x10",
2: "\x30\x21\x30\x09\x06\x05\x2b\x0E"
"\x03\x02\x1A\x05\x00\x04\x14",
8: "\x30\x31\x30\x0d\x06\x09\x60\x86"
"\x48\x01\x65\x03\x04\x02\x01\x05"
"\x00\x04\x20",
9: "\x30\x41\x30\x0d\x06\x09\x60\x86"
"\x48\x01\x65\x03\x04\x02\x02\x05"
"\x00\x04\x30",
10: "\x30\x51\x30\x0d\x06\x09\x60\x86"
"\x48\x01\x65\x03\x04\x02\x03\x05"
"\x00\x04\x40",
11: "\x30\x31\x30\x0d\x06\x09\x60\x86"
"\x48\x01\x65\x03\x04\x02\x04\x05"
"\x00\x04\x1C",
}
def initialize(self):
self.version = self.sigType = self.pubKeyAlg = self.hashAlg = None
self.hashSig = self.mpiFile = self.signerKeyId = None
self.hashedFile = self.unhashedFile = None
self.creation = None
self._parsed = False
self._sigDigest = None
self._hashedSubPackets = None
self._unhashedSubPackets = None
self._verifies = None
def parse(self, force = False):
"""Parse the signature body and initializes the internal data
structures for other operations"""
if self._parsed and not force:
return
self.resetBody()
# Reset all internal state
self.initialize()
sigVersion, = self.readBin(1)
if sigVersion not in [2, 3, 4]:
raise InvalidBodyError("Invalid signature version %s" % sigVersion)
self.version = sigVersion
# Version 2 signatures are documented in RFC1991, and are identical to
# version 3 signatures
if sigVersion in [2, 3]:
self._readSigV3()
else:
self._readSigV4()
self._parsed = True
def _getMPICount(self, algType):
if algType in PK_ALGO_ALL_RSA:
numMPI = 1
elif algType in PK_ALGO_ALL_ELGAMAL:
numMPI = 2
elif algType == PK_ALGO_DSA:
numMPI = 2
else:
# unhandled algorithm
raise UnsupportedEncryptionAlgorithm(algType)
return numMPI
def parseMPIs(self):
self.parse()
assert hasattr(self, 'mpiFile') and self.mpiFile is not None
self.mpiFile.seek(0)
return self.readMPIs(self.mpiFile, self.pubKeyAlg)
def _readSigV3(self):
hLen, sigType = self.readBin(2)
if hLen != 5:
raise PGPError('Expected 5 octets of length of hashed material, '
'got %d' % hLen)
self.creation = self.readBin(4)
self.signerKeyId = self.readBin(8)
pkAlg, hashAlg = self.readBin(2)
hashSig = self.readExact(2)
# MPI data
mpiFile = util.SeekableNestedFile(self._bodyStream,
self.bodyLength - self._bodyStream.tell())
self.sigType = sigType
self.pubKeyAlg = pkAlg
self.hashAlg = hashAlg
self.hashSig = hashSig
self.mpiFile = mpiFile
def _readSigV4(self):
sigType, pkAlg, hashAlg = self.readBin(3)
# Hashed subpacket data length
arr = self.readBin(2)
hSubPktLen = (arr[0] << 8) + arr[1]
hSubpktsFile = util.SeekableNestedFile(self._bodyStream, hSubPktLen)
# Skip over the packets, we've decoded them already
self.seek(hSubPktLen, SEEK_CUR)
# Unhashed subpacket data length
arr = self.readBin(2)
uSubPktLen = (arr[0] << 8) + arr[1]
uSubpktsFile = util.SeekableNestedFile(self._bodyStream, uSubPktLen)
# Skip over the packets, we've decoded them already
self.seek(uSubPktLen, SEEK_CUR)
# Two-octet field holding left 16 bits of signed hash value.
hashSig = self.readExact(2)
# MPI data
mpiFile = util.SeekableNestedFile(self._bodyStream,
self.bodyLength - self._bodyStream.tell())
self.sigType = sigType
self.pubKeyAlg = pkAlg
self.hashAlg = hashAlg
self.mpiFile = mpiFile
self.hashSig = hashSig
self.hashedFile = hSubpktsFile
self.unhashedFile = uSubpktsFile
def _writeSigV4(self):
self.parse()
stream = util.ExtendedStringIO()
self.hashedFile.seek(0, SEEK_END)
self.unhashedFile.seek(0, SEEK_END)
unhashedLen = self.unhashedFile.tell()
self._writeBin(stream, [4, self.sigType, self.pubKeyAlg, self.hashAlg])
for sstream in [ self.hashedFile, self.unhashedFile ]:
# Determine length
sstream.seek(0, SEEK_END)
slen = sstream.tell()
# subpackets data length
self._writeBin(stream, int2ToBytes(slen))
# And the stream itself
sstream.seek(0)
self._copyStream(sstream, stream)
# 2-octet hash sig
stream.write(self.hashSig)
# MPI file
self.mpiFile.seek(0)
self._copyStream(self.mpiFile, stream)
return stream
def _getSubpacketInt4(self, subpacketTypes):
stream = self._getSubpacketStream(subpacketTypes)
if stream is None:
return None
return int4FromBytes(*self._readBin(stream, 4))
def _getSubpacketStream(self, subpacketTypes):
pkts = [ x[1] for x in self.decodeHashedSubpackets()
if x[0] in subpacketTypes ]
if not pkts:
return None
pkt = pkts[0]
pkt.seek(0, SEEK_SET)
return pkt
def getCreation(self):
"""Return the signature creation timestamp, or 0 if no creation time
is available"""
if self.creation is not None:
return self.creation
creation = self._getSubpacketInt4([SIG_SUBPKT_CREATION])
if creation is None:
creation = 0
self.creation = creation
return self.creation
def getExpiration(self):
"""Return the expiration offset, or None if the signature does not
expire"""
return self._getSubpacketInt4([SIG_SUBPKT_SIG_EXPIRE])
def getKeyExpiration(self):
"""Return the key expiration offset, or None if the signature does not
contain one"""
return self._getSubpacketInt4([SIG_SUBPKT_KEY_EXPIRE])
def getTrust(self):
"""Return the trust level, the trust amount and the trust regex for
this signature"""
spktTypes = set([SIG_SUBPKT_TRUST, 0x80 | SIG_SUBPKT_TRUST])
stream = self._getSubpacketStream(spktTypes)
if stream is None:
return None, None, None
tlevel, tamt = self._readBin(stream, 2)
# Look for a trust regex
# critical packets are ANDed with 0x80
spktTypes = set([SIG_SUBPKT_REGEX, 0x80 | SIG_SUBPKT_REGEX])
stream = self._getSubpacketStream(spktTypes)
if stream is None:
return tlevel, tamt, None
        # The regular expression subpacket is NUL-terminated; drop the
        # trailing NUL
        tregex = stream.read()[:-1]
return tlevel, tamt, tregex
def rewriteBody(self):
"""Re-writes the body after the signature has been modified"""
if not (isinstance(self.unhashedFile, util.ExtendedStringIO) or
isinstance(self.hashedFile, util.ExtendedStringIO)):
# Not changed
return
# Re-write ourselves
bodyStream = self._writeSigV4()
ns, nsp = self._nextStream, self._nextStreamPos
parentPkt = self._parentPacket
self.__init__(bodyStream, newStyle = self._newStyle)
self.setNextStream(ns, nsp)
self.setParentPacket(parentPkt)
self.initialize()
def getSigId(self):
"""Get the key ID of the issuer for this signature.
Return None if the packet did not contain an issuer key ID"""
self.parse()
if self.version in [2, 3]:
assert self.signerKeyId is not None
return binSeqToString(self.signerKeyId)
        # Version 2 and 3 packets have already set signerKeyId above
assert self.version == 4
for spktType, dataf in self.decodeUnhashedSubpackets():
if spktType != SIG_SUBPKT_ISSUER_KEYID:
continue
# Verify it only contains 8 bytes
dataf.seek(0, SEEK_SET)
try:
self.checkStreamLength(dataf, 8)
except ShortReadError, e:
raise InvalidPacketError("Expected %s bytes, got %s instead" %
(e.expected, e.actual))
self.signerKeyId = self._readBin(dataf, 8)
return binSeqToString(self.signerKeyId)
def getSignerKeyId(self):
return stringToAscii(self.getSigId())
def decodeHashedSubpackets(self):
self.parse()
if self._hashedSubPackets is None:
self._hashedSubPackets = list(self._decodeSigSubpackets(self.hashedFile))
return self._hashedSubPackets
def decodeUnhashedSubpackets(self):
self.parse()
if self._unhashedSubPackets is None:
self._unhashedSubPackets = list(self._decodeSigSubpackets(self.unhashedFile))
return self._unhashedSubPackets
@staticmethod
def _decodeSigSubpackets(fobj):
fobj.seek(0, SEEK_END)
ocount = fobj.tell()
fobj.seek(0)
while fobj.tell() < ocount:
yield PGP_Signature._getNextSubpacket(fobj)
@staticmethod
def _getNextSubpacket(fobj):
len0, = PGP_BaseKeySig._readBin(fobj, 1)
if len0 < 0xC0:
pktlenlen = 1
pktlen = len0
elif len0 == 0xFF:
pktlenlen = 5
data = PGP_BaseKeySig._readBin(fobj, 4)
pktlen = len4bytes(*data)
else:
pktlenlen = 2
len1, = PGP_BaseKeySig._readBin(fobj, 1)
pktlen = len2bytes(len0, len1)
spktType, = PGP_BaseKeySig._readBin(fobj, 1)
# The packet length includes the subpacket type
dataf = util.SeekableNestedFile(fobj, pktlen - 1)
# Do we have enough data?
try:
PGP_Signature.checkStreamLength(dataf, pktlen - 1)
except ShortReadError, e:
raise ShortReadError(pktlen + pktlenlen, e.actual + pktlenlen + 1)
dataf.seek(0, SEEK_SET)
# Skip the data
fobj.seek(pktlen - 1, SEEK_CUR)
return spktType, dataf
def _writeSigPacketsToStream(self):
self.parse()
sio = util.ExtendedStringIO()
parentPacket = self.getParentPacket()
# XXX we could probably rewrite this if/then/else
if isinstance(parentPacket, PGP_MainKey):
parentPacket.toPublicKey(minHeaderLen = 3).write(sio)
elif isinstance(parentPacket, (PGP_SubKey, PGP_UserID)):
pkpkt = parentPacket.getParentPacket().toPublicKey(minHeaderLen = 3)
pkpkt.write(sio)
if isinstance(parentPacket, PGP_UserID):
parentPacket.writeHash(sio, keyVersion = self.version)
else:
parentPacket.toPublicKey(minHeaderLen = 3).write(sio)
else:
raise InvalidPacketError("Unexpected parent", self._parentPacket)
return sio
def resetSignatureHash(self):
self._sigDigest = None
self._verifies = None
def getSignatureHash(self):
"""Compute the signature digest"""
if self._sigDigest is not None:
return self._sigDigest
sio = self._writeSigPacketsToStream()
self._sigDigest = self._computeSignatureHash(sio)
return self._sigDigest
def getDocumentHash(self, stream):
"""
Compute the hash over the supplied document.
@param stream: stream containing the document to be hashed
@type stream: file
@rtype: str
@return: the hash of the supplied document
"""
self.parse()
if self.sigType != SIG_TYPE_BINARY_DOC:
raise PGPError("Non-binary documents not supported")
digest = self._computeSignatureHash(stream)
return digest
def getShortSigHash(self):
"""Return the 16-leftmost bits for the signature hash"""
self.parse()
return self.hashSig
def setShortSigHash(self, val):
"""Set the 16-leftmost bits"""
assert(len(val) == 2)
self.hashSig = val
def merge(self, other):
"""Merge this signature with the other signature.
Returns True if it modified the current packet"""
assert self.tag == other.tag
# The signed part of the signature is immutable, there is no way we
# can merge it. The only things we might be able to merge are the
# unhashed signature subpackets
# However, gpg does not do that, so we will not do that either
if self.hashSig != other.hashSig:
raise MergeError("Signature packets with different hash")
if self.getSignatureHash() != other.getSignatureHash():
raise MergeError("Signature packets with different hash")
# Not much more to do here
return False
def _prepareSubpackets(self):
# XXX this is most likely going to change
if self._unhashedSubPackets is not None:
stream = util.ExtendedStringIO()
for spktType, spktStream in self._unhashedSubPackets:
self._writeSubpacket(stream, spktType, spktStream)
self.unhashedFile = stream
if self._hashedSubPackets is not None:
stream = util.ExtendedStringIO()
for spktType, spktStream in self._hashedSubPackets:
self._writeSubpacket(stream, spktType, spktStream)
self.hashedFile = stream
self._parsed = True
@staticmethod
def _writeSubpacket(stream, spktType, spktStream):
"""Write the subpacket into the stream"""
# First, determine the subpacket length
spktStream.seek(0, SEEK_END)
spktLen = spktStream.tell()
spktStream.seek(0, SEEK_SET)
# The subpacket length includes the type octet
spktLen += 1
header = []
if spktLen < 192:
# 1-octet length
header.append(spktLen)
elif spktLen < 16320:
# 2-octet length
header.extend(len2ToBytes(spktLen))
else:
# 5-octet length
header.append(255)
header.extend(len4ToBytes(spktLen))
for d in header:
stream.write(chr(d))
# Type
stream.write(chr(spktType))
PGP_Signature._copyStream(spktStream, stream)
def _completeHashV3(self, hashObj):
self._updateHashBin(hashObj, [self.sigType])
self._updateHashBin(hashObj, self.creation)
def _completeHashV4(self, hashObj):
# (re)compute the hashed packet subpacket data length
self.hashedFile.seek(0, SEEK_END)
hSubPktLen = self.hashedFile.tell()
self.hashedFile.seek(0, SEEK_SET)
# Write signature version, sig type, pub alg, hash alg
self._updateHashBin(hashObj, [ self.version, self.sigType,
self.pubKeyAlg, self.hashAlg ])
# Write hashed data length
self._updateHashBin(hashObj, int2ToBytes(hSubPktLen))
# Write the hashed data
self._updateHash(hashObj, self.hashedFile)
# We've added 6 bytes for the header
dataLen = hSubPktLen + 6
# Append trailer - 6-byte trailer
self._updateHashBin(hashObj, [ 0x04, 0xFF,
(dataLen // 0x1000000) & 0xFF, (dataLen // 0x10000) & 0xFF,
(dataLen // 0x100) & 0xFF, dataLen & 0xFF ])
def _computeSignatureHash(self, dataFile):
"""Compute the signature digest for this signature, using the
key serialized in dataFile"""
self.parse()
hashFunc = self.HashAlgList.get(self.hashAlg)
if hashFunc is None:
raise UnsupportedHashAlgorithm(self.hashAlg)
hashObj = hashFunc()
dataFile.seek(0, SEEK_SET)
self._updateHash(hashObj, dataFile)
if self.version == 3:
self._completeHashV3(hashObj)
elif self.version == 4:
self._completeHashV4(hashObj)
else:
raise InvalidKey("Signature is not a V3 or V4 signature")
sigDigest = hashObj.digest()
return sigDigest
@staticmethod
def finalizeSignature(sigString, cryptoKey, pubKeyAlg, hashAlg):
# if this is an RSA signature, it needs to properly padded
# RFC 2440 5.2.2 and RFC 2313 10.1.2
if pubKeyAlg in PK_ALGO_ALL_RSA:
hashPad = PGP_Signature.HashAlgPads[hashAlg]
padLen = (len(hex(cryptoKey.n)) - 5 -
2 * (len(sigString) + len(hashPad) + 1)) // 2 - 1
sigString = chr(1) + chr(0xFF) * padLen + chr(0) + hashPad + sigString
return sigString
def verify(self, cryptoKey, keyId):
"""Verify the signature as generated with cryptoKey"""
# Compute the signature digest
sigString = self.getSignatureHash()
# Validate it against the short digest
if sigString[:2] != self.hashSig:
raise BadSelfSignature(keyId)
digSig = self.parseMPIs()
if not self.verifySignature(sigString, cryptoKey, digSig,
self.pubKeyAlg, self.hashAlg):
self.setVerifies(False)
raise BadSelfSignature(keyId)
self.setVerifies(True)
def verifyDocument(self, cryptoKey, stream):
"""
Verify the signature on the supplied document stream
"""
digest = self.getDocumentHash(stream)
keyId = self.getSignerKeyId()
# Per FIPS-180-3 DSA keys require the following digest truncation
# A succinct description http://tools.ietf.org/html/rfc4880#section-13.6
if self.pubKeyAlg == PK_ALGO_DSA:
# get q bit length
qLen = int(math.ceil(math.log(cryptoKey.q, 2)))
# 384 is not required by the standard, but we add it since
# our testsuite has this case
assert qLen in [ 160, 224, 256, 384 ]
if int(math.ceil(math.log(pubkey.bytes_to_long(digest), 2))) > qLen:
digest = digest[:(qLen/8)]
# Validate it against the short digest
if digest[:2] != self.hashSig:
raise SignatureError(keyId)
digSig = self.parseMPIs()
if not self.verifySignature(digest, cryptoKey, digSig,
self.pubKeyAlg, self.hashAlg):
self.setVerifies(False)
raise SignatureError(keyId)
self.setVerifies(True)
@staticmethod
def verifySignature(sigString, cryptoKey, signature, pubKeyAlg, hashAlg):
"""Verify the signature on sigString generated with cryptoKey"""
sigString = PGP_Signature.finalizeSignature(sigString, cryptoKey,
pubKeyAlg, hashAlg)
return cryptoKey.verify(sigString, signature)
def initSubPackets(self):
self._hashedSubPackets = []
self._unhashedSubPackets = []
# Handling signature generation
def addTrust(self, level, amount, regexLimit = None):
"""Mark this signature packet as being a trust signature"""
stream = util.ExtendedStringIO()
stream.write(chr(level))
stream.write(chr(amount))
self._hashedSubPackets.append((SIG_SUBPKT_TRUST, stream))
if regexLimit:
stream = util.ExtendedStringIO()
stream.write(regexLimit)
stream.write('\x00')
# Mark this packet as critical
self._hashedSubPackets.append((0x80 | SIG_SUBPKT_REGEX, stream))
def addIssuerKeyId(self, keyId):
stream = util.ExtendedStringIO()
stream.write(fingerprintToInternalKeyId(keyId))
# The key ID is part of the unhashed data
self._unhashedSubPackets.append((SIG_SUBPKT_ISSUER_KEYID, stream))
def addCreation(self, timestamp = None):
"""Add a creation timestamp sub-packet"""
if timestamp is None:
timestamp = time.time()
self._hashedSubPackets.append((SIG_SUBPKT_CREATION,
self._addInt4(timestamp)))
def addExpiration(self, seconds):
"""Add an expiration sub-packet"""
self._hashedSubPackets.append((SIG_SUBPKT_SIG_EXPIRE,
self._addInt4(seconds)))
def _addInt4(self, int4):
int4 = int(int4)
stream = util.ExtendedStringIO()
self._writeBin(stream, int4ToBytes(int4))
return stream
def setVerifies(self, flag=True):
self._verifies = flag
def getVerifies(self):
return self._verifies
PacketTypeDispatcher.addPacketType(PGP_Signature)
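# Illustrative sketch, not part of the original module: how a signature
# subpacket is framed on the wire, mirroring PGP_Signature._writeSubpacket
# and _getNextSubpacket above (RFC 4880 sect. 5.2.3.1). The length octets
# cover the type octet plus the data; lengths below 192 use one octet,
# lengths below 16320 use two, and anything larger uses a five-octet form
# introduced by 255. This helper exists only to make the framing explicit
# and is never called by the module itself.
def _exampleSubpacketFraming(spktType, data):
    """Return the wire encoding of a single signature subpacket."""
    spktLen = len(data) + 1          # the length includes the type octet
    if spktLen < 192:
        header = [spktLen]
    elif spktLen < 16320:
        header = list(len2ToBytes(spktLen))
    else:
        header = [255] + list(len4ToBytes(spktLen))
    return binSeqToString(header) + chr(spktType) + data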
class PGP_UserID(PGP_BasePacket):
__slots__ = ['id', 'signatures', '_parentPacket']
tag = PKT_USERID
_parentPacketTypes = set(PKT_MAIN_KEYS)
    # Constant used for signing. See RFC 4880 sect. 5.2.4
signingConstant = 0xB4
def initialize(self):
self.resetBody()
self.parseBody()
# Signatures for this user ID
self.signatures = None
self._parentPacket = None
def parseBody(self):
# A user ID's data is just the user ID
self.id = self.readBody()
def toString(self):
return self.id
def addSignatures(self, signatures):
"""Add signatures to this UserID"""
if self.signatures is None:
self.signatures = []
for sig in signatures:
assert isinstance(sig, PGP_Signature)
# No circular reference here, setParentPacket does a clone
sig.setParentPacket(self)
self.signatures.append(sig)
def adoptSignature(self, sig):
"""Adopt the signature, if it's not ours already"""
pp = sig.getParentPacket()
if isinstance(pp, self.__class__) and self.id == pp.id:
return
sig.resetSignatureHash()
sig.setParentPacket(self)
def iterSignatures(self):
"""Iterate over this user's UserID"""
if self.signatures is not None:
return iter(self.signatures)
raise PGPError("Key packet not parsed")
iterSubPackets = iterSignatures
def iterKeySignatures(self, keyId):
intKeyId = fingerprintToInternalKeyId(keyId)
# Look for a signature by this key
for pkt in self.iterSignatures():
if intKeyId != pkt.getSigId():
continue
yield pkt
def iterCertifications(self):
for pkt in self.iterSignatures():
pkt.parse()
if pkt.sigType not in SIG_CERTS:
continue
yield pkt
def writeHash(self, stream, keyVersion):
"""Write a UserID packet in a stream, in order to be hashed.
Described in RFC 4880 5.2.4 computing signatures."""
if keyVersion == 4:
stream.write(chr(self.signingConstant))
stream.write(struct.pack("!I", self.bodyLength))
self.writeBody(stream)
def merge(self, other):
"""Merges this UserID packet to the other one.
Returns True if it changed the current packet"""
assert self.tag == other.tag
if self.id != other.id:
raise MergeError("User packets with different identifier")
finalsigs = _mergeSignatures(self.iterSignatures(),
other.iterSignatures())
if self.signatures == finalsigs:
return False
self.signatures = finalsigs
return True
def getExpiration(self):
"""Return the key expiration offset, or None if the key does not
expire.
If the key is revoked, -1 is returned"""
# Iterate over all self signatures
key = self.getParentPacket()
selfSigs = [ x for x in self.iterKeySignatures(key.getKeyId()) ]
if not selfSigs:
raise PGPError("User packet with no self signature")
revocs = []
certs = []
for sig in selfSigs:
sig.parse()
if sig.sigType == SIG_TYPE_CERT_REVOC:
revocs.append(sig)
elif sig.sigType in SIG_CERTS:
certs.append(sig)
# If we have a revocation, return a negative
if revocs:
return -1
# Sort signatures by creation time, and reverse them
certs.sort(key = lambda x: x.getCreation(), reverse = True)
# Walk the signatures, grab the first one that has a key expiration in
# it
for sig in certs:
exps = [ x[1] for x in sig.decodeHashedSubpackets()
if x[0] == SIG_SUBPKT_KEY_EXPIRE ]
if not exps:
continue
expstr = exps[0]
expstr.seek(0, SEEK_SET)
return int4FromBytes(*self._readBin(expstr, 4))
# No expiration
return None
PacketTypeDispatcher.addPacketType(PGP_UserID)
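# Illustrative sketch, not part of the original module: the bytes a v4
# certification hashes for a user ID, as produced by PGP_UserID.writeHash
# above - the 0xB4 signing constant, a four-octet big-endian length, then
# the raw user ID (RFC 4880 sect. 5.2.4). Never called by the module.
def _exampleUserIdHashPrefix(userId):
    """Return the user ID serialization hashed into a v4 certification."""
    return (chr(PGP_UserID.signingConstant) +
        struct.pack("!I", len(userId)) + userId)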
class PGP_UserAttribute(PGP_UserID):
__slots__ = ['id', 'signatures', 'subpackets']
tag = PKT_USER_ATTRIBUTE
signingConstant = 0xD1
def parseBody(self):
# Digest the packet
m = digestlib.sha1()
self._updateHash(m, self.getBodyStream())
self.id = '[image, digest = %s]' % m.hexdigest().upper()
PacketTypeDispatcher.addPacketType(PGP_UserAttribute)
class PGP_Key(PGP_BaseKeySig):
__slots__ = ['_parsed', 'version', 'createdTimestamp', 'pubKeyAlg',
'mpiFile', 'mpiLen', 'daysValid', '_keyId']
# Base class for public/secret keys/subkeys
tag = None
def initialize(self):
self.version = self.createdTimestamp = self.pubKeyAlg = None
self.mpiFile = self.mpiLen = None
self.daysValid = None
# Cache
self._keyId = None
self._parsed = False
def parse(self, force = False):
"""Parse the signature body and initializes the internal data
structures for other operations"""
if self._parsed and not force:
return
self.resetBody()
# Reset all internal state
self.initialize()
keyVersion, = self.readBin(1)
if keyVersion not in [3, 4]:
raise InvalidBodyError("Invalid key version %s" % keyVersion)
self.version = keyVersion
if keyVersion == 3:
self._readKeyV3()
else:
self._readKeyV4()
self._parsed = True
def _readKeyV3(self):
# RFC 2440, sect. 5.5.2
        # Note: V3 keys can be parsed, but rewriteBody only ever writes V4 keys
self.createdTimestamp = len4bytes(*self._readBin(self._bodyStream, 4))
## daysValid
data = self.readBin(2)
self.daysValid = int2FromBytes(*data)
## Public key algorithm
self.pubKeyAlg, = self.readBin(1)
# Record current position in body
mpiStart = self._bodyStream.tell()
## Read and discard 2 MPIs
self.skipMPIs(self._bodyStream, self.pubKeyAlg)
self.mpiLen = self._bodyStream.tell() - mpiStart
self.mpiFile = util.SeekableNestedFile(self._bodyStream, self.mpiLen,
start = mpiStart)
def _readKeyV4(self):
# RFC 2440, sect. 5.5.2
# Key creation
self.createdTimestamp = len4bytes(*self._readBin(self._bodyStream, 4))
# Public key algorithm
self.pubKeyAlg, = self.readBin(1)
# Record current position in body
mpiStart = self._bodyStream.tell()
# Skip over the MPIs
self.skipMPIs(self._bodyStream, self.pubKeyAlg)
self.mpiLen = self._bodyStream.tell() - mpiStart
self.mpiFile = util.SeekableNestedFile(self._bodyStream, self.mpiLen,
start = mpiStart)
def rewrite(self, stream):
"""
Rewrite this key to a different stream, usually after it was
modified (otherwise write is more efficient)"""
self._rewritePacket()
self.write(stream)
def rewriteAll(self, stream):
"""
Rewrite this key to a different stream, usually after it was
modified (otherwise write is more efficient)"""
self._rewritePacket()
self.writeAll(stream)
def _rewritePacket(self):
bodyStream = util.ExtendedStringIO()
self.rewriteBody(bodyStream)
self._bodyStream = bodyStream
self.bodyLength = self._getBodyLength()
# We invalidated this packet
self._parsed = False
def rewriteBody(self, stream):
# We only write keys version 4
# Write key version
self._writeBin(stream, [ 4 ])
self._writeKeyV4(stream)
def _writeKeyV4(self, stream):
self._writeBin(stream, int4ToBytes(self.createdTimestamp))
self._writeBin(stream, [ self.pubKeyAlg ])
self.mpiFile.seek(0)
self._copyStream(self.mpiFile, stream)
def getKeyFingerprint(self):
if self._keyId is not None:
if self.version == 3:
return self._keyId[0]
return self._keyId
self.parse()
if self.version == 3:
# See section "Key IDs and Fingerprints" for a description of how
# v3 fingerprints and key IDs are different
# Key ID is low 64 bits of the modulus
self.mpiFile.seek(0)
self._readCountMPIs(self.mpiFile, 1, discard = True)
end1 = self.mpiFile.tell()
octets = self.mpiFile.pread(8, end1 - 8)
# The fingerprint of a V3 key is formed by hashing the body (but
# not the two-octet length) of the MPIs that form the key material
# (public modulus n, followed by exponent e) with MD5.
self._readCountMPIs(self.mpiFile, 1, discard = True)
end2 = self.mpiFile.tell()
fpr = digestlib.md5()
# Skip the 2-octet length
fpr.update(self.mpiFile.pread(end1 - 2, 2))
fpr.update(self.mpiFile.pread((end2 - end1) - 2, end1 + 2))
fpr = fpr.hexdigest().upper()
self._keyId = fpr, stringToAscii(octets)
return fpr
# Convert to public key
pkt = self.toPublicKey(minHeaderLen = 3)
# Why minHeaderLen = 3?
# This is a holdover from the days of PGP 2.6.2
# RFC 2440 section 11.2 does a really bad job of explaining this.
# RFC 2440 section 5.2.4 refers to this for self signature computation.
        # One of the least documented gotchas of key fingerprints: they are
        # ALWAYS calculated as if the packet were a public main key block.
        # This means private keys are treated as public keys, and subkeys
        # are treated as main keys for the purposes of this hash.
        # Furthermore, if the header length was one byte long it must be
        # promoted to a two-byte length (upper octet is 0); not doing this
        # produces fingerprints which do not match the output of OpenPGP
        # compliant programs.
        # As a result the first octet is ALWAYS 0x99, in binary 10 0110 01:
        # 10 indicates an old style PGP packet
        # 0110 indicates a public key
        # 01 indicates a 2-byte length
m = digestlib.sha1()
sio = util.ExtendedStringIO()
# Write only the header, we can copy the body directly from the
# body stream
pkt.writeHeader(sio)
m.update(sio.getvalue())
pkt.resetBody()
self._updateHash(m, pkt.getBodyStream())
self._keyId = m.hexdigest().upper()
return self._keyId
def getKeyId(self):
if self.version == 3:
self.getKeyFingerprint()
return self._keyId[1]
return self.getKeyFingerprint()[-16:]
def hasKeyId(self, keyId):
keyId = keyId.upper()
if self.version == 3:
if self.getKeyId().endswith(keyId):
return True
return self.getKeyFingerprint().endswith(keyId)
def getCreatedTimestamp(self):
self.parse()
return self.createdTimestamp
def iterSelfSignatures(self):
return self._iterSelfSignatures(self.getKeyId())
def _iterSelfSignatures(self, keyId):
"""Iterate over all the self-signatures"""
self.parse()
intKeyId = fingerprintToInternalKeyId(keyId)
# Look for a self signature
for pkt in self.iterSignatures():
if intKeyId != pkt.getSigId():
continue
yield pkt
def iterAllSelfSignatures(self):
"""Iterate over direct signatures and UserId signatures"""
return self._iterAllSelfSignatures(self.getKeyFingerprint())
def _iterAllSelfSignatures(self, keyId):
for pkt in self.iterSelfSignatures():
yield pkt
intKeyId = fingerprintToInternalKeyId(keyId)
for uid in self.iterUserIds():
for pkt in uid.iterSignatures():
if intKeyId != pkt.getSigId():
continue
yield pkt
def assertSigningKey(self):
# Find self signature of this key
        # First search for the public key algorithm octet. If the key is
        # really old, this might be the only hint that it's legal to use
        # this key to make digital signatures.
self.parse()
if self.pubKeyAlg in (PK_ALGO_RSA_SIGN_ONLY, PK_ALGO_DSA):
# the public key algorithm octet satisfies this test. no more
# checks required.
return True
keyId = self.getKeyFingerprint()
# If it's a subkey, look for the master key
if self.tag in PKT_SUB_KEYS:
pkt = self.getMainKey()
return pkt.assertSigningKey()
# Look for a self signature
for pkt in self.iterAllSelfSignatures():
# We know it's a ver4 packet, otherwise getSigId would have failed
for spktType, dataf in pkt.decodeHashedSubpackets():
if spktType == SIG_SUBPKT_KEY_FLAGS:
# RFC 2440, sect. 5.2.3.20
dataf.seek(0, SEEK_SET)
foct, = self._readBin(dataf, 1)
if foct & 0x02:
return True
# No subpacket or no key flags
raise IncompatibleKey('Key %s is not a signing key.'% keyId)
def getPublicKeyTuple(self):
"""Return the key material"""
self.parse()
self.mpiFile.seek(0, SEEK_SET)
return self.readMPIs(self.mpiFile, self.pubKeyAlg)
def makePgpKey(self, passPhrase = None):
assert passPhrase is None
pkTuple = self.getPublicKeyTuple()
if self.pubKeyAlg in PK_ALGO_ALL_RSA:
n, e = pkTuple
return RSA.construct((n, e))
if self.pubKeyAlg == PK_ALGO_DSA:
p, q, g, y = pkTuple
return DSA.construct((y, g, p, q))
raise MalformedKeyRing("Can't use El-Gamal keys in current version")
def getCryptoKey(self, passPhrase = None):
assert passPhrase is None
self.verifySelfSignatures()
return self.makePgpKey()
def adoptSignature(self, sig):
"""Adopt the signature, if it's not ours already"""
pp = sig.getParentPacket()
if isinstance(pp, self.__class__) and \
pp.getKeyFingerprint() == self.getKeyFingerprint():
return
sig.resetSignatureHash()
sig.setParentPacket(self)
def getEndOfLife(self):
"""Parse self signatures to find timestamp(s) of key expiration.
Also seek out any revocation timestamps.
We don't need to actually verify these signatures, but we do require
that they were verified previously using verifySelfSignatures().
Signatures that do not validate are ignored for the purposes of
        calculating the revocation and expiration.
        Returns a (revocation timestamp, expiration timestamp) tuple; each
        value is 0 if no revocation or expiration was found.
"""
expireTimestamp = revocTimestamp = 0
# Key creation time
cTimestamp = self.createdTimestamp
        # Creation time of the most recent signature that carried a key
        # expiration subpacket
sigExpCreationTimestamp = 0
# Note that in this respect we are bug-compatible with gpg. A
# key that does not expire has signatures with no key expiration
# subpackets. This means once you've set an expiration on a key and
# published it, you can not make it never expire.
for sig in self.iterSelfSigCertifications():
verifies = sig.getVerifies()
assert verifies is not None, "Please verify signatures first"
# If the sig doesn't verify, skip it
if not verifies:
continue
eTimestamp = sig.getKeyExpiration()
if eTimestamp is None:
continue
sigCreation = sig.getCreation()
if sigCreation <= sigExpCreationTimestamp:
# This signature has the same or an earlier creation than
# the one that supplied an expiration. Skip it. This works
# across different uids too
continue
if eTimestamp > 0:
eTimestamp += cTimestamp
expireTimestamp = eTimestamp
sigExpCreationTimestamp = sigCreation
# Now iterate over direct signatures, looking for a key revocation
for sig in self.iterSelfSigRevocations():
verifies = sig.getVerifies()
assert verifies is not None, "Please verify signatures first"
# If the sig doesn't verify, skip it
if not verifies:
continue
sigCreation = sig.getCreation()
if revocTimestamp == 0 or sigCreation < revocTimestamp:
revocTimestamp = sigCreation
return (revocTimestamp, expireTimestamp)
class PGP_MainKey(PGP_Key):
def initSubPackets(self):
if hasattr(self, "subkeys"):
# Already processed
return
self.parse()
self.revsigs = []
self.uids = []
self.subkeys = []
subpkts = [ x for x in self._iterSubPackets(PKT_MAIN_KEYS) ]
# Start reading signatures until we hit a UserID or another key
limit = set(PKT_SUB_KEYS)
limit.add(PKT_USERID)
limit.add(PKT_USER_ATTRIBUTE)
i = 0
for pkt in subpkts:
if pkt.tag in limit:
# UserID or subkey
break
i += 1
if not isinstance(pkt, PGP_Signature):
continue
pkt.parse()
if pkt.sigType in (SIG_TYPE_KEY_REVOC, SIG_TYPE_DIRECT_KEY):
# Key revocation
# No circular reference here, setParentPacket does a clone
pkt.setParentPacket(self)
self.revsigs.append(pkt)
continue
# According to sect. 10.1, there should not be other signatures
# here.
assert False, "Unexpected signature type %s" % pkt.sigType
sigLimit = i
# Read until we hit a subkey
limit = set(PKT_SUB_KEYS)
i = 0
for pkt in subpkts[sigLimit:]:
if pkt.tag in limit:
break
i += 1
            # Certification revocations live together with regular signatures,
            # or so the RFC says
if isinstance(pkt, PGP_UserID):
# No circular reference here, setParentPacket does a clone
pkt.setParentPacket(self)
self.uids.append(pkt)
continue
if isinstance(pkt, PGP_Signature):
# This can't be the first packet, or we wouldn't have stopped
# in the previous loop
# Add this signature to the last user id we found
self.uids[-1].addSignatures([pkt])
continue
# We ignore other packets (like trust)
uidLimit = sigLimit + i
# Read until the end
# We don't want to point back to ourselves, or we'll create a
# circular loop.
for pkt in subpkts[uidLimit:]:
if isinstance(pkt, PGP_SubKey):
# No circular reference here, setParentPacket does a clone
pkt.setParentPacket(self)
self.subkeys.append(pkt)
continue
if isinstance(pkt, PGP_Signature):
# This can't be the first packet, or we wouldn't have stopped
# in the previous loop
subkey = self.subkeys[-1]
pkt.parse()
if pkt.sigType == SIG_TYPE_SUBKEY_REVOC:
subkey.setRevocationSig(pkt)
continue
if pkt.sigType == SIG_TYPE_SUBKEY_BIND:
subkey.setBindingSig(pkt)
continue
# There should not be any other type of signature here
continue
# Ignore other packets
def iterUserIds(self):
self.initSubPackets()
return iter(self.uids)
def iterSubPackets(self):
for sig in self.iterSignatures():
yield sig
for uid in self.iterUserIds():
yield uid
for sig in uid.iterSignatures():
yield sig
for subkey in self.iterSubKeys():
yield subkey
for pkt in subkey.iterSubPackets():
yield pkt
def iterSignatures(self):
"""Iterate over all signature packets"""
self.initSubPackets()
return iter(self.revsigs)
def iterCertifications(self):
"""Iterate over all certification signatures (on user IDs)"""
for uid in self.iterUserIds():
for sig in uid.iterCertifications():
yield sig
def iterSubKeys(self):
self.initSubPackets()
return iter(self.subkeys)
def verifySelfSignatures(self):
"""
Verify the self signatures on this key.
If successful, returns the public key packet associated with this key,
and crypto key.
@return: (pubKeyPacket, cryptoKey)
@raises BadSelfSignature:
"""
self.parse()
if self.version not in [3, 4]:
raise InvalidKey("Version %s keys not supported" % self.version)
# Convert to a public key (even if it's already a public key)
pkpkt = self.toPublicKey(minHeaderLen = 3)
keyFpr = pkpkt.getKeyFingerprint()
keyId = pkpkt.getKeyId()
pgpKey = pkpkt.makePgpKey()
for sig in self.iterSelfSignatures():
self.adoptSignature(sig)
sig.verify(pgpKey, keyFpr)
for uid in self.iterUserIds():
verified = False
for sig in uid.iterKeySignatures(keyId):
uid.adoptSignature(sig)
try:
sig.verify(pgpKey, keyFpr)
except BadSelfSignature:
continue
verified = True
if not verified:
# No signature. Not good, according to our standards
raise BadSelfSignature(keyFpr)
return pkpkt, pgpKey
def isSupersetOf(self, key):
"""Check if this key is a superset of key
We try to make sure that:
- the keys have the same ID
- this key's set of revocation signatures is a superset of the other
key's revocations
- this key's set of subkeys is a superset of the other key's subkeys
- this key's set of userids is a superset of the other key's userids
"""
if self.tag != key.tag:
raise IncompatibleKey("Attempting to compare different key types")
if self.getKeyFingerprint() != key.getKeyFingerprint():
raise IncompatibleKey("Attempting to compare different keys")
thisSubkeyIds = dict((x.getKeyFingerprint(), x) for x in self.iterSubKeys())
otherSubkeyIds = dict((x.getKeyFingerprint(), x) for x in key.iterSubKeys())
if not set(thisSubkeyIds).issuperset(otherSubkeyIds):
# Missing subkey
return False
thisUids = dict((x.id, x) for x in self.iterUserIds())
otherUids = dict((x.id, x) for x in key.iterUserIds())
if not set(thisUids).issuperset(otherUids):
# Missing uid
return False
thisRevSigs = set(x.getSignatureHash() for x in self.revsigs)
otherRevSigs = set(x.getSignatureHash() for x in key.revsigs)
if not thisRevSigs.issuperset(otherRevSigs):
# Missing revocation signature
return False
# XXX More work to be done here, we would have to verify that
# signatures don't change. This is what the old code was doing (and it
# wasn't actually verifying user ids either ) -- misa
return True
def getUserIds(self):
return [ pkt.id for pkt in self.iterUserIds() ]
def merge(self, other):
"""Merge this key with the other key
Return True if the key was modified"""
assert self.tag == other.tag
if self.getKeyFingerprint() != other.getKeyFingerprint():
raise MergeError("Merging keys with a different ID")
# Both keys must verify their self-signing signatures
self.verifySelfSignatures()
other.verifySelfSignatures()
# Merge revocations / direct keys
finalsigs = _mergeSignatures(self.iterSignatures(),
other.iterSignatures())
changed = False
if self.revsigs != finalsigs:
changed = True
self.revsigs = finalsigs
# Now merge user ids
changed = self._mergeUserIds(other) or changed
# And merge subkeys
changed = self._mergeSubkeys(other) or changed
return changed
def _mergeUserIds(self, other):
luids = {}
# Preserve order
finaluids = []
changed = False
for uid in itertools.chain(self.iterUserIds(), other.iterUserIds()):
luidlist = luids.setdefault(uid.id, [])
# We may have UserID and UserAttribute packets that can collide
# (though it's very unlikely)
for luid in luidlist:
if uid.tag == luid.tag:
changed = luid.merge(uid) or changed
break
else: # for
luidlist.append(uid)
finaluids.append(uid)
if self.uids == finaluids and not changed:
return False
self.uids = finaluids
return True
def _mergeSubkeys(self, other):
# Subkeys can only have one revocation (revoking a subkey effectively
# invalidates the key)
lkids = {}
# Preserve order
finalkeys = []
changed = False
for skey in itertools.chain(self.iterSubKeys(), other.iterSubKeys()):
# Verify self signatures
skey.verifySelfSignatures()
keyId = skey.getKeyFingerprint()
if keyId not in lkids:
lkids[keyId] = skey
finalkeys.append(skey)
continue
changed = lkids[keyId].merge(skey) or changed
if self.subkeys == finalkeys and not changed:
return False
self.subkeys = finalkeys
return True
def iterSelfSigCertifications(self):
"""Iterate over all self signature certifications"""
keyId = self.getKeyId()
for uid in self.uids:
# Uhm. We may have to ignore expirations that exist on revoked
# users (users with a revoked self signature - 5.2.3.3)
for pkt in uid.iterKeySignatures(keyId):
if pkt.sigType not in SIG_CERTS:
continue
yield pkt
def iterSelfSigRevocations(self):
"""Iterate over all self signature revocations"""
for sig in self.iterSelfSignatures():
if sig.sigType != SIG_TYPE_KEY_REVOC:
continue
yield sig
class PGP_PublicAnyKey(PGP_Key):
__slots__ = []
pubTag = None
def toPublicKey(self, minHeaderLen = 2):
return self._msgClass.newPacket(self.pubTag, self._bodyStream,
minHeaderLen = minHeaderLen)
class PGP_PublicKey(PGP_PublicAnyKey, PGP_MainKey):
__slots__ = []
tag = PKT_PUBLIC_KEY
pubTag = PKT_PUBLIC_KEY
class PGP_SecretAnyKey(PGP_Key):
__slots__ = ['s2k', 'symmEncAlg', 's2kType', 'hashAlg', 'salt',
'count', 'encMpiFile']
pubTag = None
_hashes = [ 'Unknown', digestlib.md5, digestlib.sha1, RIPEMD, 'Double Width SHA',
'MD2', 'Tiger/192', 'HAVAL-5-160' ]
# Ciphers and their associated key sizes
_ciphers = [ ('Unknown', 0), ('IDEA', 0), (DES3, 192), (CAST, 128),
(Blowfish, 128), ('SAFER-SK128', 0), ('DES/SK', 0),
(AES, 128), (AES, 192), (AES, 256), ]
_legalCiphers = set([ 2, 3, 4, 7, 8, 9 ])
def initialize(self):
PGP_Key.initialize(self)
self.s2k = self.symmEncAlg = self.s2kType = None
self.hashAlg = self.salt = self.count = None
# We do not store the initial vector, it is part of the encoded file
self.encMpiFile = None
def parse(self, force = False):
PGP_Key.parse(self, force = force)
# Seek to the end of the MPI file, just to be safe (we should be there
# already)
self._bodyStream.seek(self.mpiFile.start + self.mpiLen, SEEK_SET)
self.s2k, = self.readBin(1)
if self.s2k in [ENCRYPTION_TYPE_SHA1_CHECK,
ENCRYPTION_TYPE_S2K_SPECIFIED]:
self.symmEncAlg, self.s2kType, self.hashAlg = self.readBin(3)
if self.s2kType:
if 100 <= self.s2kType <= 110:
# Private/Experimental s2k
pass
else:
if self.s2kType not in (S2K_TYPE_SALTED, S2K_TYPE_ITER_SALTED):
raise IncompatibleKey('Unknown string-to-key type %s' %
self.s2kType)
self.salt = self.readExact(8)
if self.s2kType == S2K_TYPE_ITER_SALTED:
self.count, = self.readBin(1)
# The Initial Vector is part of the encrypted MPI file
# The MPIs are most likely encrypted, we'll just have to trust that
# there are enough of them for now.
dataLen = self._bodyStream.size - self._bodyStream.tell()
self.encMpiFile = util.SeekableNestedFile(self._bodyStream, dataLen)
def rewriteBody(self, stream):
PGP_Key.rewriteBody(self, stream)
self._writeBin(stream, [ self.s2k ])
if self.s2k in [ENCRYPTION_TYPE_SHA1_CHECK,
ENCRYPTION_TYPE_S2K_SPECIFIED]:
self._writeBin(stream, [ self.symmEncAlg, self.s2kType,
self.hashAlg ])
if self.s2kType in (0x01, 0x03):
assert len(self.salt) == 8
stream.write(self.salt)
if self.s2kType == 0x03:
self._writeBin(stream, [ self.count ])
self.encMpiFile.seek(0)
self._copyStream(self.encMpiFile, stream)
def _getSecretMPICount(self):
if self.pubKeyAlg in PK_ALGO_ALL_RSA:
return 4
if self.pubKeyAlg == PK_ALGO_DSA:
return 1
if self.pubKeyAlg in PK_ALGO_ALL_ELGAMAL:
return 1
raise PGPError("Unsupported public key algorithm %s" % self.pubKeyAlg)
def toPublicKey(self, minHeaderLen = 2):
self.parse()
# Create a nested file starting at the beginning of the body's and
# with the length equal to the position in the body up to the MPIs
io = util.SeekableNestedFile(self._bodyStream,
self.mpiFile.start + self.mpiLen, start = 0)
pkt = self._msgClass.newPacket(self.pubTag, io,
minHeaderLen = minHeaderLen)
return pkt
def recrypt(self, oldPassPhrase, newPassPhrase = None, _salt = None):
if newPassPhrase is None and self.s2k == ENCRYPTION_TYPE_UNENCRYPTED:
# Nothing to do here
return False
if oldPassPhrase == newPassPhrase:
# Nothing to do either
return False
# decrypt the MPIs
mpis = self.decrypt(oldPassPhrase)
stream = util.ExtendedStringIO()
for mpi in mpis:
self._writeMPI(stream, mpi)
if newPassPhrase is None:
self.s2k = ENCRYPTION_TYPE_UNENCRYPTED
self.salt = None
self._writeBin(stream,
int2ToBytes(computeRFC2440Checksum(stream.getvalue())))
else:
self.s2k = ENCRYPTION_TYPE_SHA1_CHECK
stream.write(computeSHAChecksum(stream.getvalue()))
# DES3
self.symmEncAlg = 2
# We force iterated + salted
self.s2kType = 0x03
# SHA1 protection
self.hashAlg = 2
            # Iteration count octet of 96, which RFC 4880 sect. 3.7.1.3
            # expands to 65536 iterations (a fairly standard choice)
            self.count = 96
if isinstance(self, PGP_SecretSubKey):
# Use the same salt as the main key
self.salt = _salt
else:
self.salt = file("/dev/urandom").read(8)
stream.seek(0)
io = self._encryptStream(stream, newPassPhrase)
stream = io
self.encMpiFile = stream
stream.seek(0)
# Re-crypt subkeys too
for sk in self.iterSubKeys():
if isinstance(sk, PGP_SecretSubKey):
sk.recrypt(oldPassPhrase, newPassPhrase, _salt = self.salt)
sk._rewritePacket()
return True
def decrypt(self, passPhrase):
self.parse()
self.encMpiFile.seek(0, SEEK_SET)
if passPhrase is None:
passPhrase = ''
if self.s2k == ENCRYPTION_TYPE_UNENCRYPTED:
return self._readCountMPIs(self.encMpiFile,
self._getSecretMPICount(), discard = False)
io = self._decryptStream(self.encMpiFile, passPhrase)
unenc = io.read()
if self.s2k == ENCRYPTION_TYPE_S2K_SPECIFIED:
check = verifyRFC2440Checksum(unenc)
else:
check = verifySHAChecksum(unenc)
if not check:
raise BadPassPhrase('Pass phrase incorrect')
io.seek(0)
return self._readCountMPIs(io, self._getSecretMPICount(),
discard = False)
def _getCipher(self, passPhrase):
if self.symmEncAlg not in self._legalCiphers:
# We only support the algorithms in self._legalCiphers
            if self.symmEncAlg >= len(self._ciphers):
                raise IncompatibleKey("Unknown cipher %s" %
                    self.symmEncAlg)
raise IncompatibleKey("Cipher %s is unusable" % self.symmEncAlg)
if self.hashAlg >= len(self._hashes):
raise IncompatibleKey("Unknown hash algorithm %s" % self.hashAlg)
hashAlg = self._hashes[self.hashAlg]
if isinstance(hashAlg, str):
raise IncompatibleKey('Hash algorithm %s is not implemented. '
'Key not readable' % hashAlg)
cipherAlg, cipherKeySize = self._ciphers[self.symmEncAlg]
if self.s2kType == 0x00:
key = simpleS2K(passPhrase, hashAlg, cipherKeySize)
elif self.s2kType == 0x01:
key = saltedS2K(passPhrase, hashAlg, cipherKeySize, self.salt)
elif self.s2kType == 0x03:
key = iteratedS2K(passPhrase, hashAlg, cipherKeySize, self.salt,
self.count)
if self.symmEncAlg > 6:
cipherBlockSize = 16
else:
cipherBlockSize = 8
        # Mode 1 is ECB in PyCrypto; the OpenPGP CFB-style feedback is
        # applied manually in _decryptStream / _encryptStream below
        cipher = cipherAlg.new(key, 1)
return cipher, cipherBlockSize
def _decryptStream(self, stream, passPhrase):
cipher, cipherBlockSize = self._getCipher(passPhrase)
cryptFunc = cipher.encrypt
io = util.ExtendedStringIO()
block = self._readExact(self.encMpiFile, cipherBlockSize)
FRE = cryptFunc(block)
while 1:
block = stream.read(cipherBlockSize)
io.write(xorStr(FRE, block))
if len(block) != cipherBlockSize:
break
FRE = cryptFunc(block)
io.seek(0)
return io
def _encryptStream(self, stream, passPhrase):
cipher, cipherBlockSize = self._getCipher(passPhrase)
io = util.ExtendedStringIO()
cryptFunc = cipher.encrypt
# Initial vector is random data
block = file("/dev/urandom").read(cipherBlockSize)
FRE = cryptFunc(block)
io.write(FRE)
FRE = cryptFunc(FRE)
while 1:
block = stream.read(cipherBlockSize)
block = xorStr(FRE, block)
io.write(block)
if len(block) != cipherBlockSize:
break
FRE = cryptFunc(block)
io.seek(0)
return io
def makePgpKey(self, passPhrase = None):
assert passPhrase is not None
# Secret keys have to be signing keys
self.assertSigningKey()
pkTuple = self.getPublicKeyTuple()
secMPIs = self.decrypt(passPhrase)
if self.pubKeyAlg in PK_ALGO_ALL_RSA:
n, e = pkTuple
d, p, q, u = secMPIs
return RSA.construct((n, e, d, p, q, u))
if self.pubKeyAlg == PK_ALGO_DSA:
p, q, g, y = pkTuple
x, = secMPIs
return DSA.construct((y, g, p, q, x))
raise MalformedKeyRing("Can't use El-Gamal keys in current version")
def getCryptoKey(self, passPhrase):
try:
self.verifySelfSignatures()
except BadSelfSignature:
# XXX Make this a callback
sys.stderr.write("Warning: self-signature on private key does not verify\n")
return self.makePgpKey(passPhrase)
def sign(self, packet, passwordCallback, sigType = None, creation = None,
expiration = None, trustLevel = None, trustAmount = None,
trustRegex = None, **kwargs):
"""Sign packet (user packet only).
If expiration is None, the signature will expire when the key expire,
if the key expires, otherwise it does not expire either.
To produce a signature that does not expire, regardless of the key's
expiration, use -1 for the expiration"""
# We can only sign user IDs for now
assert(isinstance(packet, PGP_UserID))
# We need a key linked to this user
parentPacket = packet.getParentPacket()
assert(isinstance(parentPacket, PGP_MainKey))
if creation is None:
creation = time.time()
if (trustLevel is None) ^ (trustAmount is None):
raise Exception("both trustLevel and trustAmount should be "
"specified")
if expiration is None:
keyExpiration = packet.getExpiration()
if keyExpiration is None:
# Key does not expire
expiration = -1
elif keyExpiration < 0:
# Key is revoked
raise SignatureError("Signing a revoked key")
else:
expiration = (parentPacket.getCreatedTimestamp() +
keyExpiration - creation)
# We may have to change this default
if sigType is None:
sigType = SIG_TYPE_CERT_0
# Fetch the crypto key
cryptoKey = self.makePgpKey(passPhrase = passwordCallback())
# See comment in OpenPGPKey.getType
keyType = key_type(cryptoKey)
if keyType == 'DSA':
pkAlg = PK_ALGO_DSA
# Pick a random number that is relatively prime with the crypto
# key's q
relprime = cryptoKey.q + 1
while relprime > cryptoKey.q:
relprime = num_getRelPrime(cryptoKey.q)
elif keyType == 'RSA':
pkAlg = PK_ALGO_RSA
# RSA doesn't need a prime for signing
relprime = 0
else:
# Maybe we need a different exception?
raise UnsupportedEncryptionAlgorithm(keyType)
hashAlg = 2 # sha
# Create signature packet
sigp = PGP_Signature(util.ExtendedStringIO())
# Link it to this user packet (which should be linked to a key)
sigp.setParentPacket(packet)
sigp.version = 4
sigp.sigType = sigType
sigp.pubKeyAlg = pkAlg
sigp.hashAlg = hashAlg
sigp.initSubPackets()
sigp.addCreation(creation)
if expiration >= 0:
sigp.addExpiration(expiration)
if trustLevel:
sigp.addTrust(trustLevel, trustAmount, trustRegex)
sigp.addIssuerKeyId(self.getKeyFingerprint())
# Prepare the subpacket streams
sigp._prepareSubpackets()
# Add the short sig hash (we can compute the real sig hash now)
sighash = sigp.getSignatureHash()
sigp.setShortSigHash(sighash[:2])
sigString = sigp.finalizeSignature(sighash, cryptoKey, sigp.pubKeyAlg,
sigp.hashAlg)
mpis = cryptoKey.sign(sigString, relprime)
# Write MPIs
stream = util.ExtendedStringIO()
sigp.mpiFile = stream
for mpi in mpis:
PGP_Signature._writeMPI(stream, mpi)
sigp.rewriteBody()
packet.signatures.append(sigp)
return sigp
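# Illustrative sketch, not part of the original module: how the one-octet
# iterated/salted S2K count stored in PGP_SecretAnyKey.count expands into
# an iteration count, per RFC 4880 sect. 3.7.1.3. The octet 96 chosen by
# recrypt above expands to 65536 iterations. Never called by the module.
def _exampleS2KIterationCount(countOctet):
    """Decode a one-octet iterated/salted S2K count octet."""
    return (16 + (countOctet & 15)) << ((countOctet >> 4) + 6)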
class PGP_SecretKey(PGP_SecretAnyKey, PGP_MainKey):
__slots__ = []
tag = PKT_SECRET_KEY
pubTag = PKT_PUBLIC_KEY
class PGP_SubKey(PGP_Key):
# Subkeys are promoted to main keys when converted to public keys
pubTag = PKT_PUBLIC_KEY
_parentPacketTypes = set(PKT_MAIN_KEYS)
def setUp(self):
self.bindingSig = None
self.revocationSig = None
def setBindingSig(self, sig):
self.bindingSig = sig
# No circular reference here
self.bindingSig.setParentPacket(self)
sig.resetSignatureHash()
def setRevocationSig(self, sig):
self.revocationSig = sig
# No circular reference here
self.revocationSig.setParentPacket(self)
sig.resetSignatureHash()
def iterSubPackets(self):
# Stop at another key
if self.bindingSig:
yield self.bindingSig
if self.revocationSig:
yield self.revocationSig
def iterCertifications(self):
return []
def iterUserIds(self):
# Subkeys don't have user ids
return []
def iterSelfSignatures(self):
return self._iterSelfSignatures(self.getMainKey().getKeyFingerprint())
def iterAllSelfSignatures(self):
"""Iterate over direct signatures and UserId signatures"""
return self._iterAllSelfSignatures(self.getMainKey().getKeyFingerprint())
def getMainKey(self):
"""Return the main key for this subkey"""
return self.getParentPacket()
def verifySelfSignatures(self):
# Get the main key associated with this subkey
mainKey = self.getParentPacket()
# since this is a subkey, let's go ahead and make sure the
# main key is valid before we continue
mainpkpkt, mainPgpKey = mainKey.verifySelfSignatures()
# Convert this subkey to a public key
pkpkt = self.toPublicKey(minHeaderLen = 3)
keyId = pkpkt.getKeyFingerprint()
# We should have a binding signature or a revocation
if self.bindingSig is None and self.revocationSig is None:
raise BadSelfSignature(keyId)
# Only verify direct signatures
verified = False
for sig in self.iterSelfSignatures():
# We verify both the key binding and the revocation, if available
# Also make sure we're verifying the right key
self.adoptSignature(sig)
sig.verify(mainPgpKey, keyId)
verified = True
if not verified:
# No signatures on the subkey
raise BadSelfSignature(keyId)
if self.bindingSig is None:
# No binding sig to further check (must have been revoked)
return
# Iterate over the unhashed packets of the binding signature, there
# may be a SIG_TYPE_PRKEY_BIND (0x19) embedded signature. See #12.1
# (Enhanced Key Formats) from the draft spec for details
embeddedSigs = [ x[1]
for x in self.bindingSig.decodeUnhashedSubpackets()
if x[0] == SIG_SUBPKT_EMBEDDED_SIG ]
if not embeddedSigs:
return
for sigStream in embeddedSigs:
sig = PGP_Signature(bodyStream = sigStream)
sig.parse()
if sig.sigType != SIG_TYPE_PRKEY_BIND:
# Non-signing keys can have this packet missing
continue
intKeyId = fingerprintToInternalKeyId(keyId)
if sig.getSigId() != intKeyId:
continue
self.adoptSignature(sig)
# Verify the signature with the subkey's public key
sig.verify(self.toPublicKey().makePgpKey(), keyId)
def iterSubKeys(self):
# Nothing to iterate over, subkeys don't have subkeys
return []
def iterSignatures(self):
for pkt in self.iterSubPackets():
yield pkt
def merge(self, other):
"""Merge this subkey with the other key"""
assert self.tag == other.tag
        # Subkeys MUST have a key binding signature (unless the subkey has
        # been revoked, in which case the revocation signature alone is
        # enough). They MAY also have an optional revocation.
# Revoking a subkey effectively terminates that key. Reconciling
# revocation signatures is therefore not a big issue - probably
# keeping one of the revocations would be enough -- misa
if other.revocationSig is not None:
# The other key is revoked.
if self.bindingSig is None:
if self.revocationSig.getShortSigHash() == \
other.revocationSig.getShortSigHash():
# Same key
return False
# Our key verifies, so it must have a revocation (since it
# doesn't have a key binding sig)
assert(self.revocationSig is not None)
# we already have a revocation, keep ours
return False
# Prefer our own revocation
changed = False
if self.revocationSig is None:
self.revocationSig = other.revocationSig
changed = True
if changed:
# While we are at it, drop the binding key too, it's not
# needed
self.bindingSig = None
# We modified the key
return True
return False
# We verified the other key before we tried to merge, so this should
# not be possible
assert(other.bindingSig is not None)
if self.revocationSig is not None:
if self.bindingSig is not None:
# Drop the binding signature
self.bindingSig = None
return True
# This key is revoked, nothing else to do
return False
# self.revocationSig is None, we verified the key, so we must have a
# binding sig.
assert(self.bindingSig is not None)
if self.bindingSig.getSignatureHash() != other.bindingSig.getSignatureHash():
# This is very unlikely, since the binding signature is produced
# at the time the subkey is created, there should be only one
raise MergeError("Different binding signatures")
# Same binding sig, and no revocation
return False
def iterSelfSigCertifications(self):
return [self.bindingSig]
def iterSelfSigRevocations(self):
if self.revocationSig:
return [self.revocationSig]
return []
def getEndOfLife(self):
"""Parse self signatures to find timestamp(s) of key expiration.
Also seek out any revocation timestamps.
We don't need to actually verify these signatures.
See verifySelfSignatures()
        Returns a (revocation timestamp, expiration timestamp) tuple; each
        value is 0 if no revocation or expiration was found.
"""
parentRevoked, parentExpire = self.getMainKey().getEndOfLife()
revoked, expire = PGP_Key.getEndOfLife(self)
if revoked and parentRevoked:
revoked = min(revoked, parentRevoked)
else:
revoked = max(revoked, parentRevoked)
# Subkeys don't depend on the main key's expiration date
return revoked, expire
class PGP_PublicSubKey(PGP_SubKey, PGP_PublicAnyKey):
__slots__ = []
tag = PKT_PUBLIC_SUBKEY
class PGP_SecretSubKey(PGP_SubKey, PGP_SecretAnyKey):
__slots__ = []
tag = PKT_SECRET_SUBKEY
# Register class processors
for klass in [PGP_PublicKey, PGP_SecretKey, PGP_PublicSubKey, PGP_SecretSubKey]:
PacketTypeDispatcher.addPacketType(klass)
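# Illustrative sketch, not part of the original module: the v4 fingerprint
# computation that PGP_Key.getKeyFingerprint performs above. The key is
# always hashed as an old-format public main key packet with a two-octet
# length, so the digest input is 0x99, the two-octet body length, then the
# public key body (RFC 4880 sect. 12.2). Never called by the module.
def _exampleV4Fingerprint(publicKeyBody):
    """Return the uppercase hex SHA-1 fingerprint for a v4 public key body."""
    m = digestlib.sha1()
    m.update(chr(0x99))
    m.update(struct.pack("!H", len(publicKeyBody)))
    m.update(publicKeyBody)
    return m.hexdigest().upper()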
class PGP_Trust(PGP_BasePacket):
__slots__ = []
tag = PKT_TRUST
PacketTypeDispatcher.addPacketType(PGP_Trust)
class PGP_CompressedData(PGP_BasePacket):
__slots__ = []
tag = PKT_COMPRESSED_DATA
PacketTypeDispatcher.addPacketType(PGP_CompressedData)
def newKeyFromString(data):
"""Create a new (main) key from the data
Returns None if a key was not found"""
return newKeyFromStream(util.ExtendedStringIO(data))
def newKeyFromStream(stream):
"""Create a new (main) key from the stream
Returns None if a key was not found"""
pkt = PGP_Message.newPacketFromStream(stream)
if pkt is None:
return None
if not isinstance(pkt, PGP_MainKey):
return None
try:
pkt.initSubPackets()
except InvalidBodyError:
return None
return pkt
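# Illustrative usage sketch, not part of the original module: load the
# first main key from a string of binary key data and inspect it. The
# keyData argument is hypothetical; initSubPackets has already been run by
# newKeyFromStream, so the fingerprint and user IDs are directly available.
# Never called by the module.
def _exampleLoadKey(keyData):
    key = newKeyFromString(keyData)
    if key is None:
        return None
    return key.getKeyFingerprint(), key.getUserIds()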
def _mergeSignatures(*sources):
# Merge all signatures from the specified sources
lsigs = {}
# Preserve order
finalsigs = []
for sig in itertools.chain(*sources):
lsiglist = lsigs.setdefault(sig.getShortSigHash(), [])
# Do we already have this sig?
for lsig in lsiglist:
if sig.getSignatureHash() == lsig.getSignatureHash():
lsig.merge(sig)
break
else: # for
# This signature was not found; add it
lsiglist.append(sig)
finalsigs.append(sig)
return finalsigs
def len2bytes(v1, v2):
"""Return the packet body length when represented on 2 bytes"""
return ((v1 - 192) << 8) + v2 + 192
def len4bytes(v1, v2, v3, v4):
"""Return the packet body length when represented on 4 bytes"""
return (v1 << 24) | (v2 << 16) | (v3 << 8) | v4
def len2ToBytes(v):
return (((v - 192) >> 8) & 0xFF) + 192, (v - 192) & 0xFF
def len4ToBytes(v):
return int4ToBytes(v)
def int2FromBytes(v1, v2):
return (v1 << 8) + v2
def int4FromBytes(v1, v2, v3, v4):
return len4bytes(v1, v2, v3, v4)
def int2ToBytes(v):
return (v >> 8) & 0xFF, v & 0xFF
def int4ToBytes(v):
b0, b1 = (v >> 24) & 0xFF, (v >> 16) & 0xFF
b2, b3 = (v >> 8) & 0xFF, v & 0xFF
return b0, b1, b2, b3
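# Illustrative sketch, not part of the original module: the helpers above
# form matched encode/decode pairs. int2ToBytes/int2FromBytes and
# int4ToBytes/int4FromBytes round-trip unsigned 16- and 32-bit values, and
# len2ToBytes/len2bytes round-trip the two-octet length form used for
# signature subpackets (values 192 through 16319). Never called by the
# module.
def _exampleByteHelperRoundTrips():
    assert int2FromBytes(*int2ToBytes(0x1234)) == 0x1234
    assert int4FromBytes(*int4ToBytes(0xCAFEBABE)) == 0xCAFEBABE
    assert len2bytes(*len2ToBytes(200)) == 200
    return True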
def num_gcd(a, b):
while b:
a, b = b, a % b
return a
def num_bitLen(a):
r=0
while a:
a, r = a/2, r+1
return r
def num_getRelPrime(q):
    # Use os module to ensure reads are unbuffered so as not to
    # artificially deflate entropy
randFD = os.open('/dev/urandom', os.O_RDONLY)
b = num_bitLen(q)/8 + 1
r = 0L
while r < 2:
for i in range(b):
r = r*256 + ord(os.read(randFD, 1))
r %= q
while num_gcd(r, q-1) != 1:
r = (r+1) % q
os.close(randFD)
return r
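# Illustrative sketch, not part of the original module: num_getRelPrime is
# used when signing with a DSA key; it draws random bytes from /dev/urandom
# to build a value below q and then nudges it until it shares no common
# factor with q - 1. The predicate below restates the contract its return
# value satisfies. Never called by the module.
def _exampleIsRelPrime(q, r):
    """Return True if r has the properties num_getRelPrime guarantees."""
    return 1 <= r < q and num_gcd(r, q - 1) == 1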
class TimestampPacketDispatcher(PacketTypeDispatcher):
_registry = {}
class TimestampPacketDatabase(PGP_Message):
PacketDispatcherClass = TimestampPacketDispatcher
class KeyTimestampPacket(PGP_Trust):
"""This packet is associated with a particular (main) key in
order to track its "freshness".
"""
__slots__ = ['_trustPacketVersion', '_keyId', '_refreshTimestamp',
'_parsed']
def setUp(self):
self._trustPacketVersion = 1
self._keyId = None
self._refreshTimestamp = None
self._parsed = False
def initialize(self):
self.setUp()
def iterSubPackets(self):
return []
def parse(self, force = False):
"""Parse the body and initializes the internal data
structures for other operations"""
if self._parsed and not force:
return
self.resetBody()
# Reset all internal state
self.initialize()
# Key ID
self._trustPacketVersion = self.readBin(1)[0]
if self._trustPacketVersion != 1:
raise PGPError("Unknown trust packet version %s" % self._trustPacketVersion)
self._keyId = self.readExact(8)
self._refreshTimestamp = int4FromBytes(*self.readBin(4))
self._parsed = True
def getKeyId(self):
self.parse()
return stringToAscii(self._keyId)
def setKeyId(self, keyId):
assert(len(keyId) >= 16)
self._keyId = fingerprintToInternalKeyId(keyId)
def getRefreshTimestamp(self):
return self._refreshTimestamp
def setRefreshTimestamp(self, ts):
self._refreshTimestamp = ts
def rewriteBody(self):
"""Re-writes the body"""
# Re-write ourselves
bodyStream = self._writeBodyV1()
ns, nsp = self._nextStream, self._nextStreamPos
parentPkt = self._parentPacket
self.__init__(bodyStream, newStyle = self._newStyle)
self.setNextStream(ns, nsp)
self.setParentPacket(parentPkt)
self.initialize()
def _writeBodyV1(self):
stream = util.ExtendedStringIO()
stream.write(binSeqToString([self._trustPacketVersion]))
stream.write(self._keyId)
stream.write(binSeqToString(int4ToBytes(self._refreshTimestamp)))
# Write padding
stream.write('\0' * 25)
stream.seek(0)
return stream
def merge(self, other):
assert self.tag == other.tag
ns, nsp = self._nextStream, self._nextStreamPos
parentPkt = self._parentPacket
self.__init__(other.getBodyStream(), newStyle = self._newStyle)
self.setNextStream(ns, nsp)
self.setParentPacket(parentPkt)
self.initialize()
TimestampPacketDispatcher.addPacketType(KeyTimestampPacket)
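# Illustrative sketch, not part of the original module: the 38-octet body a
# version 1 KeyTimestampPacket serializes in _writeBodyV1 above - a version
# octet of 1, the eight-octet key ID, a four-octet big-endian refresh
# timestamp, then 25 octets of zero padding. Never called by the module.
def _exampleTimestampBody(keyId8, timestamp):
    """Return the version 1 key timestamp packet body for the given key ID."""
    assert len(keyId8) == 8
    body = (chr(1) + keyId8 +
        binSeqToString(int4ToBytes(timestamp)) + '\0' * 25)
    assert len(body) == 38
    return body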
class PublicKeyring(object):
"""A representation of a public keyring."""
def __init__(self, keyringPath, tsDbPath):
self._keyringPath = keyringPath
self._tsDbPath = tsDbPath
self._tsDbTimestamp = None
self._cache = {}
# For debugging purposes only
self._timeIncrement = 1
@staticmethod
def _createFile(fileName):
try:
util.mkdirChain(os.path.dirname(fileName))
file(fileName, "a+")
except (IOError, OSError), e:
raise KeyringError(e.errno, e.strerror, e.filename)
def addKeys(self, keys, timestamp = None):
self._createFile(self._keyringPath)
# Expand generators
if hasattr(keys, 'next'):
keys = list(keys)
for key in keys:
assert(isinstance(key, PGP_MainKey))
keyFingerprints = addKeys(keys, self._keyringPath)
self.updateTimestamps(keyFingerprints, timestamp = timestamp)
return keyFingerprints
def _extractKey(self, key):
if not key:
return ""
if ord(key[0]) & 0x80:
# Most likely already binary
return key
return parseAsciiArmorKey(key)
def addKeysAsStrings(self, keys, timestamp = None):
sio = util.ExtendedStringIO()
for k in keys:
assert(isinstance(k, str))
sio.write(self._extractKey(k))
msg = PGP_Message(sio, start = 0)
return self.addKeys(msg.iterMainKeys(), timestamp = timestamp)
def updateTimestamps(self, keyIds, timestamp = None):
self._createFile(self._tsDbPath)
# Expand generators
if hasattr(keyIds, 'next'):
keyIds = list(keyIds)
for keyId in keyIds:
assert(len(keyId) >= 16)
if timestamp is None:
timestamp = int(time.time())
pkts = []
for keyId in keyIds:
pkt = KeyTimestampPacket(util.ExtendedStringIO())
pkt.setKeyId(keyId)
pkt.setRefreshTimestamp(timestamp)
pkt.rewriteBody()
pkts.append(pkt)
mtime0 = os.stat(self._tsDbPath)[stat.ST_MTIME]
addKeyTimestampPackets(pkts, self._tsDbPath)
mtime1 = os.stat(self._tsDbPath)[stat.ST_MTIME]
if mtime0 == mtime1:
# Cheat, and set the mtime to be a second larger
os.utime(self._tsDbPath, (mtime1, mtime1 + self._timeIncrement))
elif self._timeIncrement > 1:
os.utime(self._tsDbPath, (mtime1, mtime1 + self._timeIncrement))
# We know for a fact we've touched the file.
# In order to prevent sub-second updates from not being noticed, reset
# the mtime.
self._tsDbTimestamp = None
def _parseTsDb(self):
# Stat the timestamp database
stream = file(self._tsDbPath)
streamfd = stream.fileno()
mtime = os.fstat(streamfd).st_mtime
if self._tsDbTimestamp == mtime:
# Database hasn't changed
return
allKeys = self._getAllKeys()
self._tsDbTimestamp = mtime
self._cache.clear()
for pkt in TimestampPacketDatabase(stream).iterTrustPackets():
pkt.parse()
mainKeyId = pkt.getKeyId()
ts = pkt.getRefreshTimestamp()
self._cache[mainKeyId] = ts
for sk in allKeys.get(mainKeyId, []):
self._cache[sk] = ts
def getKeyTimestamp(self, keyId):
assert(len(keyId) >= 16)
self._parseTsDb()
# XXX for v3 keys, trimming to the last 8 bytes is not the valid way
# to get the key ID. But it's just a cache.
return self._cache.get(keyId[-16:], None)
def getKey(self, keyId):
"""
Retrieve the key.
@param keyId: the key ID.
@type keyId: str
@rtype: PGP_Key
@return: a key with the specified key ID
@raise KeyNotFound: if the key was not found
"""
# exportKey will return a fresh file object
retStream = exportKey(keyId, self._keyringPath)
# Note that exportKey will export both the main key and the subkeys.
# Because of this, we can't blindly grab the first key in the new
# keyring.
msg = PGP_Message(retStream)
return msg.iterByKeyId(keyId).next()
def _getAllKeys(self):
# Return all keys and subkeys
# We need them in order to handle subkeys too
ret = {}
try:
msg = PGP_Message(self._keyringPath)
except (OSError, IOError):
# We could not read the keyring - no keys found
return ret
for pk in msg.iterMainKeys():
fp = pk.getKeyId()
ret[fp] = set(x.getKeyId() for x in pk.iterSubKeys())
return ret
def key_type(cryptoKey):
# pycrypto's API has no consistent way to tell what kind of key we
# have. This is apparently the least awful way to do it.
keyName = cryptoKey.__class__.__name__
if 'RSA' in keyName:
return 'RSA'
elif 'DSA' in keyName:
return 'DSA'
else:
raise TypeError("Unrecognized key type: " + keyName)
| {
"content_hash": "0b0c43d764dd39ac5060310d5bdfdc48",
"timestamp": "",
"source": "github",
"line_count": 3604,
"max_line_length": 89,
"avg_line_length": 34.06326304106548,
"alnum_prop": 0.5918510312469454,
"repo_name": "fedora-conary/conary",
"id": "afef956bfab3b6b397c4bec76780b8a60ad26d46",
"size": "123351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conary/lib/openpgpfile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "481681"
},
{
"name": "C++",
"bytes": "8244"
},
{
"name": "CSS",
"bytes": "3920"
},
{
"name": "Erlang",
"bytes": "477"
},
{
"name": "Perl",
"bytes": "45629"
},
{
"name": "Python",
"bytes": "10586616"
},
{
"name": "Shell",
"bytes": "4657"
},
{
"name": "Standard ML",
"bytes": "2756"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from itsdangerous import URLSafeTimedSerializer
from .. import app
ts = URLSafeTimedSerializer(app.config["SECRET_KEY"])
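# Usage sketch (salt and address are hypothetical; assumes the standard
# itsdangerous API): the serializer is typically used to mint and verify
# time-limited tokens, e.g. for e-mail confirmation links:
#
#   token = ts.dumps("user@example.com", salt="email-confirm")
#   email = ts.loads(token, salt="email-confirm", max_age=86400)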
| {
"content_hash": "0fc81ed97cd4af10ccf4ad3398ff2461",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 53,
"avg_line_length": 24.428571428571427,
"alnum_prop": 0.7602339181286549,
"repo_name": "qisanstudio/qsapp-suibe",
"id": "a6f335c1dd6f7c193c60bfa490f79493826432cc",
"size": "196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/suibe/contrib/security.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7089"
},
{
"name": "HTML",
"bytes": "27153"
},
{
"name": "JavaScript",
"bytes": "4660"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "39295"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('curling', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='session',
name='IsClosed',
field=models.BooleanField(default=0),
),
migrations.AlterField(
model_name='club',
name='LastUpdated',
field=models.DateTimeField(default=datetime.datetime(2017, 2, 4, 23, 7, 15, 980002, tzinfo=utc)),
),
migrations.AlterField(
model_name='session',
name='Initiated',
field=models.DateTimeField(default=datetime.datetime(2017, 2, 4, 23, 7, 15, 993696, tzinfo=utc)),
),
]
| {
"content_hash": "3866ce826de4f4974231e9714dde7412",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 109,
"avg_line_length": 28.4,
"alnum_prop": 0.5915492957746479,
"repo_name": "zbassett/curling-robot",
"id": "ca9324e504e3b5c4ec5db57b5cab464054b74081",
"size": "925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RaspberryPi/DjangoSite/mysite/curling/migrations/0002_auto_20170204_2307.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "23791"
},
{
"name": "C++",
"bytes": "28526"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "20489"
},
{
"name": "JavaScript",
"bytes": "1581"
},
{
"name": "Makefile",
"bytes": "2494"
},
{
"name": "Python",
"bytes": "69600"
}
],
"symlink_target": ""
} |
import sqlalchemy.types as sqltypes
from sqlalchemy.sql import operators, expression
from sqlalchemy.sql import default_comparator
from sqlalchemy.ext.mutable import Mutable
import geojson
class MutableList(Mutable, list):
@classmethod
def coerce(cls, key, value):
""" Convert plain list to MutableList """
if not isinstance(value, MutableList):
if isinstance(value, list):
return MutableList(value)
elif value is None:
return value
else:
return MutableList([value])
else:
return value
def __init__(self, initval=None):
list.__init__(self, initval or [])
def __setitem__(self, key, value):
list.__setitem__(self, key, value)
self.changed()
def __eq__(self, other):
return list.__eq__(self, other)
def append(self, item):
list.append(self, item)
self.changed()
def insert(self, idx, item):
list.insert(self, idx, item)
self.changed()
def extend(self, iterable):
list.extend(self, iterable)
self.changed()
def pop(self, index=-1):
list.pop(self, index)
self.changed()
def remove(self, item):
list.remove(self, item)
self.changed()
class MutableDict(Mutable, dict):
@classmethod
def coerce(cls, key, value):
"Convert plain dictionaries to MutableDict."
if not isinstance(value, MutableDict):
if isinstance(value, dict):
return MutableDict(value)
# this call will raise ValueError
return Mutable.coerce(key, value)
else:
return value
def __init__(self, initval=None, to_update=None, root_change_key=None):
initval = initval or {}
self._changed_keys = set()
self._deleted_keys = set()
self._overwrite_key = root_change_key
self.to_update = self if to_update is None else to_update
for k in initval:
initval[k] = self._convert_dict(initval[k],
overwrite_key=k if self._overwrite_key is None else self._overwrite_key
)
dict.__init__(self, initval)
def __setitem__(self, key, value):
value = self._convert_dict(value, key if self._overwrite_key is None else self._overwrite_key)
dict.__setitem__(self, key, value)
self.to_update.on_key_changed(
key if self._overwrite_key is None else self._overwrite_key
)
def __delitem__(self, key):
dict.__delitem__(self, key)
# add the key to the deleted keys if this is the root object
# otherwise update on root object
if self._overwrite_key is None:
self._deleted_keys.add(key)
self.changed()
else:
self.to_update.on_key_changed(self._overwrite_key)
def on_key_changed(self, key):
self._deleted_keys.discard(key)
self._changed_keys.add(key)
self.changed()
def _convert_dict(self, value, overwrite_key):
if isinstance(value, dict) and not isinstance(value, MutableDict):
return MutableDict(value, self.to_update, overwrite_key)
return value
def __eq__(self, other):
return dict.__eq__(self, other)
class _Craty(sqltypes.UserDefinedType):
cache_ok = True
class Comparator(sqltypes.TypeEngine.Comparator):
def __getitem__(self, key):
return default_comparator._binary_operate(self.expr,
operators.getitem,
key)
def get_col_spec(self):
return 'OBJECT'
type = MutableDict
comparator_factory = Comparator
Object = Craty = MutableDict.as_mutable(_Craty)
class Any(expression.ColumnElement):
"""Represent the clause ``left operator ANY (right)``. ``right`` must be
an array expression.
    Copied from the postgresql dialect.
.. seealso::
:class:`sqlalchemy.dialects.postgresql.ARRAY`
:meth:`sqlalchemy.dialects.postgresql.ARRAY.Comparator.any`
ARRAY-bound method
"""
__visit_name__ = 'any'
inherit_cache = True
def __init__(self, left, right, operator=operators.eq):
self.type = sqltypes.Boolean()
self.left = expression.literal(left)
self.right = right
self.operator = operator
class _ObjectArray(sqltypes.UserDefinedType):
cache_ok = True
class Comparator(sqltypes.TypeEngine.Comparator):
def __getitem__(self, key):
return default_comparator._binary_operate(self.expr,
operators.getitem,
key)
def any(self, other, operator=operators.eq):
"""Return ``other operator ANY (array)`` clause.
Argument places are switched, because ANY requires array
expression to be on the right hand-side.
E.g.::
from sqlalchemy.sql import operators
conn.execute(
select([table.c.data]).where(
table.c.data.any(7, operator=operators.lt)
)
)
:param other: expression to be compared
:param operator: an operator object from the
:mod:`sqlalchemy.sql.operators`
package, defaults to :func:`.operators.eq`.
.. seealso::
:class:`.postgresql.Any`
:meth:`.postgresql.ARRAY.Comparator.all`
"""
return Any(other, self.expr, operator=operator)
type = MutableList
comparator_factory = Comparator
def get_col_spec(self, **kws):
return "ARRAY(OBJECT)"
ObjectArray = MutableList.as_mutable(_ObjectArray)
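# Usage sketch (model is hypothetical; assumes the standard SQLAlchemy
# declarative API): the Object and ObjectArray types defined above are used
# like any other column type, e.g.
#
#   from sqlalchemy import Column, String
#   from sqlalchemy.ext.declarative import declarative_base
#
#   Base = declarative_base()
#
#   class Character(Base):
#       __tablename__ = 'characters'
#       name = Column(String, primary_key=True)
#       details = Column(Object)        # maps to a CrateDB OBJECT column
#       traits = Column(ObjectArray)    # maps to ARRAY(OBJECT)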
class Geopoint(sqltypes.UserDefinedType):
cache_ok = True
class Comparator(sqltypes.TypeEngine.Comparator):
def __getitem__(self, key):
return default_comparator._binary_operate(self.expr,
operators.getitem,
key)
def get_col_spec(self):
return 'GEO_POINT'
def bind_processor(self, dialect):
def process(value):
if isinstance(value, geojson.Point):
return value.coordinates
return value
return process
def result_processor(self, dialect, coltype):
return tuple
comparator_factory = Comparator
class Geoshape(sqltypes.UserDefinedType):
cache_ok = True
class Comparator(sqltypes.TypeEngine.Comparator):
def __getitem__(self, key):
return default_comparator._binary_operate(self.expr,
operators.getitem,
key)
def get_col_spec(self):
return 'GEO_SHAPE'
def result_processor(self, dialect, coltype):
return geojson.GeoJSON.to_instance
comparator_factory = Comparator
| {
"content_hash": "36cd0c3e87b39199ea965947ea34ffd7",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 115,
"avg_line_length": 29.169354838709676,
"alnum_prop": 0.5616533038429637,
"repo_name": "crate/crate-python",
"id": "1a3d7a0655bcd114447211a558cc72b539cd5ec3",
"size": "8257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/crate/client/sqlalchemy/types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "228149"
},
{
"name": "Shell",
"bytes": "5367"
}
],
"symlink_target": ""
} |
from nose.tools import * # noqa
from query.helpers import setup_demo_env
import os
def test_demo_setup():
setup_demo_env()
assert os.environ.get("QUERY_DB_DRIVER") is "sqlite"
assert os.environ.get("QUERY_DB_HOST") is None
assert os.environ.get("QUERY_DB_PORT") is None
# Test override of existing host/port params
os.environ["QUERY_DB_HOST"] = "bad_host"
os.environ["QUERY_DB_PORT"] = "9999"
setup_demo_env()
assert os.environ.get("QUERY_DB_HOST") is None
assert os.environ.get("QUERY_DB_PORT") is None
| {
"content_hash": "c9504fbe1d8cf8013be6a58035ed5433",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 56,
"avg_line_length": 32.23529411764706,
"alnum_prop": 0.677007299270073,
"repo_name": "boydgreenfield/query",
"id": "fe7475b36f23ebc5ccbe4d80bd6f9fe6f4c4152f",
"size": "548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/helpers_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23709"
},
{
"name": "Shell",
"bytes": "116"
}
],
"symlink_target": ""
} |
import glob
import multiprocessing
import os
import platform
import shlex
import subprocess
import sys
from collections import namedtuple
from contextlib import contextmanager
from datetime import date
from distutils import log, sysconfig
from distutils.spawn import find_executable
from textwrap import dedent
import setuptools
import setuptools.command.build_ext
import setuptools.command.build_py
import setuptools.command.develop
TOP_DIR = os.path.realpath(os.path.dirname(__file__))
SRC_DIR = os.path.join(TOP_DIR, "onnx")
TP_DIR = os.path.join(TOP_DIR, "third_party")
CMAKE_BUILD_DIR = os.path.join(TOP_DIR, ".setuptools-cmake-build")
PACKAGE_NAME = "onnx"
WINDOWS = os.name == "nt"
CMAKE = find_executable("cmake3") or find_executable("cmake")
MAKE = find_executable("make")
install_requires = []
setup_requires = []
tests_require = []
extras_require = {}
################################################################################
# Global variables for controlling the build variant
################################################################################
# Default value is set to TRUE\1 to keep the settings the same as the current ones.
# However, going forward the recommended way is to set this to False\0
ONNX_ML = not bool(os.getenv("ONNX_ML") == "0")
ONNX_VERIFY_PROTO3 = bool(os.getenv("ONNX_VERIFY_PROTO3") == "1")
ONNX_NAMESPACE = os.getenv("ONNX_NAMESPACE", "onnx")
ONNX_BUILD_TESTS = bool(os.getenv("ONNX_BUILD_TESTS") == "1")
ONNX_DISABLE_EXCEPTIONS = bool(os.getenv("ONNX_DISABLE_EXCEPTIONS") == "1")
USE_MSVC_STATIC_RUNTIME = bool(os.getenv("USE_MSVC_STATIC_RUNTIME", "0") == "1")
DEBUG = bool(os.getenv("DEBUG", "0") == "1")
COVERAGE = bool(os.getenv("COVERAGE", "0") == "1")
################################################################################
# Version
################################################################################
try:
git_version = (
subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=TOP_DIR)
.decode("ascii")
.strip()
)
except (OSError, subprocess.CalledProcessError):
git_version = None
with open(os.path.join(TOP_DIR, "VERSION_NUMBER")) as version_file:
VERSION_NUMBER = version_file.read().strip()
if "--weekly_build" in sys.argv:
today_number = date.today().strftime("%Y%m%d")
VERSION_NUMBER += ".dev" + today_number
PACKAGE_NAME = "onnx-weekly"
sys.argv.remove("--weekly_build")
VersionInfo = namedtuple("VersionInfo", ["version", "git_version"])(
version=VERSION_NUMBER, git_version=git_version
)
################################################################################
# Pre Check
################################################################################
assert CMAKE, "Could not find cmake executable!"
################################################################################
# Utilities
################################################################################
@contextmanager
def cd(path):
if not os.path.isabs(path):
raise RuntimeError(f"Can only cd to absolute path, got: {path}")
orig_path = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(orig_path)
################################################################################
# Customized commands
################################################################################
class ONNXCommand(setuptools.Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
class create_version(ONNXCommand):
def run(self):
with open(os.path.join(SRC_DIR, "version.py"), "w") as f:
f.write(
dedent(
"""\
# This file is generated by setup.py. DO NOT EDIT!
version = "{version}"
git_version = "{git_version}"
""".format(
**dict(VersionInfo._asdict())
)
)
)
class cmake_build(setuptools.Command):
"""
Compiles everything when `python setup.py build` is run using cmake.
Custom args can be passed to cmake by specifying the `CMAKE_ARGS`
environment variable.
The number of CPUs used by `make` can be specified by passing `-j<ncpus>`
to `setup.py build`. By default all CPUs are used.
"""
user_options = [("jobs=", "j", "Specifies the number of jobs to use with make")]
built = False
def initialize_options(self):
self.jobs = None
def finalize_options(self):
self.set_undefined_options("build", ("parallel", "jobs"))
if self.jobs is None and os.getenv("MAX_JOBS") is not None:
self.jobs = os.getenv("MAX_JOBS")
self.jobs = multiprocessing.cpu_count() if self.jobs is None else int(self.jobs)
def run(self):
if cmake_build.built:
return
cmake_build.built = True
if not os.path.exists(CMAKE_BUILD_DIR):
os.makedirs(CMAKE_BUILD_DIR)
with cd(CMAKE_BUILD_DIR):
build_type = "Release"
# configure
cmake_args = [
CMAKE,
f"-DPYTHON_INCLUDE_DIR={sysconfig.get_python_inc()}",
f"-DPYTHON_EXECUTABLE={sys.executable}",
"-DBUILD_ONNX_PYTHON=ON",
"-DCMAKE_EXPORT_COMPILE_COMMANDS=ON",
f"-DONNX_NAMESPACE={ONNX_NAMESPACE}",
f"-DPY_EXT_SUFFIX={sysconfig.get_config_var('EXT_SUFFIX') or ''}",
]
if COVERAGE:
cmake_args.append("-DONNX_COVERAGE=ON")
if COVERAGE or DEBUG:
# in order to get accurate coverage information, the
# build needs to turn off optimizations
build_type = "Debug"
cmake_args.append(f"-DCMAKE_BUILD_TYPE={build_type}")
if WINDOWS:
cmake_args.extend(
[
                        # we need to link with libpython on windows, so
                        # pass the python version so that cmake can
                        # find python
f"-DPY_VERSION={'{}.{}'.format(*sys.version_info[:2])}",
]
)
if USE_MSVC_STATIC_RUNTIME:
cmake_args.append("-DONNX_USE_MSVC_STATIC_RUNTIME=ON")
if platform.architecture()[0] == "64bit":
cmake_args.extend(["-A", "x64", "-T", "host=x64"])
else:
cmake_args.extend(["-A", "Win32", "-T", "host=x86"])
if ONNX_ML:
cmake_args.append("-DONNX_ML=1")
if ONNX_VERIFY_PROTO3:
cmake_args.append("-DONNX_VERIFY_PROTO3=1")
if ONNX_BUILD_TESTS:
cmake_args.append("-DONNX_BUILD_TESTS=ON")
if ONNX_DISABLE_EXCEPTIONS:
cmake_args.append("-DONNX_DISABLE_EXCEPTIONS=ON")
if "CMAKE_ARGS" in os.environ:
extra_cmake_args = shlex.split(os.environ["CMAKE_ARGS"])
# prevent crossfire with downstream scripts
del os.environ["CMAKE_ARGS"]
log.info(f"Extra cmake args: {extra_cmake_args}")
cmake_args.extend(extra_cmake_args)
cmake_args.append(TOP_DIR)
log.info(f"Using cmake args: {cmake_args}")
if "-DONNX_DISABLE_EXCEPTIONS=ON" in cmake_args:
raise RuntimeError(
"-DONNX_DISABLE_EXCEPTIONS=ON option is only available for c++ builds. Python binding require exceptions to be enabled."
)
subprocess.check_call(cmake_args)
build_args = [CMAKE, "--build", os.curdir]
if WINDOWS:
build_args.extend(["--config", build_type])
build_args.extend(["--", f"/maxcpucount:{self.jobs}"])
else:
build_args.extend(["--", "-j", str(self.jobs)])
subprocess.check_call(build_args)
class build_py(setuptools.command.build_py.build_py):
def run(self):
self.run_command("create_version")
self.run_command("cmake_build")
generated_python_files = glob.glob(
os.path.join(CMAKE_BUILD_DIR, "onnx", "*.py")
) + glob.glob(os.path.join(CMAKE_BUILD_DIR, "onnx", "*.pyi"))
for src in generated_python_files:
dst = os.path.join(TOP_DIR, os.path.relpath(src, CMAKE_BUILD_DIR))
self.copy_file(src, dst)
# TODO (https://github.com/pypa/setuptools/issues/3606)
# Review the command customisations to enable editable_mode
self.editable_mode = False
return setuptools.command.build_py.build_py.run(self)
class develop(setuptools.command.develop.develop):
def run(self):
self.run_command("build_py")
setuptools.command.develop.develop.run(self)
class build_ext(setuptools.command.build_ext.build_ext):
def run(self):
self.run_command("cmake_build")
setuptools.command.build_ext.build_ext.run(self)
def build_extensions(self):
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = os.path.basename(self.get_ext_filename(fullname))
lib_path = CMAKE_BUILD_DIR
if os.name == "nt":
debug_lib_dir = os.path.join(lib_path, "Debug")
release_lib_dir = os.path.join(lib_path, "Release")
if os.path.exists(debug_lib_dir):
lib_path = debug_lib_dir
elif os.path.exists(release_lib_dir):
lib_path = release_lib_dir
src = os.path.join(lib_path, filename)
dst = os.path.join(os.path.realpath(self.build_lib), "onnx", filename)
self.copy_file(src, dst)
class mypy_type_check(ONNXCommand):
description = "Run MyPy type checker"
def run(self):
"""Run command."""
onnx_script = os.path.realpath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "tools/mypy-onnx.py"
)
)
returncode = subprocess.call([sys.executable, onnx_script])
sys.exit(returncode)
cmdclass = {
"create_version": create_version,
"cmake_build": cmake_build,
"build_py": build_py,
"develop": develop,
"build_ext": build_ext,
"typecheck": mypy_type_check,
}
################################################################################
# Extensions
################################################################################
ext_modules = [setuptools.Extension(name="onnx.onnx_cpp2py_export", sources=[])]
################################################################################
# Packages
################################################################################
# Add package directories here if you want to package them with the source
# TODO try to remove unnecessary .cpp files
include_dirs = [
"onnx.backend.test.data.*",
"onnx.common",
"onnx.defs.*",
"onnx.examples*",
"onnx.shape_inference",
"onnx.test.cpp",
"onnx.version_converter*",
]
packages = setuptools.find_packages() + setuptools.find_namespace_packages(
include=include_dirs
)
requirements_file = "requirements.txt"
requirements_path = os.path.join(os.getcwd(), requirements_file)
if not os.path.exists(requirements_path):
this = os.path.dirname(__file__)
requirements_path = os.path.join(this, requirements_file)
if not os.path.exists(requirements_path):
raise FileNotFoundError("Unable to find " + requirements_file)
with open(requirements_path) as f:
install_requires = f.read().splitlines()
################################################################################
# Test
################################################################################
setup_requires.append("pytest-runner")
tests_require.append("pytest")
tests_require.append("nbval")
tests_require.append("tabulate")
extras_require["lint"] = [
"clang-format==13.0.0",
"flake8>=5.0.2",
"mypy>=0.971",
"types-protobuf==3.18.4",
"black>=22.3",
"isort[colors]>=5.10",
]
################################################################################
# Final
################################################################################
setuptools.setup(
name=PACKAGE_NAME,
version=VersionInfo.version,
description="Open Neural Network Exchange",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
ext_modules=ext_modules,
cmdclass=cmdclass,
packages=packages,
license="Apache License v2.0",
include_package_data=True,
package_data={"onnx": ["py.typed", "*.pyi"]},
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
extras_require=extras_require,
author="ONNX",
author_email="[email protected]",
url="https://github.com/onnx/onnx",
entry_points={
"console_scripts": [
"check-model = onnx.bin.checker:check_model",
"check-node = onnx.bin.checker:check_node",
"backend-test-tools = onnx.backend.test.cmd_tools:main",
]
},
)
| {
"content_hash": "9665fac600984ab5377101b9fca2ef9d",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 140,
"avg_line_length": 35.031413612565444,
"alnum_prop": 0.5348976236735914,
"repo_name": "onnx/onnx",
"id": "e78d09804a19551a971454a08c403e5fa9dfffdc",
"size": "13421",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "546"
},
{
"name": "C",
"bytes": "2062"
},
{
"name": "C++",
"bytes": "2003844"
},
{
"name": "CMake",
"bytes": "32553"
},
{
"name": "Jupyter Notebook",
"bytes": "29310"
},
{
"name": "PowerShell",
"bytes": "1157"
},
{
"name": "Python",
"bytes": "2073844"
},
{
"name": "Shell",
"bytes": "2918"
}
],
"symlink_target": ""
} |
'''
Simple interface for reporting metrics to DataDog.
'''
from __future__ import print_function
from functools import wraps
import time
class DataDogMetrics(object):
'''
Datadog supports printing to stdout to report metrics. This only gives us
counts and gauges:
https://www.datadoghq.com/blog/monitoring-lambda-functions-datadog
Another method would be via the API but that one only supports gauges and
requires auth, which I'd rather not do until they've added support for
histograms and counts.
'''
def __init__(self, service_prefix, stats_group=None):
self.service_prefix = service_prefix
self.default_tags = ['group:%s' % stats_group] if stats_group is not None else []
def incr(self, metric_name, count=1, tags=None):
'''
Incr - Increment a counter metric, providing a count of occurrences per
second.
'''
full_metric_name = self._build_metric_name(metric_name)
all_tags = self._build_tags(tags)
return self._print_metric('count', full_metric_name, count, all_tags)
def gauge(self, metric_name, value, tags=None):
'''
Gauge - Gauges are a constant data type. They are not subject to
averaging, and they don't change unless you change them. That is, once
you set a gauge value, it will be a flat line on the graph until you
change it again.
'''
full_metric_name = self._build_metric_name(metric_name)
all_tags = self._build_tags(tags)
return self._print_metric('gauge', full_metric_name, value, all_tags)
def histogram(self, metric_name, value, tags=None):
'''
Histogram - Send a histogram, tracking multiple samples of a metric
'''
full_metric_name = self._build_metric_name(metric_name)
all_tags = self._build_tags(tags)
return self._print_metric('histogram', full_metric_name, value, all_tags)
def timer(self, metric_name, tags=None):
'''
Timer - A convenient decorator that automatically records the runtime
of your function and reports it as a histogram.
'''
def decorator(function):
@wraps(function)
def wrapper(*args, **kwargs):
start_time = time.time()
ret_val = function(*args, **kwargs)
duration = time.time() - start_time
self.histogram(metric_name, duration, tags)
return ret_val
return wrapper
return decorator
def timing(self, metric_name, delta, tags=None):
'''
Timing - Track a duration event
'''
return self.histogram(metric_name, delta, tags)
def set(self, metric_name, value, tags=None):
'''
Set - Send a metric that tracks the number of unique items in a set
'''
# NOT SUPPORTED YET
def _build_tags(self, tags=None):
return (tags or []) + self.default_tags
def _build_metric_name(self, metric_name):
return '{0}.{1}'.format(self.service_prefix, metric_name)
def _print_metric(self, metric_type, metric_name, value, tags):
unix_epoch_timestamp = int(time.time())
metric = 'MONITORING|{0}|{1}|{2}|{3}'.format(
unix_epoch_timestamp,
value,
metric_type,
metric_name)
if tags:
metric += '|#{}'.format(','.join(tags))
print(metric)
return metric
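# Usage sketch (service/group names are hypothetical): based on _print_metric
# above, a call sequence like the following writes metric lines to stdout in
# the format described in the class docstring:
#
#   metrics = DataDogMetrics('my_service', stats_group='batch_jobs')
#   metrics.incr('records.processed', 3, tags=['env:dev'])
#   # -> MONITORING|<unix_ts>|3|count|my_service.records.processed|#env:dev,group:batch_jobs
#
#   @metrics.timer('handler.duration')
#   def handler(event, context):
#       ...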
| {
"content_hash": "c9eef4b3a921c1fe8d937575b6080c6c",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 89,
"avg_line_length": 36,
"alnum_prop": 0.606815578465063,
"repo_name": "500px/lambda_dd_metrics",
"id": "59d34429388629420d3b0870dc99391fbf8c2cbd",
"size": "3514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lambda_dd_metrics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6474"
}
],
"symlink_target": ""
} |
from ctypes import Structure, c_char_p, c_int, c_void_p, CDLL
import ctypes.util
import os, sys
class error(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class datum(Structure):
_fields_ = [
('dptr', c_char_p),
('dsize', c_int),
]
def __init__(self, text):
if not isinstance(text, str):
raise TypeError("datum: expected string, not %s" % type(text))
Structure.__init__(self, text, len(text))
class dbm(object):
def __init__(self, dbmobj):
self._aobj = dbmobj
def close(self):
if not self._aobj:
raise error('DBM object has already been closed')
getattr(lib, funcs['close'])(self._aobj)
self._aobj = None
def __del__(self):
if self._aobj:
self.close()
def keys(self):
if not self._aobj:
raise error('DBM object has already been closed')
allkeys = []
k = getattr(lib, funcs['firstkey'])(self._aobj)
while k.dptr:
allkeys.append(k.dptr[:k.dsize])
k = getattr(lib, funcs['nextkey'])(self._aobj)
return allkeys
def get(self, key, default=None):
if not self._aobj:
raise error('DBM object has already been closed')
dat = datum(key)
k = getattr(lib, funcs['fetch'])(self._aobj, dat)
if k.dptr:
return k.dptr[:k.dsize]
if getattr(lib, funcs['error'])(self._aobj):
getattr(lib, funcs['clearerr'])(self._aobj)
raise error("")
return default
def __len__(self):
return len(self.keys())
def __getitem__(self, key):
value = self.get(key)
if value is None:
raise KeyError(key)
return value
def __setitem__(self, key, value):
if not self._aobj:
raise error('DBM object has already been closed')
dat = datum(key)
data = datum(value)
status = getattr(lib, funcs['store'])(self._aobj, dat, data, lib.DBM_REPLACE)
if getattr(lib, funcs['error'])(self._aobj):
getattr(lib, funcs['clearerr'])(self._aobj)
raise error("")
return status
def setdefault(self, key, default=''):
if not self._aobj:
raise error('DBM object has already been closed')
dat = datum(key)
k = getattr(lib, funcs['fetch'])(self._aobj, dat)
if k.dptr:
return k.dptr[:k.dsize]
data = datum(default)
status = getattr(lib, funcs['store'])(self._aobj, dat, data, lib.DBM_INSERT)
if status < 0:
getattr(lib, funcs['clearerr'])(self._aobj)
raise error("cannot add item to database")
return default
def __contains__(self, key):
if not self._aobj:
raise error('DBM object has already been closed')
dat = datum(key)
k = getattr(lib, funcs['fetch'])(self._aobj, dat)
if k.dptr:
return True
return False
has_key = __contains__
def __delitem__(self, key):
if not self._aobj:
raise error('DBM object has already been closed')
dat = datum(key)
status = getattr(lib, funcs['delete'])(self._aobj, dat)
if status < 0:
raise KeyError(key)
### initialization: Berkeley DB versus normal DB
def _init_func(name, argtypes=None, restype=None):
try:
func = getattr(lib, '__db_ndbm_' + name)
funcs[name] = '__db_ndbm_' + name
except AttributeError:
func = getattr(lib, 'dbm_' + name)
funcs[name] = 'dbm_' + name
if argtypes is not None:
func.argtypes = argtypes
if restype is not None:
func.restype = restype
if sys.platform != 'darwin':
libpath = ctypes.util.find_library('db')
if not libpath:
# XXX this is hopeless...
libpath = ctypes.util.find_library('db-4.5')
if not libpath:
raise ImportError("Cannot find dbm library")
lib = CDLL(libpath) # Linux
_platform = 'bdb'
else:
lib = CDLL("/usr/lib/libdbm.dylib") # OS X
_platform = 'osx'
library = "GNU gdbm"
funcs = {}
_init_func('open', (c_char_p, c_int, c_int), restype=c_void_p)
_init_func('close', (c_void_p,), restype=c_void_p)
_init_func('firstkey', (c_void_p,), restype=datum)
_init_func('nextkey', (c_void_p,), restype=datum)
_init_func('fetch', (c_void_p, datum), restype=datum)
_init_func('store', (c_void_p, datum, datum, c_int), restype=c_int)
_init_func('error', (c_void_p,), restype=c_int)
_init_func('delete', (c_void_p, datum), restype=c_int)
lib.DBM_INSERT = 0
lib.DBM_REPLACE = 1
def open(filename, flag='r', mode=0666):
"open a DBM database"
if not isinstance(filename, str):
raise TypeError("expected string")
openflag = 0
try:
openflag = {
'r': os.O_RDONLY,
'rw': os.O_RDWR,
'w': os.O_RDWR | os.O_CREAT,
'c': os.O_RDWR | os.O_CREAT,
'n': os.O_RDWR | os.O_CREAT | os.O_TRUNC,
}[flag]
except KeyError, e:
raise error("arg 2 to open should be 'r', 'w', 'c', or 'n'")
a_db = getattr(lib, funcs['open'])(filename, openflag, mode)
if a_db == 0:
raise error("Could not open file %s.db" % filename)
return dbm(a_db)
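# Usage sketch (file name is hypothetical): the module mirrors the stdlib dbm
# interface, e.g.
#
#   db = open('/tmp/example', 'c')   # create if missing, open read/write
#   db['spam'] = 'eggs'
#   db.keys()                        # -> ['spam']
#   db['spam']                       # -> 'eggs'
#   db.close()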
__all__ = ('datum', 'dbm', 'error', 'funcs', 'open', 'library')
| {
"content_hash": "3c2a6aeab74acfc3d28d04e141760dc5",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 85,
"avg_line_length": 30.593220338983052,
"alnum_prop": 0.554016620498615,
"repo_name": "sauloal/pycluster",
"id": "6839a4d1a8e4194d0842051991f485d952c51fe9",
"size": "5415",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pypy-1.9_64/lib_pypy/dbm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "126041"
},
{
"name": "C++",
"bytes": "45234"
},
{
"name": "JavaScript",
"bytes": "423397"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "23164908"
},
{
"name": "Shell",
"bytes": "7769"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
"""
Given a table of changes (gotten from mesh_changes.py), create the change in the working owl file (doid-edit.owl), using
owltools, then create a new branch and commit it to github. Then create a pull request with a comment containing the
metadata.
"""
import pandas as pd
import requests
from requests.auth import HTTPBasicAuth
from scheduled_bots.disease_ontology.mesh_changes import main
from scheduled_bots.local import GITHUB_PASS
from tabulate import tabulate
import subprocess
import os
GIT_LOCAL_BASE = "/home/gstupp/projects/HumanDiseaseOntology/src/ontology"
GITHUB_USER = "stuppie"
REMOTE_GIT = "stuppie" # change to "DiseaseOntology" to use the official DO repo. "stuppie" is my fork (for testing)
# REMOTE_GIT = "DiseaseOntology"
# assumes there exists a file "doid-edit.owl" in the current folder
# in my case, this is a softlink to the file in the cloned git repo
def add_xref(owl_path, doid, ext_id, relation="oboInOwl:hasDbXref"):
# make sure the skos prefix def is in the owl_file
if not any(line.strip() == "Prefix(skos:=<http://www.w3.org/2004/02/skos/core#>)" for line in open(owl_path)):
print("adding skos prefix")
lines = list(open(owl_path).readlines())
lines.insert(0, "Prefix(skos:=<http://www.w3.org/2004/02/skos/core#>)\n")
with open(owl_path, 'w') as f:
f.writelines(lines)
# needs owltools in path
if os.path.exists("tmp.ttl"):
os.remove("tmp.ttl")
prefix = """
@prefix : <http://purl.obolibrary.org/obo/doid.owl#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#> .
@prefix xml: <http://www.w3.org/XML/1998/namespace> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix obo: <http://purl.obolibrary.org/obo/> .
@prefix skos: <http://www.w3.org/2004/02/skos/core#> .
"""
ttl = 'obo:DOID_{} {} "{}"^^xsd:string .'.format(doid.split(":")[1], relation, ext_id)
# if relation == "skos:exactMatch":
# # to maintain backward compatibility with hasDbXref, add this also if the relation is an exact match
# ttl += '\nobo:DOID_{} {} "{}"^^xsd:string .'.format(doid.split(":")[1], "oboInOwl:hasDbXref", ext_id)
with open("tmp.ttl", "w") as f:
print(prefix, file=f)
print(ttl, file=f)
subprocess.check_call("owltools {} --merge tmp.ttl -o -f ofn {}".format(owl_path, owl_path), shell=True)
lines = [line for line in open(owl_path) if
'Annotation(rdfs:comment "Includes Ontology(OntologyID(Anonymous' not in line]
with open(owl_path, 'w') as f:
f.writelines(lines)
def commit_and_push_changes(branch_id, to_add="doid-edit.owl", msg='add xref'):
"""
git checkout tmp
git add doid-edit.owl
git commit -m "add DOID_0060330 MESH:C535289 xref"
git push --set-upstream origin tmp
"""
cd = os.getcwd()
os.chdir(GIT_LOCAL_BASE)
subprocess.check_call(["git", "checkout", "-b", branch_id])
subprocess.check_call(["git", "add", to_add])
subprocess.check_call(["git", "commit", "-m", "'{}'".format(msg)])
subprocess.check_call("git push --set-upstream origin {}".format(branch_id), shell=True)
subprocess.check_call(["git", "checkout", "master"])
os.chdir(cd)
def create_pullrequest(title, body, branch_id):
data = {
"title": title,
"body": body,
"head": branch_id,
"base": "master"
}
url = "https://api.github.com/repos/{}/HumanDiseaseOntology/pulls".format(REMOTE_GIT)
r = requests.post(url, json=data, auth=HTTPBasicAuth(GITHUB_USER, GITHUB_PASS))
assert r.status_code == 201, r.text
return r
if __name__ == "__main__":
df, df_fmt = main("2017-11-28")
df.to_csv('df_2017-11-28.csv')
df_fmt.to_csv('df_fmt_2017-11-28.csv')
# df = pd.read_csv('df_2017-11-28.csv', index_col=0)
# df_fmt = pd.read_csv('df_fmt_2017-11-28.csv', index_col=0)
df_fmt = df_fmt.rename(columns={'doid': 'DOID', 'do_label': 'DOID Label',
'do_def': 'DOID Description', 'mesh': 'MeSH ID',
'mesh_label': 'MeSH Label', 'mesh_descr': 'MeSH Description',
'mesh_synonyms': 'MeSH Synonyms',
'qid': 'Wikidata QID', 'wd_label': 'Wikidata Label',
'relation': 'Relation'})
for idx in range(len(df)):
# break
row = df.iloc[idx]
doid = row.doid
ext_id = "MESH:" + row.mesh
# if row.relation == "oboInOwl:hasDbXref":
# row.relation = "skos:exactMatch"
# df_fmt.iloc[idx:idx + 1].Relation = "skos:exactMatch"
relation = row.relation
branch_id = "_".join([doid, ext_id, relation])
branch_id = branch_id.replace(":", "_") # can't have : in branch names
table = df_fmt.iloc[idx:idx + 1].transpose()
table.columns = ["Value"]
add_xref("doid-edit.owl", doid, ext_id, relation)
msg = "add xref: {} {}".format(doid, ext_id)
commit_and_push_changes(branch_id=branch_id, msg=msg)
t = tabulate(table, headers='keys', tablefmt='pipe')
create_pullrequest(title=msg, body=t, branch_id=branch_id)
| {
"content_hash": "b9825807d19c9f92de0c2a7d3d2ba676",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 119,
"avg_line_length": 43.472,
"alnum_prop": 0.6076555023923444,
"repo_name": "SuLab/scheduled-bots",
"id": "51522f2ef7f5e81b0180b4e6402b4a5c0b2c36f4",
"size": "5434",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scheduled_bots/disease_ontology/disease_ontology/deprecated_code/pull_requester.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1296"
},
{
"name": "Jupyter Notebook",
"bytes": "1049300"
},
{
"name": "Python",
"bytes": "709603"
},
{
"name": "Shell",
"bytes": "5313"
}
],
"symlink_target": ""
} |
from datetime import datetime
from datetime import timedelta
import time
import json
import googlemaps
import os
HOMEDIR=os.path.expanduser('~')
def commuteEstimate(client, subject, d1, d2, time_threshold):
now = datetime.now()
alternates=False
routes = client.directions(d1, d2,
mode="driving",
traffic_model="best_guess",
departure_time=now)
route = routes[0]
distance_to_work=route['legs'][0]['distance']['text'].replace('mi', 'miles')
time_to_work=route['legs'][0]['duration']['text'].replace('mins', 'minutes')
time_int = int(float(time_to_work.split(" ")[0]))
time_str='{0} - Approximately {1} to work. {2} total.'.format(subject, time_to_work, distance_to_work)
if(time_int >= time_threshold):
time_str = time_str + " Recommend you take an alternate route."
alternates=True
#if(len(route['warnings']) == 0):
# warnings='No warnings'
#else:
# warnings=route['warnings']
send_str = time_str# + " " + warnings
voicedir = os.path.abspath(HOMEDIR + '/voices/fel')
cmd_str = ('echo "{0}" | ' + voicedir + ' >/dev/null').format(send_str)
print(time_str)
os.system(cmd_str)
if alternates:
alternatives(client, d1, d2)
def alternatives(client, d1, d2):
routes = client.directions(d1, d2,
alternatives=True)
for route in routes:
summary=route['summary']
duration=route['legs'][0]['duration']['text'].replace('mins', 'minutes')
alternatives=summary + " " + duration
print(alternatives)
if __name__=='__main__':
api_key=''
with open(os.path.abspath(HOMEDIR+'/.credentials/trafficAPIKey2.txt')) as myfile:
api_key=myfile.read().rstrip()
client = googlemaps.Client(key=api_key)
d1='PVCC Charlottesville, VA 22902'
d2=''
commuteEstimate(client, 'driver1', d1, '1954 Swanson Drive Charlottesville, VA 22901', 20)
commuteEstimate(client, 'driver2', d1, '3263 Proffit Rd, Charlottesville, VA 22911', 30)
| {
"content_hash": "41ef04f1f4e763a1d93cc675435e9756",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 106,
"avg_line_length": 36.43103448275862,
"alnum_prop": 0.6062470421202082,
"repo_name": "jfaulkner/googleAPIUtil",
"id": "ce3b12ea42bb750215f47079fbd9227633e871e6",
"size": "2184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "traffic/trafficNotifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11508"
}
],
"symlink_target": ""
} |
import belaylibs.models as bcap
from django.db import models
class BelayAccount(bcap.Grantable):
station_url = models.CharField(max_length=200)
class PendingLogin(bcap.Grantable):
# Key is for this server to trust the openID provider's request
key = models.CharField(max_length=36)
# ClientKey is a secret provided by the client to trust that new
# windows were served from this server
clientkey = models.CharField(max_length=36)
class PltCredentials(bcap.Grantable):
username = models.CharField(max_length=200)
salt = models.CharField(max_length=200)
hashed_password = models.CharField(max_length=200)
account = models.ForeignKey(BelayAccount)
class GoogleCredentials(bcap.Grantable):
identity = models.CharField(max_length=200)
account = models.ForeignKey(BelayAccount)
class BelaySession(bcap.Grantable):
session_id = models.CharField(max_length=200)
account = models.ForeignKey(BelayAccount)
class Stash(bcap.Grantable):
stashed_content = models.TextField(max_length=1000)
class PendingAccount(bcap.Grantable):
email = models.TextField(max_length=100)
| {
"content_hash": "9f785be2f1c025523a94eef39211bf29",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 66,
"avg_line_length": 33.21212121212121,
"alnum_prop": 0.7782846715328468,
"repo_name": "brownplt/k3",
"id": "1bd4e4cde648002802df7c3fb3837d086c05bd19",
"size": "1096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dj-plt-belay/pltbelay/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "492630"
},
{
"name": "Python",
"bytes": "218929"
},
{
"name": "Shell",
"bytes": "1918"
}
],
"symlink_target": ""
} |
import boto
import boto.s3.connection
from django.conf import settings
import logging
log = logging.getLogger(__name__)
def get_s3_connection():
if settings.S3_ACCESS_KEY and settings.S3_SECRET_KEY and settings.S3_HOST:
log.debug('Connecting to {}, with secure connection is {}'.
format(settings.S3_HOST, settings.S3_SECURE_CONNECTION))
return boto.connect_s3(
aws_access_key_id=settings.S3_ACCESS_KEY,
aws_secret_access_key=settings.S3_SECRET_KEY,
host=settings.S3_HOST,
is_secure=settings.S3_SECURE_CONNECTION,
calling_format=boto.s3.connection.OrdinaryCallingFormat())
return None
def get_or_create_bucket(s3_connection):
bucket = s3_connection.get_bucket(settings.S3_BUCKET_NAME)
if bucket is None:
bucket = s3_connection.create_bucket(settings.S3_BUCKET_NAME)
return bucket
| {
"content_hash": "f382259788780dd0057c1a10f9a8c47a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 32.42857142857143,
"alnum_prop": 0.6806167400881057,
"repo_name": "2gis/badger-api",
"id": "47e7862c43cb73c1e5f5914222fcb5111e48eeb8",
"size": "908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1597"
},
{
"name": "Python",
"bytes": "241445"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
setup(
name = "a10-neutron-lbaas",
version = "1.5.2a",
packages = find_packages(),
author = "A10 Networks",
author_email = "[email protected]",
description = "A10 Networks Openstack LBaaS Driver Middleware",
license = "Apache",
keywords = "a10 axapi acos adc slb load balancer openstack neutron lbaas",
url = "https://github.com/a10networks/a10-neutron-lbaas",
long_description = open('README.md').read(),
classifiers = [
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet',
],
package_data={'': [
'a10_neutron_lbaas/db/migration/alembic.ini'
]},
include_package_data=True,
scripts=['scripts/a10-manage', 'scripts/a10-neutron-lbaas-db-manage'],
install_requires = ['acos-client>=1.3.5', 'a10-openstack-lib>=0.1.0']
)
| {
"content_hash": "64440cba72e0bc718df9db7d84530839",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 31.264705882352942,
"alnum_prop": 0.6274694261523989,
"repo_name": "Cedev/a10-neutron-lbaas",
"id": "9d8a6b860cbfdf2111eb331f5e7b0aa28addab0a",
"size": "1101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "323518"
},
{
"name": "Shell",
"bytes": "6670"
}
],
"symlink_target": ""
} |
"""Groebner bases algorithms. """
from __future__ import print_function, division
from sympy.polys.monomials import monomial_mul, monomial_lcm, monomial_divides, term_div
from sympy.polys.orderings import lex
from sympy.polys.polyerrors import DomainError
from sympy.polys.polyconfig import query
from sympy.core.symbol import Dummy
from sympy.core.compatibility import xrange
def groebner(seq, ring, method=None):
"""
Computes Groebner basis for a set of polynomials in `K[X]`.
Wrapper around the (default) improved Buchberger and the other algorithms
for computing Groebner bases. The choice of algorithm can be changed via
``method`` argument or :func:`setup` from :mod:`sympy.polys.polyconfig`,
where ``method`` can be either ``buchberger`` or ``f5b``.
"""
if method is None:
method = query('groebner')
_groebner_methods = {
'buchberger': _buchberger,
'f5b': _f5b,
}
try:
_groebner = _groebner_methods[method]
except KeyError:
raise ValueError("'%s' is not a valid Groebner bases algorithm (valid are 'buchberger' and 'f5b')" % method)
domain, orig = ring.domain, None
if not domain.has_Field or not domain.has_assoc_Field:
try:
orig, ring = ring, ring.clone(domain=domain.get_field())
except DomainError:
raise DomainError("can't compute a Groebner basis over %s" % domain)
else:
seq = [ s.set_ring(ring) for s in seq ]
G = _groebner(seq, ring)
if orig is not None:
G = [ g.clear_denoms()[1].set_ring(orig) for g in G ]
return G
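# Minimal usage sketch of this low-level entry point with ring elements
# (illustrative only; result shown after the usual sorting by leading
# monomial):
#
#   >>> from sympy.polys import ring, QQ, lex
#   >>> R, x, y = ring("x,y", QQ, lex)
#   >>> groebner([x**2 + y, x*y - 1], R)
#   [x + y**2, y**3 + 1]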
def _buchberger(f, ring):
"""
Computes Groebner basis for a set of polynomials in `K[X]`.
Given a set of multivariate polynomials `F`, finds another
set `G`, such that Ideal `F = Ideal G` and `G` is a reduced
Groebner basis.
The resulting basis is unique and has monic generators if the
    ground domain is a field. Otherwise the result is non-unique
but Groebner bases over e.g. integers can be computed (if the
input polynomials are monic).
Groebner bases can be used to choose specific generators for a
polynomial ideal. Because these bases are unique you can check
for ideal equality by comparing the Groebner bases. To see if
one polynomial lies in an ideal, divide by the elements in the
base and see if the remainder vanishes.
They can also be used to solve systems of polynomial equations
as, by choosing lexicographic ordering, you can eliminate one
variable at a time, provided that the ideal is zero-dimensional
(finite number of solutions).
References
==========
1. [Bose03]_
2. [Giovini91]_
3. [Ajwa95]_
4. [Cox97]_
Algorithm used: an improved version of Buchberger's algorithm
as presented in T. Becker, V. Weispfenning, Groebner Bases: A
Computational Approach to Commutative Algebra, Springer, 1993,
page 232.
"""
order = ring.order
domain = ring.domain
monomial_mul = ring.monomial_mul
monomial_div = ring.monomial_div
monomial_lcm = ring.monomial_lcm
def select(P):
# normal selection strategy
# select the pair with minimum LCM(LM(f), LM(g))
pr = min(P, key=lambda pair: order(monomial_lcm(f[pair[0]].LM, f[pair[1]].LM)))
return pr
def normal(g, J):
h = g.rem([ f[j] for j in J ])
if not h:
return None
else:
h = h.monic()
if not h in I:
I[h] = len(f)
f.append(h)
return h.LM, I[h]
def update(G, B, ih):
# update G using the set of critical pairs B and h
# [BW] page 230
h = f[ih]
mh = h.LM
# filter new pairs (h, g), g in G
C = G.copy()
D = set()
while C:
# select a pair (h, g) by popping an element from C
ig = C.pop()
g = f[ig]
mg = g.LM
LCMhg = monomial_lcm(mh, mg)
def lcm_divides(ip):
# LCM(LM(h), LM(p)) divides LCM(LM(h), LM(g))
m = monomial_lcm(mh, f[ip].LM)
return monomial_div(LCMhg, m)
# HT(h) and HT(g) disjoint: mh*mg == LCMhg
if monomial_mul(mh, mg) == LCMhg or (
not any(lcm_divides(ipx) for ipx in C) and
not any(lcm_divides(pr[1]) for pr in D)):
D.add((ih, ig))
E = set()
while D:
# select h, g from D (h the same as above)
ih, ig = D.pop()
mg = f[ig].LM
LCMhg = monomial_lcm(mh, mg)
if not monomial_mul(mh, mg) == LCMhg:
E.add((ih, ig))
# filter old pairs
B_new = set()
while B:
# select g1, g2 from B (-> CP)
ig1, ig2 = B.pop()
mg1 = f[ig1].LM
mg2 = f[ig2].LM
LCM12 = monomial_lcm(mg1, mg2)
# if HT(h) does not divide lcm(HT(g1), HT(g2))
if not monomial_div(LCM12, mh) or \
monomial_lcm(mg1, mh) == LCM12 or \
monomial_lcm(mg2, mh) == LCM12:
B_new.add((ig1, ig2))
B_new |= E
# filter polynomials
G_new = set()
while G:
ig = G.pop()
mg = f[ig].LM
if not monomial_div(mg, mh):
G_new.add(ig)
G_new.add(ih)
return G_new, B_new
# end of update ################################
if not f:
return []
# replace f with a reduced list of initial polynomials; see [BW] page 203
f1 = f[:]
while True:
f = f1[:]
f1 = []
for i in range(len(f)):
p = f[i]
r = p.rem(f[:i])
if r:
f1.append(r.monic())
if f == f1:
break
I = {} # ip = I[p]; p = f[ip]
F = set() # set of indices of polynomials
G = set() # set of indices of intermediate would-be Groebner basis
CP = set() # set of pairs of indices of critical pairs
for i, h in enumerate(f):
I[h] = i
F.add(i)
#####################################
# algorithm GROEBNERNEWS2 in [BW] page 232
while F:
# select p with minimum monomial according to the monomial ordering
h = min([f[x] for x in F], key=lambda f: order(f.LM))
ih = I[h]
F.remove(ih)
G, CP = update(G, CP, ih)
# count the number of critical pairs which reduce to zero
reductions_to_zero = 0
while CP:
ig1, ig2 = select(CP)
CP.remove((ig1, ig2))
h = spoly(f[ig1], f[ig2], ring)
# ordering divisors is on average more efficient [Cox] page 111
G1 = sorted(G, key=lambda g: order(f[g].LM))
ht = normal(h, G1)
if ht:
G, CP = update(G, CP, ht[1])
else:
reductions_to_zero += 1
######################################
# now G is a Groebner basis; reduce it
Gr = set()
for ig in G:
ht = normal(f[ig], G - set([ig]))
if ht:
Gr.add(ht[1])
Gr = [f[ig] for ig in Gr]
# order according to the monomial ordering
Gr = sorted(Gr, key=lambda f: order(f.LM), reverse=True)
return Gr
def spoly(p1, p2, ring):
"""
Compute LCM(LM(p1), LM(p2))/LM(p1)*p1 - LCM(LM(p1), LM(p2))/LM(p2)*p2
This is the S-poly provided p1 and p2 are monic
"""
LM1 = p1.LM
LM2 = p2.LM
LCM12 = ring.monomial_lcm(LM1, LM2)
m1 = ring.monomial_div(LCM12, LM1)
m2 = ring.monomial_div(LCM12, LM2)
s1 = p1.mul_monom(m1)
s2 = p2.mul_monom(m2)
s = s1 - s2
return s
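# Worked example for spoly (lex order with x > y): for the monic polynomials
# p1 = x**2 + y and p2 = x*y - 1, LCM(LM(p1), LM(p2)) is x**2*y, so m1 = y
# and m2 = x, and
#
#   spoly(p1, p2, R) == y*(x**2 + y) - x*(x*y - 1) == x + y**2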
# F5B
# convenience functions
def Sign(f):
return f[0]
def Polyn(f):
return f[1]
def Num(f):
return f[2]
def sig(monomial, index):
return (monomial, index)
def lbp(signature, polynomial, number):
return (signature, polynomial, number)
# signature functions
def sig_cmp(u, v, order):
"""
Compare two signatures by extending the term order to K[X]^n.
u < v iff
    - the index of u is greater than the index of v
    or
    - the indices are equal and u[0] < v[0] w.r.t. order
u > v otherwise
"""
if u[1] > v[1]:
return -1
if u[1] == v[1]:
#if u[0] == v[0]:
# return 0
if order(u[0]) < order(v[0]):
return -1
return 1
def sig_key(s, order):
"""
Key for comparing two signatures.
s = (m, k), t = (n, l)
s < t iff [k > l] or [k == l and m < n]
s > t otherwise
"""
return (-s[1], order(s[0]))
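# Note on the signature order used above: a larger generator index means a
# *smaller* signature, and ties are broken by the monomial order. For example,
# with lex on two variables:
#
#   sig_key(((1, 0), 2), lex) < sig_key(((2, 0), 1), lex)   # index 2 beats index 1
#   sig_key(((0, 1), 1), lex) < sig_key(((1, 0), 1), lex)   # same index, (0, 1) < (1, 0) in lex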
def sig_mult(s, m):
"""
Multiply a signature by a monomial.
The product of a signature (m, i) and a monomial n is defined as
    (m * n, i).
"""
return sig(monomial_mul(s[0], m), s[1])
# labeled polynomial functions
def lbp_sub(f, g):
"""
Subtract labeled polynomial g from f.
    The signature and number of the difference of f and g are the signature
and number of the maximum of f and g, w.r.t. lbp_cmp.
"""
if sig_cmp(Sign(f), Sign(g), Polyn(f).ring.order) < 0:
max_poly = g
else:
max_poly = f
ret = Polyn(f) - Polyn(g)
return lbp(Sign(max_poly), ret, Num(max_poly))
def lbp_mul_term(f, cx):
"""
Multiply a labeled polynomial with a term.
The product of a labeled polynomial (s, p, k) by a monomial is
defined as (m * s, m * p, k).
"""
return lbp(sig_mult(Sign(f), cx[0]), Polyn(f).mul_term(cx), Num(f))
def lbp_cmp(f, g):
"""
Compare two labeled polynomials.
f < g iff
- Sign(f) < Sign(g)
or
- Sign(f) == Sign(g) and Num(f) > Num(g)
f > g otherwise
"""
if sig_cmp(Sign(f), Sign(g), Polyn(f).ring.order) == -1:
return -1
if Sign(f) == Sign(g):
if Num(f) > Num(g):
return -1
#if Num(f) == Num(g):
# return 0
return 1
def lbp_key(f):
"""
Key for comparing two labeled polynomials.
"""
return (sig_key(Sign(f), Polyn(f).ring.order), -Num(f))
# algorithm and helper functions
def critical_pair(f, g, ring):
"""
Compute the critical pair corresponding to two labeled polynomials.
A critical pair is a tuple (um, f, vm, g), where um and vm are
terms such that um * f - vm * g is the S-polynomial of f and g (so,
wlog assume um * f > vm * g).
For performance sake, a critical pair is represented as a tuple
(Sign(um * f), um, f, Sign(vm * g), vm, g), since um * f creates
a new, relatively expensive object in memory, whereas Sign(um *
f) and um are lightweight and f (in the tuple) is a reference to
an already existing object in memory.
"""
domain = ring.domain
ltf = Polyn(f).LT
ltg = Polyn(g).LT
lt = (monomial_lcm(ltf[0], ltg[0]), domain.one)
um = term_div(lt, ltf, domain)
vm = term_div(lt, ltg, domain)
# The full information is not needed (now), so only the product
# with the leading term is considered:
fr = lbp_mul_term(lbp(Sign(f), Polyn(f).leading_term(), Num(f)), um)
gr = lbp_mul_term(lbp(Sign(g), Polyn(g).leading_term(), Num(g)), vm)
# return in proper order, such that the S-polynomial is just
# u_first * f_first - u_second * f_second:
if lbp_cmp(fr, gr) == -1:
return (Sign(gr), vm, g, Sign(fr), um, f)
else:
return (Sign(fr), um, f, Sign(gr), vm, g)
def cp_cmp(c, d):
"""
Compare two critical pairs c and d.
c < d iff
- lbp(c[0], _, Num(c[2]) < lbp(d[0], _, Num(d[2])) (this
corresponds to um_c * f_c and um_d * f_d)
or
- lbp(c[0], _, Num(c[2]) >< lbp(d[0], _, Num(d[2])) and
lbp(c[3], _, Num(c[5])) < lbp(d[3], _, Num(d[5])) (this
corresponds to vm_c * g_c and vm_d * g_d)
c > d otherwise
"""
zero = Polyn(c[2]).ring.zero
c0 = lbp(c[0], zero, Num(c[2]))
d0 = lbp(d[0], zero, Num(d[2]))
r = lbp_cmp(c0, d0)
if r == -1:
return -1
if r == 0:
c1 = lbp(c[3], zero, Num(c[5]))
d1 = lbp(d[3], zero, Num(d[5]))
r = lbp_cmp(c1, d1)
if r == -1:
return -1
#if r == 0:
# return 0
return 1
def cp_key(c, ring):
"""
Key for comparing critical pairs.
"""
return (lbp_key(lbp(c[0], ring.zero, Num(c[2]))), lbp_key(lbp(c[3], ring.zero, Num(c[5]))))
def s_poly(cp):
"""
Compute the S-polynomial of a critical pair.
The S-polynomial of a critical pair cp is cp[1] * cp[2] - cp[4] * cp[5].
"""
return lbp_sub(lbp_mul_term(cp[2], cp[1]), lbp_mul_term(cp[5], cp[4]))
def is_rewritable_or_comparable(sign, num, B):
"""
Check if a labeled polynomial is redundant by checking if its
signature and number imply rewritability or comparability.
(sign, num) is comparable if there exists a labeled polynomial
h in B, such that sign[1] (the index) is less than Sign(h)[1]
and sign[0] is divisible by the leading monomial of h.
(sign, num) is rewritable if there exists a labeled polynomial
    h in B, such that sign[1] is equal to Sign(h)[1], num < Num(h)
and sign[0] is divisible by Sign(h)[0].
"""
for h in B:
# comparable
if sign[1] < Sign(h)[1]:
if monomial_divides(Polyn(h).LM, sign[0]):
return True
# rewritable
if sign[1] == Sign(h)[1]:
if num < Num(h):
if monomial_divides(Sign(h)[0], sign[0]):
return True
return False
def f5_reduce(f, B):
"""
F5-reduce a labeled polynomial f by B.
    Continuously searches for a non-zero labeled polynomial h in B, such
    that the leading term lt_h of h divides the leading term lt_f of
    f and Sign(lt_h * h) < Sign(f). If such a labeled polynomial h is
    found, f gets replaced by f - lt_f / lt_h * h. If no such h can be
    found or f is 0, f is not further F5-reducible and is returned.
A polynomial that is reducible in the usual sense need not be
F5-reducible, e.g.:
>>> from sympy.polys.groebnertools import lbp, sig, f5_reduce, Polyn
>>> from sympy.polys import ring, QQ, lex
>>> R, x,y,z = ring("x,y,z", QQ, lex)
>>> f = lbp(sig((1, 1, 1), 4), x, 3)
>>> g = lbp(sig((0, 0, 0), 2), x, 2)
>>> Polyn(f).rem([Polyn(g)])
0
>>> f5_reduce(f, [g])
(((1, 1, 1), 4), x, 3)
"""
order = Polyn(f).ring.order
domain = Polyn(f).ring.domain
if not Polyn(f):
return f
while True:
g = f
for h in B:
if Polyn(h):
if monomial_divides(Polyn(h).LM, Polyn(f).LM):
t = term_div(Polyn(f).LT, Polyn(h).LT, domain)
if sig_cmp(sig_mult(Sign(h), t[0]), Sign(f), order) < 0:
# The following check need not be done and is in general slower than without.
#if not is_rewritable_or_comparable(Sign(gp), Num(gp), B):
hp = lbp_mul_term(h, t)
f = lbp_sub(f, hp)
break
if g == f or not Polyn(f):
return f
def _f5b(F, ring):
"""
Computes a reduced Groebner basis for the ideal generated by F.
f5b is an implementation of the F5B algorithm by Yao Sun and
Dingkang Wang. Similarly to Buchberger's algorithm, the algorithm
proceeds by computing critical pairs, computing the S-polynomial,
reducing it and adjoining the reduced S-polynomial if it is not 0.
Unlike Buchberger's algorithm, each polynomial contains additional
information, namely a signature and a number. The signature
specifies the path of computation (i.e. from which polynomial in
the original basis was it derived and how), the number says when
the polynomial was added to the basis. With this information it
is (often) possible to decide if an S-polynomial will reduce to
0 and can be discarded.
Optimizations include: Reducing the generators before computing
a Groebner basis, removing redundant critical pairs when a new
polynomial enters the basis and sorting the critical pairs and
the current basis.
Once a Groebner basis has been found, it gets reduced.
** References **
Yao Sun, Dingkang Wang: "A New Proof for the Correctness of F5
(F5-Like) Algorithm", http://arxiv.org/abs/1004.0084 (specifically
v4)
Thomas Becker, Volker Weispfenning, Groebner bases: A computational
approach to commutative algebra, 1993, p. 203, 216
"""
order = ring.order
domain = ring.domain
# reduce polynomials (like in Mario Pernici's implementation) (Becker, Weispfenning, p. 203)
B = F
while True:
F = B
B = []
for i in xrange(len(F)):
p = F[i]
r = p.rem(F[:i])
if r:
B.append(r)
if F == B:
break
# basis
B = [lbp(sig(ring.zero_monom, i + 1), F[i], i + 1) for i in xrange(len(F))]
B.sort(key=lambda f: order(Polyn(f).LM), reverse=True)
# critical pairs
CP = [critical_pair(B[i], B[j], ring) for i in xrange(len(B)) for j in xrange(i + 1, len(B))]
CP.sort(key=lambda cp: cp_key(cp, ring), reverse=True)
k = len(B)
reductions_to_zero = 0
while len(CP):
cp = CP.pop()
# discard redundant critical pairs:
if is_rewritable_or_comparable(cp[0], Num(cp[2]), B):
continue
if is_rewritable_or_comparable(cp[3], Num(cp[5]), B):
continue
s = s_poly(cp)
p = f5_reduce(s, B)
p = lbp(Sign(p), Polyn(p).monic(), k + 1)
if Polyn(p):
# remove old critical pairs, that become redundant when adding p:
indices = []
for i, cp in enumerate(CP):
if is_rewritable_or_comparable(cp[0], Num(cp[2]), [p]):
indices.append(i)
elif is_rewritable_or_comparable(cp[3], Num(cp[5]), [p]):
indices.append(i)
for i in reversed(indices):
del CP[i]
# only add new critical pairs that are not made redundant by p:
for g in B:
if Polyn(g):
cp = critical_pair(p, g, ring)
if is_rewritable_or_comparable(cp[0], Num(cp[2]), [p]):
continue
elif is_rewritable_or_comparable(cp[3], Num(cp[5]), [p]):
continue
CP.append(cp)
# sort (other sorting methods/selection strategies were not as successful)
CP.sort(key=lambda cp: cp_key(cp, ring), reverse=True)
# insert p into B:
m = Polyn(p).LM
if order(m) <= order(Polyn(B[-1]).LM):
B.append(p)
else:
for i, q in enumerate(B):
if order(m) > order(Polyn(q).LM):
B.insert(i, p)
break
k += 1
#print(len(B), len(CP), "%d critical pairs removed" % len(indices))
else:
reductions_to_zero += 1
# reduce Groebner basis:
H = [Polyn(g).monic() for g in B]
H = red_groebner(H, ring)
return sorted(H, key=lambda f: order(f.LM), reverse=True)
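# Hedged sketch of what _f5b computes (it is normally reached through the
# public groebner() wrapper rather than called directly); ring setup as in
# the f5_reduce doctest above, expected output shown up to printing details:
#
#   >>> R, x, y = ring("x,y", QQ, lex)
#   >>> _f5b([x**2 + y, x*y - 1], R)
#   [x + y**2, y**3 + 1]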
def red_groebner(G, ring):
"""
Compute reduced Groebner basis, from BeckerWeispfenning93, p. 216
    Selects a subset of the generators that already generates the ideal
and computes a reduced Groebner basis for them.
"""
def reduction(P):
"""
The actual reduction algorithm.
"""
Q = []
for i, p in enumerate(P):
h = p.rem(P[:i] + P[i + 1:])
if h:
Q.append(h)
return [p.monic() for p in Q]
F = G
H = []
while F:
f0 = F.pop()
if not any(monomial_divides(f.LM, f0.LM) for f in F + H):
H.append(f0)
# Becker, Weispfenning, p. 217: H is Groebner basis of the ideal generated by G.
return reduction(H)
def is_groebner(G, ring):
"""
Check if G is a Groebner basis.
"""
for i in xrange(len(G)):
for j in xrange(i + 1, len(G)):
s = spoly(G[i], G[j])
s = s.rem(G)
if s:
return False
return True
def is_minimal(G, ring):
"""
Checks if G is a minimal Groebner basis.
"""
order = ring.order
domain = ring.domain
G.sort(key=lambda g: order(g.LM))
for i, g in enumerate(G):
if g.LC != domain.one:
return False
for h in G[:i] + G[i + 1:]:
if monomial_divides(h.LM, g.LM):
return False
return True
def is_reduced(G, ring):
"""
Checks if G is a reduced Groebner basis.
"""
order = ring.order
domain = ring.domain
G.sort(key=lambda g: order(g.LM))
for i, g in enumerate(G):
if g.LC != domain.one:
return False
for term in g:
for h in G[:i] + G[i + 1:]:
if monomial_divides(h.LM, term[0]):
return False
return True
def groebner_lcm(f, g):
"""
Computes LCM of two polynomials using Groebner bases.
    The LCM is computed as the unique generator of the intersection
    of the two ideals generated by `f` and `g`. The approach is to
    compute a Groebner basis with respect to lexicographic ordering
    of `t*f` and `(1 - t)*g`, where `t` is an unrelated variable, and
    then to keep only the basis elements that do not contain `t`.
References
==========
1. [Cox97]_
"""
if f.ring != g.ring:
raise ValueError("Values should be equal")
ring = f.ring
domain = ring.domain
if not f or not g:
return ring.zero
if len(f) <= 1 and len(g) <= 1:
monom = monomial_lcm(f.LM, g.LM)
coeff = domain.lcm(f.LC, g.LC)
return ring.term_new(monom, coeff)
fc, f = f.primitive()
gc, g = g.primitive()
lcm = domain.lcm(fc, gc)
f_terms = [ ((1,) + monom, coeff) for monom, coeff in f.terms() ]
g_terms = [ ((0,) + monom, coeff) for monom, coeff in g.terms() ] \
+ [ ((1,) + monom,-coeff) for monom, coeff in g.terms() ]
t = Dummy("t")
t_ring = ring.clone(symbols=(t,) + ring.symbols, order=lex)
F = t_ring.from_terms(f_terms)
G = t_ring.from_terms(g_terms)
basis = groebner([F, G], t_ring)
def is_independent(h, j):
return all(not monom[j] for monom in h.monoms())
H = [ h for h in basis if is_independent(h, 0) ]
h_terms = [ (monom[1:], coeff*lcm) for monom, coeff in H[0].terms() ]
h = ring.from_terms(h_terms)
return h
def groebner_gcd(f, g):
"""Computes GCD of two polynomials using Groebner bases. """
if f.ring != g.ring:
raise ValueError("Values should be equal")
domain = f.ring.domain
if not domain.has_Field:
fc, f = f.primitive()
gc, g = g.primitive()
gcd = domain.gcd(fc, gc)
H = (f*g).quo([groebner_lcm(f, g)])
if len(H) != 1:
raise ValueError("Length should be 1")
h = H[0]
if not domain.has_Field:
return gcd*h
else:
return h.monic()
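# Minimal usage sketch for the two helpers above (hedged; same ring setup as
# the doctests earlier in this module):
#
#   >>> from sympy.polys import ring, QQ, lex
#   >>> R, x, y = ring("x,y", QQ, lex)
#   >>> groebner_lcm(x*y, x**2)
#   x**2*y
#   >>> groebner_gcd(x**2*y, x*y**2)
#   x*y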
| {
"content_hash": "82dde99e2afc815c675509ece49f019f",
"timestamp": "",
"source": "github",
"line_count": 861,
"max_line_length": 116,
"avg_line_length": 27.18466898954704,
"alnum_prop": 0.5449457404084422,
"repo_name": "AunShiLord/sympy",
"id": "0f8af05bde60f6fc85cfbcd8530aaea487526afe",
"size": "23406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/polys/groebnertools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13716936"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
from amaascore.assets.utils import json_to_asset
from amaascore.config import ENVIRONMENT
from amaascore.core.interface import Interface
from amaascore.core.amaas_model import json_handler
class AssetsInterface(Interface):
def __init__(self, environment=ENVIRONMENT, endpoint=None, logger=None):
self.logger = logger or logging.getLogger(__name__)
super(AssetsInterface, self).__init__(endpoint=endpoint, endpoint_type='assets', environment=environment)
def new(self, asset):
self.logger.info('New Asset - Asset Manager: %s - Asset ID: %s', asset.asset_manager_id, asset.asset_id)
url = '%s/assets/%s' % (self.endpoint, asset.asset_manager_id)
response = self.session.post(url, json=asset.to_interface())
if response.ok:
self.logger.info('Successfully Created Asset - Asset Manager: %s - Asset ID: %s', asset.asset_manager_id,
asset.asset_id)
asset = json_to_asset(response.json())
return asset
else:
self.logger.error(response.text)
response.raise_for_status()
def amend(self, asset):
self.logger.info('Amend Asset - Asset Manager: %s - Asset ID: %s', asset.asset_manager_id, asset.asset_id)
url = '%s/assets/%s/%s' % (self.endpoint, asset.asset_manager_id, asset.asset_id)
response = self.session.put(url, json=asset.to_interface())
if response.ok:
self.logger.info('Successfully Amended Asset - Asset Manager: %s - Asset ID: %s', asset.asset_manager_id,
asset.asset_id)
asset = json_to_asset(response.json())
return asset
else:
self.logger.error(response.text)
response.raise_for_status()
def partial(self, asset_manager_id, asset_id, updates):
self.logger.info('Partial Amend Asset - Asset Manager: %s - Asset ID: %s', asset_manager_id,
asset_id)
url = '%s/assets/%s/%s' % (self.endpoint, asset_manager_id, asset_id)
# Setting handler ourselves so we can be sure Decimals work
response = self.session.patch(url, data=json.dumps(updates, default=json_handler), headers=self.json_header)
if response.ok:
asset = json_to_asset(response.json())
return asset
else:
self.logger.error(response.text)
response.raise_for_status()
def retrieve(self, asset_manager_id, asset_id, version=None):
self.logger.info('Retrieve Asset - Asset Manager: %s - Asset ID: %s', asset_manager_id, asset_id)
url = '%s/assets/%s/%s' % (self.endpoint, asset_manager_id, asset_id)
if version:
url += '?version=%d' % int(version)
response = self.session.get(url)
if response.ok:
self.logger.info('Successfully Retrieved Asset - Asset Manager: %s - Asset ID: %s', asset_manager_id,
asset_id)
return json_to_asset(response.json())
else:
self.logger.error(response.text)
response.raise_for_status()
def deactivate(self, asset_manager_id, asset_id):
self.logger.info('Deactivate Asset - Asset Manager: %s - Asset ID: %s', asset_manager_id, asset_id)
url = '%s/assets/%s/%s' % (self.endpoint, asset_manager_id, asset_id)
        # Avoid shadowing the module-level ``json`` import
        payload = {'asset_status': 'Inactive'}
        response = self.session.patch(url, json=payload)
if response.ok:
self.logger.info('Successfully Deactivated Asset - Asset Manager: %s - Asset ID: %s', asset_manager_id,
asset_id)
return json_to_asset(response.json())
else:
self.logger.error(response.text)
response.raise_for_status()
def search(self, asset_manager_ids=None, asset_ids=None):
self.logger.info('Search for Assets - Asset Manager(s): %s', asset_manager_ids)
search_params = {}
# Potentially roll this into a loop through args rather than explicitly named - depends on additional validation
if asset_manager_ids:
search_params['asset_manager_ids'] = ','.join([str(amid) for amid in asset_manager_ids])
if asset_ids:
search_params['asset_ids'] = ','.join(asset_ids)
url = self.endpoint + '/assets'
response = self.session.get(url, params=search_params)
if response.ok:
assets = [json_to_asset(json_asset) for json_asset in response.json()]
self.logger.info('Returned %s Assets.', len(assets))
return assets
else:
self.logger.error(response.text)
response.raise_for_status()
def assets_by_asset_manager(self, asset_manager_id):
self.logger.info('Retrieve Assets By Asset Manager: %s', asset_manager_id)
url = '%s/assets/%s' % (self.endpoint, asset_manager_id)
response = self.session.get(url)
if response.ok:
assets = [json_to_asset(json_asset) for json_asset in response.json()]
self.logger.info('Returned %s Assets.', len(assets))
return assets
else:
self.logger.error(response.text)
response.raise_for_status()
def clear(self, asset_manager_id):
""" This method deletes all the data for an asset_manager_id.
It should be used with extreme caution. In production it
is almost always better to Inactivate rather than delete. """
self.logger.info('Clear Assets - Asset Manager: %s', asset_manager_id)
url = '%s/clear/%s' % (self.endpoint, asset_manager_id)
response = self.session.delete(url)
if response.ok:
count = response.json().get('count', 'Unknown')
self.logger.info('Deleted %s Assets.', count)
return count
else:
self.logger.error(response.text)
response.raise_for_status()
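# Minimal usage sketch (hedged; the asset_manager_id/asset_id values below are
# placeholders, and a reachable AMaaS endpoint with valid session credentials
# is assumed):
#
#   interface = AssetsInterface()
#   asset = interface.retrieve(asset_manager_id=1, asset_id='SAMPLE_ASSET_ID')
#   assets = interface.search(asset_manager_ids=[1])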
| {
"content_hash": "4309a05bf25e3e724e6be90982581f7a",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 120,
"avg_line_length": 47.803149606299215,
"alnum_prop": 0.6101136550815351,
"repo_name": "paul-rs/amaas-core-sdk-python",
"id": "7a328ef2b3ebb3e2c9ad5b4663a4966089721ff8",
"size": "6071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amaascore/assets/interface.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "618"
},
{
"name": "Python",
"bytes": "418608"
},
{
"name": "Shell",
"bytes": "42"
}
],
"symlink_target": ""
} |
from typing import Union, Optional
CompressType = Optional[Union[str,bool]]
ParallelType = Union[int,bool]
CacheType = Union[bool,str]
SecretsType = Optional[Union[str,dict]] | {
"content_hash": "a9eac553f7ca6aa171c722be2fad4946",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 40,
"avg_line_length": 29.166666666666668,
"alnum_prop": 0.7828571428571428,
"repo_name": "seung-lab/cloud-volume",
"id": "f4e387799327159b96d01b3810e1fc3e3930ce24",
"size": "175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudvolume/types.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "17074"
},
{
"name": "JavaScript",
"bytes": "34643"
},
{
"name": "Python",
"bytes": "707034"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.conf.urls import url
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import render, redirect
from django.conf import settings
import os
from celery import group
from app_reservas.tasks import obtener_eventos_recurso_especifico
from ..models import RecursoAli
@admin.register(RecursoAli)
class RecursoAliAdmin(admin.ModelAdmin):
"""
    Specification of how RecursoAli is represented in the administration interface.
"""
list_display = (
'_identificador',
'_tipo',
'calendar_codigo',
'calendar_color',
)
list_filter = (
'tipo',
)
def _identificador(self, obj):
"""
        Gets the identifier of the instance.
"""
return obj.get_nombre_corto()
_identificador.short_description = 'Identificador'
_identificador.admin_order_field = 'identificador'
def _tipo(self, obj):
"""
        Gets the type associated with the instance.
"""
return obj.tipo
_tipo.short_description = 'Tipo'
_tipo.admin_order_field = 'tipo__nombre'
def get_urls(self):
urls = super(RecursoAliAdmin, self).get_urls()
add_urls = [
url(
r'^actualizar/$',
self.admin_site.admin_view(self.actulizar),
name='app_reservas_recursoali_actulizar',
)
]
return add_urls + urls
def actulizar(self, request):
context = {
'site_title': 'Administración de Django',
'site_header': 'Administración de Django',
'title': 'Actualizar Calendarios',
'app_label': self.model._meta.app_label,
'opts': self.model._meta,
'has_change_permission': self.has_change_permission(request)
}
if request.method == "POST":
self.actualizar_recursos()
return redirect(reverse_lazy("admin:%s_%s_changelist" %(self.model._meta.app_label, self.model._meta.model_name)))
return render(request, 'admin/app_reservas/confirm.html', context)
def actualizar_recursos(self):
ruta_archivos = settings.EVENTOS_URL
        # Create the directory, in case it does not exist.
os.makedirs(ruta_archivos, exist_ok=True)
        # Import of Recurso here, to avoid a circular dependency.
from app_reservas.models import RecursoAli
        # Get all existing resources.
recursos = RecursoAli.objects.all()
subtareas = group(
obtener_eventos_recurso_especifico.s(recurso, ruta_archivos)
for recurso in recursos
)
subtareas()
| {
"content_hash": "64fa9fbc5d76a318bbd232d0eaa5015d",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 126,
"avg_line_length": 29.944444444444443,
"alnum_prop": 0.6181818181818182,
"repo_name": "desarrollogt-frm-utn/reservas",
"id": "78613fe0360f7dd16e59f202646dcf7083deeef1",
"size": "2717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app_reservas/admin/recursoAli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5688"
},
{
"name": "HTML",
"bytes": "97664"
},
{
"name": "Python",
"bytes": "151479"
},
{
"name": "Shell",
"bytes": "865"
}
],
"symlink_target": ""
} |
from ansible import errors
from ansible import utils
import os
import ansible.utils.template as template
class Task(object):
__slots__ = [
'name', 'meta', 'action', 'only_if', 'when', 'async_seconds', 'async_poll_interval',
'notify', 'module_name', 'module_args', 'module_vars', 'default_vars',
'play', 'notified_by', 'tags', 'register',
'delegate_to', 'first_available_file', 'ignore_errors',
'local_action', 'transport', 'sudo', 'sudo_user', 'sudo_pass',
'items_lookup_plugin', 'items_lookup_terms', 'environment', 'args',
'any_errors_fatal', 'changed_when', 'always_run'
]
# to prevent typos and such
VALID_KEYS = [
'name', 'meta', 'action', 'only_if', 'async', 'poll', 'notify',
'first_available_file', 'include', 'tags', 'register', 'ignore_errors',
'delegate_to', 'local_action', 'transport', 'sudo', 'sudo_user',
'sudo_pass', 'when', 'connection', 'environment', 'args',
'any_errors_fatal', 'changed_when', 'always_run'
]
def __init__(self, play, ds, module_vars=None, default_vars=None, additional_conditions=None):
''' constructor loads from a task or handler datastructure '''
# meta directives are used to tell things like ansible/playbook to run
# operations like handler execution. Meta tasks are not executed
# normally.
if 'meta' in ds:
self.meta = ds['meta']
self.tags = []
return
else:
self.meta = None
library = os.path.join(play.basedir, 'library')
if os.path.exists(library):
utils.plugins.module_finder.add_directory(library)
for x in ds.keys():
# code to allow for saying "modulename: args" versus "action: modulename args"
if x in utils.plugins.module_finder:
if 'action' in ds:
raise errors.AnsibleError("multiple actions specified in task %s" % (ds.get('name', ds['action'])))
if isinstance(ds[x], dict):
if 'args' in ds:
raise errors.AnsibleError("can't combine args: and a dict for %s: in task %s" % (x, ds.get('name', "%s: %s" % (x, ds[x]))))
ds['args'] = ds[x]
ds[x] = ''
elif ds[x] is None:
ds[x] = ''
if not isinstance(ds[x], basestring):
raise errors.AnsibleError("action specified for task %s has invalid type %s" % (ds.get('name', "%s: %s" % (x, ds[x])), type(ds[x])))
ds['action'] = x + " " + ds[x]
ds.pop(x)
# code to allow "with_glob" and to reference a lookup plugin named glob
elif x.startswith("with_"):
plugin_name = x.replace("with_","")
if plugin_name in utils.plugins.lookup_loader:
ds['items_lookup_plugin'] = plugin_name
ds['items_lookup_terms'] = ds[x]
ds.pop(x)
else:
raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name))
elif x in [ 'changed_when', 'when']:
ds[x] = "jinja2_compare %s" % (ds[x])
elif x.startswith("when_"):
if 'when' in ds:
raise errors.AnsibleError("multiple when_* statements specified in task %s" % (ds.get('name', ds['action'])))
when_name = x.replace("when_","")
ds['when'] = "%s %s" % (when_name, ds[x])
ds.pop(x)
elif not x in Task.VALID_KEYS:
raise errors.AnsibleError("%s is not a legal parameter in an Ansible task or handler" % x)
self.module_vars = module_vars
self.default_vars = default_vars
self.play = play
# load various attributes
self.name = ds.get('name', None)
self.tags = [ 'all' ]
self.register = ds.get('register', None)
self.sudo = utils.boolean(ds.get('sudo', play.sudo))
self.environment = ds.get('environment', {})
# rather than simple key=value args on the options line, these represent structured data and the values
# can be hashes and lists, not just scalars
self.args = ds.get('args', {})
if self.sudo:
self.sudo_user = ds.get('sudo_user', play.sudo_user)
self.sudo_pass = ds.get('sudo_pass', play.playbook.sudo_pass)
else:
self.sudo_user = None
self.sudo_pass = None
# Both are defined
if ('action' in ds) and ('local_action' in ds):
raise errors.AnsibleError("the 'action' and 'local_action' attributes can not be used together")
# Both are NOT defined
elif (not 'action' in ds) and (not 'local_action' in ds):
raise errors.AnsibleError("'action' or 'local_action' attribute missing in task \"%s\"" % ds.get('name', '<Unnamed>'))
# Only one of them is defined
elif 'local_action' in ds:
self.action = ds.get('local_action', '')
self.delegate_to = '127.0.0.1'
else:
self.action = ds.get('action', '')
self.delegate_to = ds.get('delegate_to', None)
self.transport = ds.get('connection', ds.get('transport', play.transport))
if isinstance(self.action, dict):
if 'module' not in self.action:
raise errors.AnsibleError("'module' attribute missing from action in task \"%s\"" % ds.get('name', '%s' % self.action))
if self.args:
raise errors.AnsibleError("'args' cannot be combined with dict 'action' in task \"%s\"" % ds.get('name', '%s' % self.action))
self.args = self.action
self.action = self.args.pop('module')
# delegate_to can use variables
if not (self.delegate_to is None):
# delegate_to: localhost should use local transport
if self.delegate_to in ['127.0.0.1', 'localhost']:
self.transport = 'local'
# notified by is used by Playbook code to flag which hosts
# need to run a notifier
self.notified_by = []
# if no name is specified, use the action line as the name
if self.name is None:
self.name = self.action
# load various attributes
self.only_if = ds.get('only_if', 'True')
self.when = ds.get('when', None)
self.changed_when = ds.get('changed_when', None)
if self.changed_when is not None:
self.changed_when = utils.compile_when_to_only_if(self.changed_when)
self.async_seconds = int(ds.get('async', 0)) # not async by default
self.async_poll_interval = int(ds.get('poll', 10)) # default poll = 10 seconds
self.notify = ds.get('notify', [])
self.first_available_file = ds.get('first_available_file', None)
self.items_lookup_plugin = ds.get('items_lookup_plugin', None)
self.items_lookup_terms = ds.get('items_lookup_terms', None)
self.ignore_errors = ds.get('ignore_errors', False)
self.any_errors_fatal = ds.get('any_errors_fatal', play.any_errors_fatal)
self.always_run = ds.get('always_run', False)
# action should be a string
if not isinstance(self.action, basestring):
raise errors.AnsibleError("action is of type '%s' and not a string in task. name: %s" % (type(self.action).__name__, self.name))
# notify can be a string or a list, store as a list
if isinstance(self.notify, basestring):
self.notify = [ self.notify ]
# split the action line into a module name + arguments
tokens = self.action.split(None, 1)
if len(tokens) < 1:
raise errors.AnsibleError("invalid/missing action in task. name: %s" % self.name)
self.module_name = tokens[0]
self.module_args = ''
if len(tokens) > 1:
self.module_args = tokens[1]
import_tags = self.module_vars.get('tags',[])
if type(import_tags) in [str,unicode]:
# allow the user to list comma delimited tags
import_tags = import_tags.split(",")
# handle mutually incompatible options
incompatibles = [ x for x in [ self.first_available_file, self.items_lookup_plugin ] if x is not None ]
if len(incompatibles) > 1:
raise errors.AnsibleError("with_(plugin), and first_available_file are mutually incompatible in a single task")
        # make first_available_file accessible to Runner code
if self.first_available_file:
self.module_vars['first_available_file'] = self.first_available_file
if self.items_lookup_plugin is not None:
self.module_vars['items_lookup_plugin'] = self.items_lookup_plugin
self.module_vars['items_lookup_terms'] = self.items_lookup_terms
# allow runner to see delegate_to option
self.module_vars['delegate_to'] = self.delegate_to
# make some task attributes accessible to Runner code
self.module_vars['ignore_errors'] = self.ignore_errors
self.module_vars['register'] = self.register
self.module_vars['changed_when'] = self.changed_when
self.module_vars['always_run'] = self.always_run
# tags allow certain parts of a playbook to be run without running the whole playbook
apply_tags = ds.get('tags', None)
if apply_tags is not None:
if type(apply_tags) in [ str, unicode ]:
self.tags.append(apply_tags)
elif type(apply_tags) == list:
self.tags.extend(apply_tags)
self.tags.extend(import_tags)
if self.when is not None:
if self.only_if != 'True':
raise errors.AnsibleError('when obsoletes only_if, only use one or the other')
self.only_if = utils.compile_when_to_only_if(self.when)
if additional_conditions:
self.only_if = [ self.only_if ]
self.only_if.extend(additional_conditions)
| {
"content_hash": "35bd149f3e696976ab664246839d47d5",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 152,
"avg_line_length": 45.07017543859649,
"alnum_prop": 0.5682172051381861,
"repo_name": "dlab-berkeley/collaboratool-archive",
"id": "ba7599772e3563d6009ec6b11f68d810f2b63b15",
"size": "10990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bsd2/vagrant-ansible/ansible/lib/ansible/playbook/task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18154"
},
{
"name": "JavaScript",
"bytes": "30509"
},
{
"name": "Perl",
"bytes": "23315"
},
{
"name": "Puppet",
"bytes": "2252"
},
{
"name": "Python",
"bytes": "684123"
},
{
"name": "Ruby",
"bytes": "11103"
},
{
"name": "Shell",
"bytes": "6980"
}
],
"symlink_target": ""
} |
from bson.objectid import ObjectId
from jsonpath_rw import parse
from girder.models.model_base import ValidationException
from girder.constants import AccessType
from girder.api.rest import getCurrentUser
from girder.utility.model_importer import ModelImporter
from .base import BaseModel
from ..utility.volume_adapters import get_volume_adapter
from cumulus.constants import VolumeType
from cumulus.constants import VolumeState
from cumulus.common.girder import send_log_notification, \
send_status_notification
class Volume(BaseModel):
def __init__(self):
super(Volume, self).__init__()
def initialize(self):
self.name = 'volumes'
self.exposeFields(level=AccessType.READ,
fields=('_id', 'config', 'ec2', 'fs', 'name', 'size',
'type', 'zone', 'profileId', 'clusterId',
'status', 'path'))
def validate(self, volume):
if not volume['name']:
raise ValidationException('Name must not be empty.', 'name')
if not volume['type']:
raise ValidationException('Type must not be empty.', 'type')
profile_id = parse('profileId').find(volume)
if profile_id:
profile_id = profile_id[0].value
profile = ModelImporter.model('aws', 'cumulus').load(profile_id,
user=getCurrentUser())
if not profile:
raise ValidationException('Invalid profile id')
volume['profileId'] = profile['_id']
volume_adapter = get_volume_adapter(volume)
volume = volume_adapter.validate()
return volume
def filter(self, volume, user):
volume = super(Volume, self).filter(doc=volume, user=user)
# Convert status (IntEnum) to string
volume['status'] = str(volume['status'])
return volume
def create_ebs(self, user, profileId, name, zone, size, fs):
volume = {
'name': name,
'zone': zone,
'size': size,
'type': VolumeType.EBS,
'ec2': {
'id': None
},
'profileId': profileId,
'status': VolumeState.CREATED,
'log': []
}
if fs:
volume['fs'] = fs
        # Add userId field to make searching for a user's volumes easier
volume['userId'] = user['_id']
self.setUserAccess(volume, user=user, level=AccessType.ADMIN)
group = {
'_id': ObjectId(self.get_group_id())
}
self.setGroupAccess(volume, group, level=AccessType.ADMIN)
self.save(volume)
send_status_notification('volume', volume)
return volume
def append_to_log(self, user, id, record):
volume = self.load(id, user=user, level=AccessType.WRITE)
self.update({'_id': ObjectId(id)}, {'$push': {'log': record}})
send_log_notification('volume', volume, record)
def update_volume(self, user, volume):
volume_id = volume['_id']
current_volume = self.load(volume_id, user=user,
level=AccessType.WRITE)
previous_status = current_volume['status']
current_volume.update(volume)
if current_volume['status'] != previous_status:
send_status_notification('volume', current_volume)
return self.save(current_volume)
def log_records(self, user, id, offset=0):
volume = self.load(id, user=user, level=AccessType.READ)
return volume['log'][offset:]
| {
"content_hash": "b5f4d7d6982346489aaf561dbef9c65d",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 79,
"avg_line_length": 32.24107142857143,
"alnum_prop": 0.5801716975906951,
"repo_name": "Kitware/cumulus",
"id": "78f0b96777dd29192d0434bdf7530aea0c3d6fcb",
"size": "4405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "girder/cumulus/cumulus_plugin/models/volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "5997"
},
{
"name": "Python",
"bytes": "852977"
},
{
"name": "Shell",
"bytes": "13175"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 2f941b1e9680
Revises: 1a850f7e0aa6
Create Date: 2015-04-04 03:05:00.315217
"""
# revision identifiers, used by Alembic.
revision = '2f941b1e9680'
down_revision = '1a850f7e0aa6'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('document_placeholder',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('document_id', sa.Integer(), nullable=False),
sa.Column('placeholder_id', sa.Integer(), nullable=False),
sa.Column('value', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['document_id'], ['document_base.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['placeholder_id'], ['template_placeholders.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('document_placeholder')
### end Alembic commands ###
| {
"content_hash": "2571c5baf8058d095d6b8423b8cf9c03",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 98,
"avg_line_length": 30.147058823529413,
"alnum_prop": 0.6878048780487804,
"repo_name": "codeforamerica/template-maker",
"id": "b39ff328195dd1a9975adeb39b7a6f2ab65090dc",
"size": "1025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/2f941b1e9680_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3199"
},
{
"name": "HTML",
"bytes": "28021"
},
{
"name": "JavaScript",
"bytes": "9441"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "96038"
},
{
"name": "Shell",
"bytes": "2628"
}
],
"symlink_target": ""
} |
import json
from persistent_message.models import Message
from myuw.test.api import MyuwApiTest, require_url
@require_url('myuw_banner_message')
class PersistentMessageAPITest(MyuwApiTest):
fixtures = ['persistent_messages.json']
def get_response(self):
return self.get_response_by_reverse('myuw_banner_message')
def test_javerage(self):
self.set_user('javerage')
response = self.get_response()
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data), 2)
Message.objects.all().delete()
response = self.get_response()
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data), 0)
| {
"content_hash": "927db65d39dbbd29dcac0c43323705c7",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 66,
"avg_line_length": 32.833333333333336,
"alnum_prop": 0.684010152284264,
"repo_name": "uw-it-aca/myuw",
"id": "ac1e56fc5bf06fe29eea0653121e1754f82806f0",
"size": "876",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "myuw/test/api/test_persistent_messages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1353"
},
{
"name": "Dockerfile",
"bytes": "1182"
},
{
"name": "HTML",
"bytes": "87842"
},
{
"name": "JavaScript",
"bytes": "362025"
},
{
"name": "Python",
"bytes": "1057335"
},
{
"name": "SCSS",
"bytes": "5763"
},
{
"name": "Shell",
"bytes": "838"
},
{
"name": "Vue",
"bytes": "522119"
}
],
"symlink_target": ""
} |
from flask import request, abort
def require_apikey(key):
"""
Decorator for view functions and API requests. Requires
that the user pass in the API key for the application.
"""
def _wrapped_func(view_func):
def _decorated_func(*args, **kwargs):
passed_key = request.args.get('key', None)
if passed_key == key:
return view_func(*args, **kwargs)
else:
abort(401)
return _decorated_func
return _wrapped_func
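# Minimal usage sketch (hypothetical view and key, for illustration only; the
# decorator itself only needs Flask's request context):
if __name__ == '__main__':  # pragma: no cover
    from flask import Flask
    app = Flask(__name__)
    @app.route('/secret')
    @require_apikey('my-secret-key')
    def secret_view():
        # Reached only when the request passes ?key=my-secret-key; any other
        # value (or a missing key) aborts with HTTP 401.
        return 'ok'
    app.run(debug=True)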
| {
"content_hash": "5087f5ae038ae088a0401d1eca703150",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 60,
"avg_line_length": 30.352941176470587,
"alnum_prop": 0.5813953488372093,
"repo_name": "hkpeprah/television-2.0",
"id": "9571eca66a1564bf34cb67adc5b1ee30c55fe4de",
"size": "516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/telebble/decorators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "71747"
},
{
"name": "C++",
"bytes": "2117"
},
{
"name": "CSS",
"bytes": "6547"
},
{
"name": "HTML",
"bytes": "6435"
},
{
"name": "JavaScript",
"bytes": "18646"
},
{
"name": "Makefile",
"bytes": "240"
},
{
"name": "Python",
"bytes": "97258"
}
],
"symlink_target": ""
} |
from django import template
from django.core.urlresolvers import reverse
register = template.Library()
SKIP_PAGINATION = True # When new filter adds\removes - remove page variable from GET, to jump to first page
@register.simple_tag
def query(GET, name, value, simple_query=True):
result = u'?'
if simple_query or name not in GET:
result += name + u'=' + unicode(value)
else:
result += name + u'=' + GET[name] + "," + unicode(value)
for get_name in GET:
if name != get_name and (get_name != u"page" or not SKIP_PAGINATION):
result += u'&' + get_name + u'=' + GET[get_name]
return result
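# Example (hedged sketch; the ordering of the carried-over parameters depends
# on dict iteration order):
#   query({'color': 'red', 'page': '3'}, 'size', 'L')  ->  '?size=L&color=red'
#   ('page' is dropped because SKIP_PAGINATION is True)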
@register.simple_tag
def current_query(GET):
result = u'?'
for get_name in GET:
result += u'&' + get_name + u'=' + GET[get_name]
return result
@register.simple_tag
def remove_query(GET, name, value):
result = u'?'
for get_name in GET:
if result != u'?':
result += u'&'
if name != get_name:
if get_name != u"page" or not SKIP_PAGINATION:
result += get_name + u'=' + GET[get_name]
else:
if ',' in GET[get_name]:
result += get_name + u'=' + GET[get_name].replace(unicode(value)+',', '').replace(','+unicode(value), '')
return result
def filter(request, name, objects, current, simple_query=True):
items = []
for index, value, count in objects:
if (type(current) == type(index) and index == current) or (type(current) == list and index in current):
items.append((request.path + remove_query(request.GET, name, index), value, count, True))
else:
items.append((request.path + query(request.GET, name, index, simple_query), value, count, False))
return items
@register.inclusion_tag("search/filter_simple.html", takes_context = True)
def filter_simple(context, name, list, current):
"""
Make simple filter (when you can choose only one object to filter)
Usage: filter_simple [ name of variable in query ] [ list of objects you want to filter from ] [ current value ]
"""
context['items'] = filter(context['request'], name, list, current)
return context
@register.inclusion_tag("search/filter_complex.html", takes_context = True)
def filter_complex(context, name, list, current):
"""
    Make a complex filter (more than one object from the list can be selected at a time).
Usage: filter_complex [ name of variable in query ] [ list of objects you want to filter from ] [ current value ]
"""
context['items'] = filter(context['request'], name, list, current, False)
return context
@register.inclusion_tag("search/filter_current.html", takes_context = True)
def filter_current(context, name, list, current):
"""
    Render the current values for this filter.
    Usage: filter_current [ name of variable in query ] [ list of objects the current values are drawn from ] [ current value ]
"""
context['var_name'] = name
context['var_list'] = list
context['var_current'] = current
return context
@register.inclusion_tag("search/show_filters.html", takes_context = True)
def show_filters(context):
return context
@register.inclusion_tag("search/show_current_filters.html", takes_context = True)
def show_current_filters(context):
return context
@register.inclusion_tag("search/show_sort.html", takes_context = True)
def show_sort(context):
return context
| {
"content_hash": "6007e1db0885606ce6e68c029270f398",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 127,
"avg_line_length": 39.04494382022472,
"alnum_prop": 0.640863309352518,
"repo_name": "ProstoKSI/django-filters",
"id": "777df02ac65710e3e0d35aebaaf67ab6348fd3fe",
"size": "3475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filters/templatetags/query.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2045"
},
{
"name": "JavaScript",
"bytes": "704"
},
{
"name": "Python",
"bytes": "11997"
}
],
"symlink_target": ""
} |
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models
from ._configuration import AzureMapsManagementClientConfiguration
from ._serialization import Deserializer, Serializer
from .operations import AccountsOperations, CreatorsOperations, MapsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class AzureMapsManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""Azure Maps.
:ivar accounts: AccountsOperations operations
:vartype accounts: azure.mgmt.maps.operations.AccountsOperations
:ivar maps: MapsOperations operations
:vartype maps: azure.mgmt.maps.operations.MapsOperations
:ivar creators: CreatorsOperations operations
:vartype creators: azure.mgmt.maps.operations.CreatorsOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2021-12-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = AzureMapsManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.accounts = AccountsOperations(self._client, self._config, self._serialize, self._deserialize)
self.maps = MapsOperations(self._client, self._config, self._serialize, self._deserialize)
self.creators = CreatorsOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> AzureMapsManagementClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
| {
"content_hash": "a88d80404eb3661bfdaf6b4db555ae96",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 106,
"avg_line_length": 43.611111111111114,
"alnum_prop": 0.6929936305732484,
"repo_name": "Azure/azure-sdk-for-python",
"id": "84532f21e4a7ba5d2d1d437137bad1e900b2bf33",
"size": "4393",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/maps/azure-mgmt-maps/azure/mgmt/maps/_azure_maps_management_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
class Graph():
def __init__(self, vertices):
self.vertices = vertices
self.graph = [[0 for column in range(vertices)] for row in range(vertices)]
def min_distance(self, dist, min_dist_set):
min_dist = float("inf")
for v in range(self.vertices):
if dist[v] < min_dist and min_dist_set[v] == False:
min_dist = dist[v]
min_index = v
return min_index
def dijkstra(self, src):
dist = [float("inf")] * self.vertices
dist[src] = 0
min_dist_set = [False] * self.vertices
for count in range(self.vertices):
            # minimum-distance vertex that has not been processed yet
            u = self.min_distance(dist, min_dist_set)
            # put the minimum-distance vertex into the shortest-path tree
            min_dist_set[u] = True
            # update dist values of the adjacent vertices
for v in range(self.vertices):
if self.graph[u][v] > 0 and min_dist_set[v] == False and dist[v] > dist[u] + self.graph[u][v]:
dist[v] = dist[u] + self.graph[u][v]
return dist
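# Minimal usage sketch (hypothetical edge weights, for illustration only):
if __name__ == '__main__':
    g = Graph(4)
    g.graph = [
        [0, 1, 4, 0],
        [1, 0, 2, 5],
        [4, 2, 0, 1],
        [0, 5, 1, 0],
    ]
    # Expected shortest distances from vertex 0: [0, 1, 3, 4]
    print(g.dijkstra(0))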
| {
"content_hash": "ca1132345c3c7579d233e94a7b527250",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 110,
"avg_line_length": 33.1764705882353,
"alnum_prop": 0.5452127659574468,
"repo_name": "amaozhao/algorithms",
"id": "fe5772ec53deb7de5744bb8ae94097eac7ab41ec",
"size": "1179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algorithms/graph/dijkstra.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "469268"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import sys, os
if __name__ == '__main__':
if len(sys.argv) < 2:
from .launcher import application_launcher
application_launcher()
else:
from .launcher import start_app
start_app(sys.argv[1]) | {
"content_hash": "32f45ae44ce08ac7bd65a39c9cc9913f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 47,
"avg_line_length": 22.454545454545453,
"alnum_prop": 0.6720647773279352,
"repo_name": "lukacu/manus",
"id": "43dde93c073173dc14db7a4ad8a9da6ea6955aa1",
"size": "247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/manus_apps/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "56029"
},
{
"name": "CMake",
"bytes": "3715"
},
{
"name": "CSS",
"bytes": "4232"
},
{
"name": "HTML",
"bytes": "6519"
},
{
"name": "JavaScript",
"bytes": "185060"
},
{
"name": "Python",
"bytes": "80711"
},
{
"name": "Shell",
"bytes": "166"
}
],
"symlink_target": ""
} |
from ceph_deploy import exc, mon
from ceph_deploy.conf.ceph import CephConf
from mock import Mock
import pytest
def make_fake_conf():
return CephConf()
# NOTE: If at some point we re-use this helper, move it out
# and make it even more generic
def make_fake_conn(receive_returns=None):
receive_returns = receive_returns or (['{}'], [], 0)
conn = Mock()
conn.cmd = lambda x: x
conn.sudo = ''
conn.return_value = conn
conn.execute = conn
conn.receive = Mock(return_value=receive_returns)
conn.gateway.remote_exec = conn.receive
conn.result = Mock(return_value=conn)
conn.cmd = lambda x: x
return conn
class TestGetMonInitialMembers(object):
def test_assert_if_mon_none_and_empty_True(self):
cfg = make_fake_conf()
with pytest.raises(exc.NeedHostError):
mon.get_mon_initial_members(Mock(), True, cfg)
def test_return_if_mon_none_and_empty_false(self):
cfg = make_fake_conf()
mon_initial_members = mon.get_mon_initial_members(Mock(), False, cfg)
assert mon_initial_members is None
def test_single_item_if_mon_not_none(self):
cfg = make_fake_conf()
cfg.add_section('global')
cfg.set('global', 'mon initial members', 'AAAA')
mon_initial_members = mon.get_mon_initial_members(Mock(), False, cfg)
assert set(mon_initial_members) == set(['AAAA'])
def test_multiple_item_if_mon_not_none(self):
cfg = make_fake_conf()
cfg.add_section('global')
cfg.set('global', 'mon initial members', 'AAAA, BBBB')
mon_initial_members = mon.get_mon_initial_members(Mock(), False, cfg)
assert set(mon_initial_members) == set(['AAAA', 'BBBB'])
class TestCatchCommonErrors(object):
def setup(self):
self.logger = Mock()
def assert_logger_message(self, logger, msg):
calls = logger.call_args_list
for log_call in calls:
if msg in log_call[0][0]:
return True
raise AssertionError('"%s" was not found in any of %s' % (msg, calls))
def test_warn_if_no_intial_members(self):
fake_conn = make_fake_conn()
cfg = make_fake_conf()
mon.catch_mon_errors(fake_conn, self.logger, 'host', cfg, Mock())
expected_msg = 'is not defined in `mon initial members`'
self.assert_logger_message(self.logger.warning, expected_msg)
def test_warn_if_host_not_in_intial_members(self):
fake_conn = make_fake_conn()
cfg = make_fake_conf()
cfg.add_section('global')
cfg.set('global', 'mon initial members', 'AAAA')
mon.catch_mon_errors(fake_conn, self.logger, 'host', cfg, Mock())
expected_msg = 'is not defined in `mon initial members`'
self.assert_logger_message(self.logger.warning, expected_msg)
def test_warn_if_not_mon_in_monmap(self):
fake_conn = make_fake_conn()
cfg = make_fake_conf()
mon.catch_mon_errors(fake_conn, self.logger, 'host', cfg, Mock())
expected_msg = 'does not exist in monmap'
self.assert_logger_message(self.logger.warning, expected_msg)
def test_warn_if_not_public_addr_and_not_public_netw(self):
fake_conn = make_fake_conn()
cfg = make_fake_conf()
cfg.add_section('global')
mon.catch_mon_errors(fake_conn, self.logger, 'host', cfg, Mock())
expected_msg = 'neither `public_addr` nor `public_network`'
self.assert_logger_message(self.logger.warning, expected_msg)
| {
"content_hash": "d4e66dfcdd1163ddaddd1acbc1a28450",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 78,
"avg_line_length": 36.96842105263158,
"alnum_prop": 0.6332574031890661,
"repo_name": "ceph/ceph-deploy",
"id": "7e73cad704e496d38940b648e0255aee6bac346e",
"size": "3512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceph_deploy/tests/test_mon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "395800"
},
{
"name": "Shell",
"bytes": "9231"
}
],
"symlink_target": ""
} |
from rest_framework import serializers as ser
from api.base.serializers import JSONAPISerializer, LinksField, JSONAPIListField
from website.models import Subject
class TaxonomyField(ser.Field):
def to_representation(self, subject):
if not isinstance(subject, Subject):
subject = Subject.load(subject)
if subject is not None:
return {'id': subject._id,
'text': subject.text}
return None
def to_internal_value(self, subject_id):
return subject_id
class TaxonomySerializer(JSONAPISerializer):
filterable_fields = frozenset([
'text',
'parents',
'id'
])
id = ser.CharField(source='_id', required=True)
text = ser.CharField(max_length=200)
parents = JSONAPIListField(child=TaxonomyField())
child_count = ser.IntegerField()
links = LinksField({
'parents': 'get_parent_urls',
'self': 'get_absolute_url',
})
def get_parent_urls(self, obj):
return [p.get_absolute_url() for p in obj.parents]
def get_absolute_url(self, obj):
return obj.get_absolute_url()
class Meta:
type_ = 'taxonomies'
| {
"content_hash": "300a650d998ab2ff6aaefda6a6a4da14",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 80,
"avg_line_length": 28.73170731707317,
"alnum_prop": 0.6324278438030561,
"repo_name": "rdhyee/osf.io",
"id": "9a435ad9911180b2f0b2f6f1ed43b25be18705bc",
"size": "1178",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api/taxonomies/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "174764"
},
{
"name": "HTML",
"bytes": "131860"
},
{
"name": "JavaScript",
"bytes": "1663707"
},
{
"name": "Mako",
"bytes": "679787"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "6720626"
}
],
"symlink_target": ""
} |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from datetime import datetime
from enum import Enum
from glob import glob, iglob
from pathlib import Path
from lxml.etree import XMLSyntaxError
from zipfile import ZipFile
from matrix_runner import main, matrix_axis, matrix_action, matrix_command, matrix_filter, \
ConsoleReport, CropReport, TransformReport, JUnitReport
@matrix_axis("device", "d", "Device(s) to be considered.")
class DeviceAxis(Enum):
CM0 = ('Cortex-M0', 'CM0')
CM0plus = ('Cortex-M0plus', 'CM0plus')
CM3 = ('Cortex-M3', 'CM3')
CM4 = ('Cortex-M4', 'CM4')
CM4FP = ('Cortex-M4FP', 'CM4FP')
CM7 = ('Cortex-M7', 'CM7')
CM7SP = ('Cortex-M7SP', 'CM7SP')
CM7DP = ('Cortex-M7DP', 'CM7DP')
CM23 = ('Cortex-M23', 'CM23')
CM23S = ('Cortex-M23S', 'CM23S')
CM23NS = ('Cortex-M23NS', 'CM23NS')
CM33 = ('Cortex-M33', 'CM33')
CM33S = ('Cortex-M33S', 'CM33S')
CM33NS = ('Cortex-M33NS', 'CM33NS')
CM35P = ('Cortex-M35P', 'CM35P')
CM35PS = ('Cortex-M35PS', 'CM35PS')
CM35PNS = ('Cortex-M35PNS', 'CM35PNS')
CM55S = ('Cortex-M55S', 'CM55S')
CM55NS = ('Cortex-M55NS', 'CM55NS')
CM85S = ('Cortex-M85S', 'CM85S')
CM85NS = ('Cortex-M85NS', 'CM85NS')
CA5 = ('Cortex-A5', 'CA5')
CA7 = ('Cortex-A7', 'CA7')
CA9 = ('Cortex-A9', 'CA9')
# CA5NEON = ('Cortex-A5neon', 'CA5neon')
# CA7NEON = ('Cortex-A7neon', 'CA7neon')
# CA9NEON = ('Cortex-A9neon', 'CA9neon')
def has_bl(self):
return self in [
DeviceAxis.CM23NS,
DeviceAxis.CM33NS,
DeviceAxis.CM35PNS,
DeviceAxis.CM55NS,
DeviceAxis.CM85NS
]
@property
def bl_device(self):
bld = {
DeviceAxis.CM23NS: 'CM23S',
DeviceAxis.CM33NS: 'CM33S',
DeviceAxis.CM35PNS: 'CM35PS',
DeviceAxis.CM55NS: 'CM55S',
DeviceAxis.CM85NS: 'CM85S'
}
return bld[self]
@matrix_axis("compiler", "c", "Compiler(s) to be considered.")
class CompilerAxis(Enum):
AC6 = ('AC6')
AC6LTM = ('AC6LTM')
GCC = ('GCC')
@property
def image_ext(self):
ext = {
CompilerAxis.AC6: 'axf',
CompilerAxis.AC6LTM: 'axf',
CompilerAxis.GCC: 'elf'
}
return ext[self]
@matrix_axis("optimize", "o", "Optimization level(s) to be considered.")
class OptimizationAxis(Enum):
LOW = ('low', 'O1')
MID = ('mid', 'O2')
HIGH = ('high', 'Ofast')
SIZE = ('size', 'Os')
TINY = ('tiny', 'Oz')
MODEL_EXECUTABLE = {
DeviceAxis.CM0: ("VHT_MPS2_Cortex-M0", []),
DeviceAxis.CM0plus: ("VHT_MPS2_Cortex-M0plus", []),
DeviceAxis.CM3: ("VHT_MPS2_Cortex-M3", []),
DeviceAxis.CM4: ("VHT_MPS2_Cortex-M4", []),
DeviceAxis.CM4FP: ("VHT_MPS2_Cortex-M4", []),
DeviceAxis.CM7: ("VHT_MPS2_Cortex-M7", []),
DeviceAxis.CM7DP: ("VHT_MPS2_Cortex-M7", []),
DeviceAxis.CM7SP: ("VHT_MPS2_Cortex-M7", []),
DeviceAxis.CM23: ("VHT_MPS2_Cortex-M23", []),
DeviceAxis.CM23S: ("VHT_MPS2_Cortex-M23", []),
DeviceAxis.CM23NS: ("VHT_MPS2_Cortex-M23", []),
DeviceAxis.CM33: ("VHT_MPS2_Cortex-M33", []),
DeviceAxis.CM33S: ("VHT_MPS2_Cortex-M33", []),
DeviceAxis.CM33NS: ("VHT_MPS2_Cortex-M33", []),
DeviceAxis.CM35P: ("VHT_MPS2_Cortex-M35P", []),
DeviceAxis.CM35PS: ("VHT_MPS2_Cortex-M35P", []),
DeviceAxis.CM35PNS: ("VHT_MPS2_Cortex-M35P", []),
DeviceAxis.CM55S: ("VHT_MPS2_Cortex-M55", []),
DeviceAxis.CM55NS: ("VHT_MPS2_Cortex-M55", []),
DeviceAxis.CM85S: ("VHT_MPS2_Cortex-M85", []),
DeviceAxis.CM85NS: ("VHT_MPS2_Cortex-M85", []),
DeviceAxis.CA5: ("FVP_VE_Cortex-A5x1", []),
DeviceAxis.CA7: ("FVP_VE_Cortex-A7x1", []),
DeviceAxis.CA9: ("FVP_VE_Cortex-A9x1", []),
# DeviceAxis.CA5NEON: ("FVP_VE_Cortex-A5x1", []),
# DeviceAxis.CA7NEON: ("FVP_VE_Cortex-A7x1", []),
# DeviceAxis.CA9NEON: ("FVP_VE_Cortex-A9x1", [])
}
def config_suffix(config, timestamp=True):
suffix = f"{config.compiler[0]}-{config.optimize[0]}-{config.device[1]}"
if timestamp:
suffix += f"-{datetime.now().strftime('%Y%m%d%H%M%S')}"
return suffix
def project_name(config):
return f"Validation.{config.compiler}_{config.optimize}+{config.device[1]}"
def bl_project_name(config):
return f"Bootloader.{config.compiler}_{config.optimize}+{config.device.bl_device}"
def output_dir(config):
return f"{project_name(config)}_outdir"
def bl_output_dir(config):
return f"{bl_project_name(config)}_outdir"
def model_config(config):
return f"../Layer/Target/{config.device[1]}/model_config.txt"
@matrix_action
def clean(config):
"""Build the selected configurations using CMSIS-Build."""
yield cbuild_clean(f"{project_name(config)}/{project_name(config)}.cprj")
@matrix_action
def build(config, results):
"""Build the selected configurations using CMSIS-Build."""
if config.device.has_bl():
logging.info("Compiling Bootloader...")
yield csolution(f"{bl_project_name(config)}")
yield cbuild(f"{bl_project_name(config)}/{bl_project_name(config)}.cprj")
logging.info("Compiling Tests...")
if config.compiler == CompilerAxis.GCC and config.device.match("CA*"):
ldfile = Path(f"{project_name(config)}/RTE/Device/ARM{config.device[1]}/ARM{config.device[1]}.ld")
infile = ldfile.replace(ldfile.with_suffix('.ld.in'))
yield preprocess(infile, ldfile)
yield csolution(f"{project_name(config)}")
yield cbuild(f"{project_name(config)}/{project_name(config)}.cprj")
if not all(r.success for r in results):
return
file = f"Core_Validation-{config_suffix(config)}.zip"
logging.info(f"Archiving build output to {file}...")
with ZipFile(file, "w") as archive:
for content in iglob(f"{project_name(config)}/**/*", recursive=True):
if Path(content).is_file():
archive.write(content)
@matrix_action
def extract(config):
"""Extract the latest build archive."""
archives = sorted(glob(f"RTOS2_Validation-{config_suffix(config, timestamp=False)}-*.zip"), reverse=True)
yield unzip(archives[0])
@matrix_action
def run(config, results):
"""Run the selected configurations."""
logging.info("Running Core Validation on Arm model ...")
yield model_exec(config)
try:
results[0].test_report.write(f"Core_Validation-{config_suffix(config)}.junit")
except RuntimeError as e:
if isinstance(e.__cause__, XMLSyntaxError):
logging.error("No valid test report found in model output!")
else:
logging.exception(e)
@matrix_command()
def cbuild_clean(project):
return ["cbuild", "-c", project]
@matrix_command()
def unzip(archive):
return ["bash", "-c", f"unzip {archive}"]
@matrix_command()
def preprocess(infile, outfile):
return ["arm-none-eabi-gcc", "-xc", "-E", infile, "-P", "-o", outfile]
@matrix_command()
def csolution(project):
return ["csolution", "convert", "-s", "Validation.csolution.yml", "-c", project]
@matrix_command()
def cbuild(project):
return ["cbuild", project]
@matrix_command(test_report=ConsoleReport() |
                CropReport(r'<\?xml version="1.0"\?>', r'</report>') |
TransformReport('validation.xsl') |
JUnitReport(title=lambda title, result: f"{result.command.config.compiler}."
f"{result.command.config.optimize}."
f"{result.command.config.device}."
f"{title}"))
def model_exec(config):
cmdline = [MODEL_EXECUTABLE[config.device][0], "-q", "--simlimit", 100, "-f", model_config(config)]
cmdline += MODEL_EXECUTABLE[config.device][1]
cmdline += ["-a", f"{project_name(config)}/{output_dir(config)}/{project_name(config)}.{config.compiler.image_ext}"]
if config.device.has_bl():
cmdline += ["-a", f"{bl_project_name(config)}/{bl_output_dir(config)}/{bl_project_name(config)}.{config.compiler.image_ext}"]
return cmdline
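# Illustrative only: for a hypothetical Cortex-M3 configuration the assembled
# command line would look roughly like
#   VHT_MPS2_Cortex-M3 -q --simlimit 100 -f ../Layer/Target/CM3/model_config.txt \
#       -a Validation.<compiler>_<optimize>+CM3/Validation.<...>+CM3_outdir/Validation.<...>+CM3.<image_ext>
# plus a second "-a <bootloader image>" pair when the device has a bootloader.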
if __name__ == "__main__":
main()
| {
"content_hash": "f1e23dd92cbc76f7b69220af1b2cc274",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 133,
"avg_line_length": 33.21686746987952,
"alnum_prop": 0.5984766050054406,
"repo_name": "ARM-software/CMSIS_5",
"id": "e6084b35155f01e5faf7902b7f4b997efa3ea8f6",
"size": "8271",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "CMSIS/CoreValidation/Project/build.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "608811"
},
{
"name": "Batchfile",
"bytes": "878"
},
{
"name": "C",
"bytes": "10588897"
},
{
"name": "C++",
"bytes": "16079"
},
{
"name": "CMake",
"bytes": "37827"
},
{
"name": "CSS",
"bytes": "184016"
},
{
"name": "Gnuplot",
"bytes": "1971"
},
{
"name": "HTML",
"bytes": "1039321"
},
{
"name": "JavaScript",
"bytes": "266861"
},
{
"name": "Python",
"bytes": "135156"
},
{
"name": "Shell",
"bytes": "28765"
},
{
"name": "XSLT",
"bytes": "2287"
}
],
"symlink_target": ""
} |
sky_model = {}
sky_model['sky_original'] = ('SKY/models/sky_model_original.fits', {})
sky_model['sky_updarm_gaussian'] = ('SKY/models/sky_model_updarm_gaussian.fits', {})
sky_model['sky_updarm_gaussian_hole'] = ('SKY/models/sky_model_updarm_gaussian.fits', {'hole': (4.5, 1.)})
sky_model['sky_updarm_gaussian_hole_morepah'] = ('SKY/models/sky_model_updarm_gaussian.fits', {'hole': (4.5, 1.), 'pah_factor': 1.50})
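# Illustrative lookup: each entry pairs a FITS model path with a dict of extra
# parameters (interpreted by the downstream scripts), e.g.
#   path, params = sky_model['sky_updarm_gaussian_hole']
#   # path   == 'SKY/models/sky_model_updarm_gaussian.fits'
#   # params == {'hole': (4.5, 1.)}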
| {
"content_hash": "749d9ddb08e7bb24bac00b45085081b2",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 134,
"avg_line_length": 69,
"alnum_prop": 0.6690821256038647,
"repo_name": "hyperion-rt/paper-galaxy-rt-model",
"id": "3d267cca1245308cedab800660291f8ee0301932",
"size": "414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "58804"
},
{
"name": "Shell",
"bytes": "955"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from homepage.views import HomeIndexView, HomeLoginView, HomeLogoutView, HomeRegisterView
urlpatterns = patterns('',
url( r'^$', HomeIndexView.as_view(), name='index'),
url( r'^logout/', HomeLogoutView.as_view(), name='logout_page' ),
url( r'^login/', HomeLoginView.as_view(), name='login_page' ),
url( r'^register/', HomeRegisterView.as_view(), name='register_page' ),
)
| {
"content_hash": "8d4843bf3253ae1ea7bce7d939059073",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 89,
"avg_line_length": 49,
"alnum_prop": 0.6961451247165533,
"repo_name": "polarkac/ReaderSS",
"id": "db1090876b0a617856d00d21200786834a6d7e2f",
"size": "441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ReaderSS/homepage/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "202"
},
{
"name": "Python",
"bytes": "20080"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
# Create your views here.
from .models import Book, Author, BookInstance, Genre
# Function based views
def index(request):
"""
View function for the home page of the site.
"""
# Generate counts of some of the main objects
num_books=Book.objects.all().count()
num_instances=BookInstance.objects.all().count()
#Available books (status = 'a')
num_instances_available=BookInstance.objects.filter(status__exact='a').count()
num_authors=Author.objects.count() # The 'all()' is implied by default
# Number of visits to this view, as counted in the session variable.
num_visits=request.session.get('num_visits', 0)
request.session['num_visits'] = num_visits+1
# Render the HTML template index.html with the data in the context variable
return render(
request,
'index.html',
context={'num_books':num_books,'num_instances':num_instances,'num_instances_available':num_instances_available,'num_authors':num_authors, 'num_visits':num_visits},
)
# Class based views
from django.views import generic
class BookListView(generic.ListView):
"""
Generic class-based view for a list of books
"""
model = Book
paginate_by = 10
class BookDetailView(generic.DetailView):
"""
Generic class-based detail view for a book
"""
model = Book
class AuthorListView(generic.ListView):
"""
Generic class-based list view for a list of authors.
"""
model = Author
paginate_by = 10
class AuthorDetailView(generic.DetailView):
"""
Generic class-based detail view for an author.
"""
model = Author
from django.contrib.auth.mixins import LoginRequiredMixin
class LoanedBooksByUserListView(LoginRequiredMixin,generic.ListView):
"""
Generic class-based view listing book on loan to current user.
Login Required
"""
model = BookInstance
template_name = 'catalog/bookinstance_list_borrowed_user.html'
paginate_by = 10
def get_queryset(self):
return BookInstance.objects.filter(borrower=self.request.user).filter(status__exact='o').order_by('due_back')
from django.contrib.auth.mixins import PermissionRequiredMixin
class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView):
"""
Generic class-based view listing all books on loan.
Only visible to users with can_mark_returned permission.
"""
model = BookInstance
permission_required = 'catalog.can_mark_returned'
template_name = 'catalog/bookinstance_list_borrowed_all.html'
paginate_by = 10
def get_queryset(self):
return BookInstance.objects.filter(status__exact='o').order_by('due_back')
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
import datetime
from django.contrib.auth.decorators import permission_required
from .forms import RenewBookForm # references forms.py
@permission_required('catalog.can_mark_returned')
def renew_book_librarian(request, pk):
"""
View function for renewing a specific BookInstance by librarian
"""
book_inst=get_object_or_404(BookInstance, pk = pk)
#If this is a POST request then process the Form data
if request.method == 'POST':
# Create a form instance and populate it with data from the request (binding):
form = RenewBookForm(request.POST)
# Check if the form is valid:
if form.is_valid():
# process the data in form.cleaned_date as required (here we just write it to the model due_back field)
book_inst.due_back = form.cleaned_data['renewal_date']
book_inst.save()
# redirect to a new URL:
return HttpResponseRedirect(reverse('all-borrowed') )
# If this is a GET (or any other method) create the default form.
else:
proposed_renewal_date = datetime.date.today() + datetime.timedelta(weeks=3)
form = RenewBookForm(initial={'renewal_date': proposed_renewal_date,})
return render(request, 'catalog/book_renew_librarian.html', {'form': form, 'bookinst':book_inst})
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from .models import Author
class AuthorCreate(PermissionRequiredMixin, CreateView):
model = Author
fields = '__all__'
initial={'date_of_death':'12/10/2016',}
permission_required = 'catalog.can_mark_returned'
class AuthorUpdate(PermissionRequiredMixin, UpdateView):
model = Author
fields = ['first_name','last_name','date_of_birth','date_of_death']
permission_required = 'catalog.can_mark_returned'
class AuthorDelete(PermissionRequiredMixin, DeleteView):
model = Author
success_url = reverse_lazy('authors')
permission_required = 'catalog.can_mark_returned'
class BookCreate(PermissionRequiredMixin, CreateView):
model = Book
fields = '__all__'
initial = {'date_of_death':'12/10/2016'}
permission_required = 'catalog.can_mark_returned'
class BookUpdate(PermissionRequiredMixin, UpdateView):
model = Book
fields = '__all__'
permission_required = 'catalog.can_mark_returned'
class BookDelete(PermissionRequiredMixin, DeleteView):
model = Book
success_url = reverse_lazy('books')
permission_required = 'catalog.can_mark_returned'
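# Illustrative URL wiring for the views above; the patterns and names below are
# assumptions for documentation purposes, not the project's actual urls.py:
#
#   from django.conf.urls import url
#   from . import views
#
#   urlpatterns = [
#       url(r'^$', views.index, name='index'),
#       url(r'^book/(?P<pk>\d+)/renew/$', views.renew_book_librarian,
#           name='renew-book-librarian'),
#       url(r'^author/create/$', views.AuthorCreate.as_view(), name='author_create'),
#   ]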
| {
"content_hash": "090a3175c76c13a0c57ba7ade5b38f37",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 165,
"avg_line_length": 29.857988165680474,
"alnum_prop": 0.7526753864447087,
"repo_name": "byronvhughey/django_local_library",
"id": "40c3c5aee9f0c89bd62a5e2cea06b36ba1b0c52e",
"size": "5046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catalog/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67"
},
{
"name": "HTML",
"bytes": "10438"
},
{
"name": "Python",
"bytes": "38256"
}
],
"symlink_target": ""
} |
"""Loose reimplementation of the t2t tokenizer.
Original code:
https://github.com/tensorflow/tensor2tensor/blob/v1.5.5/tensor2tensor/data_generators/tokenizer.py
Provides a WordpiecePreprocessor, a higher order function which takes a
vocabulary object and returns a preprocessor, and a WordpiecePostprocessor.
Note that the latter is not a higher order function and can be used directly
without making a new section in the configuration.
"""
from typing import List, Callable, Set
import re
from typeguard import check_argument_types
from neuralmonkey.vocabulary import Vocabulary
UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")
def escape_token(token: str, alphabet: Set[str]) -> str:
"""Escapes the token in the t2t fashion.
Underscores are regarded as an end of a token, so they must be escaped.
Additionally, they/we escape also the OOA (out-of-alphabet) characters
using their unicode code.
"""
esc_token = token.replace("\\", "\\\\") # replace 1 backslash with 2
esc_token = esc_token.replace("_", "\\u") # replace underscore with "\u"
# replace OOA symbol `s` with \1234; where 1234 is `ord(s)`
    characters = [c if c in alphabet and c != "\n" else "\\{};".format(ord(c))
                  for c in esc_token]  # not sure about the "\n"-part
return "".join(characters) + "_"
def unescape_token(escaped_token: str) -> str:
"""Inverse function for escape_token."""
# Ends with underscore -> remove it
token = escaped_token
token = token[:-1] if token.endswith("_") else token
def match(m):
if m.group(1) is None:
return "_" if m.group(0) == "\\u" else "\\"
try:
return chr(int(m.group(1)))
except (ValueError, OverflowError):
return u"\u3013" # Unicode for undefined character.
# The substitution works because of the left-to-right nature of matching
return UNESCAPE_REGEX.sub(match, token)
def wordpiece_encode(sentence: List[str], vocabulary: Vocabulary) -> List[str]:
"""Convert tokens to subtokens using a vocabulary of subtokens.
A greedy implementation, as in t2t referenced above.
We search for the longest subtoken available in the vocabulary from left to
right.
"""
tokens = []
for token in sentence:
esc_token = escape_token(token, vocabulary.alphabet)
subtokens = []
current_subtoken_start = 0
token_len = len(esc_token)
while current_subtoken_start < len(esc_token):
# TODO: they optimize this by ranging from
# min(token_len, max_subtoken_len + start)
# this can be achieved by saving the len of longest word in vocab
for end in range(token_len, current_subtoken_start, -1):
subtoken = esc_token[current_subtoken_start:end]
if subtoken in vocabulary:
subtokens.append(subtoken)
current_subtoken_start = end
break
else: # executed if the loop is not exited by the break statement
raise AssertionError(
"No token substring found in the vocab ({})."
.format(esc_token[current_subtoken_start:]))
# TODO: they also optimize this by caching the segmentation of the
# escaped tokens.
tokens.extend(subtokens)
return tokens
def wordpiece_decode(sentence: List[str]) -> List[str]:
"""Postprocess the wordpieces into a sentence.
First, retokenize the sentence - join and split around underscores.
Second, unescape tokens throwing away any empty tokens encountered.
"""
retokenized = "".join(sentence).split("_")
unescaped = [unescape_token(tok) for tok in retokenized if tok]
return [tok for tok in unescaped if tok]
def wordpiece_decode_batch(sentences: List[List[str]]) -> List[List[str]]:
return [wordpiece_decode(s) for s in sentences]
def get_wordpiece_preprocessor(
vocabulary: Vocabulary) -> Callable[[List[str]], List[str]]:
check_argument_types()
return lambda s: wordpiece_encode(s, vocabulary)
# pylint: disable=invalid-name
# Syntactic sugar for configuration
WordpiecePreprocessor = get_wordpiece_preprocessor
WordpiecePostprocessor = wordpiece_decode_batch
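if __name__ == "__main__":
    # Minimal round-trip sketch. ``_ToyVocabulary`` is a hypothetical stand-in
    # exposing only the two things wordpiece_encode needs from a Vocabulary:
    # an ``alphabet`` set and ``__contains__`` over subtokens.
    class _ToyVocabulary:
        def __init__(self, subtokens, alphabet):
            self._subtokens = set(subtokens)
            self.alphabet = set(alphabet)

        def __contains__(self, item):
            return item in self._subtokens

    _vocab = _ToyVocabulary(["low", "er_"], alphabet=set("lower"))
    _pieces = wordpiece_encode(["lower"], _vocab)  # greedy split -> ['low', 'er_']
    print(_pieces)
    print(wordpiece_decode(_pieces))               # -> ['lower']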
| {
"content_hash": "326ac08b0ae8b5111fc5fb714f0cab9b",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 98,
"avg_line_length": 34.94308943089431,
"alnum_prop": 0.6582131223825035,
"repo_name": "ufal/neuralmonkey",
"id": "4a8fe4185010a4a8861b4f4a9395be1afb5fc386",
"size": "4298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neuralmonkey/processors/wordpiece.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13780"
},
{
"name": "HTML",
"bytes": "3116"
},
{
"name": "JavaScript",
"bytes": "2070"
},
{
"name": "Makefile",
"bytes": "2564"
},
{
"name": "Mask",
"bytes": "69384"
},
{
"name": "Mathematica",
"bytes": "1874"
},
{
"name": "Perl",
"bytes": "45129"
},
{
"name": "Python",
"bytes": "823152"
},
{
"name": "Shell",
"bytes": "4671"
}
],
"symlink_target": ""
} |
import json
import requests
resp = requests.get('http://samples.openweathermap.org/data/2.5/weather?zip=44143,us&appid=b1b15e88fa797225412429c1c50c122a1')
test = json.dumps(json.loads(resp.content), indent=4)
print("Test: " + test) | {
"content_hash": "e61a70dc714e02ecb642b404725af879",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 126,
"avg_line_length": 29.25,
"alnum_prop": 0.7735042735042735,
"repo_name": "snowsc/Python",
"id": "820f410d005574a638d2e599d701eba11952727a",
"size": "234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Misc/w.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1198"
}
],
"symlink_target": ""
} |
from ..service_func import service_func, meta_arg, func_error
class answer_question(service_func):
def __init__(self):
service_func.__init__(self, "/question/answer")
self.name = "Answer question"
self.description = "Answer the current question"
self.args.append(meta_arg("key", "Protection key", "none"))
self.args.append(meta_arg("valid", "Is the answer is good or not (true or false)", "none"))
self.args.append(meta_arg("next_team", "If the answer is invalid, next team to got a chance to answer, if it -1, the question is removed from current_question", "none"))
def execute(self, args, server):
key = args["key"]
valid = bool(args["valid"])
if server.key == key:
if server.game_data.current_question is not None:
if valid:
server.game_data.valid_answer()
else:
next_team = int(args["next_team"])
if next_team == -1:
# question is ended, nobody gets the points
server.game_data.current_question = None
else:
# this team got a chance to answer this question
server.game_data.current_question.team = next_team
else:
raise func_error("No question waiting for an answer")
else:
raise func_error("Invalid key")
def answer(self):
# nothing to send here
        return {}
| {
"content_hash": "ed14324710026d8860a965ec7fa797d3",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 177,
"avg_line_length": 40.421052631578945,
"alnum_prop": 0.5520833333333334,
"repo_name": "jordsti/hacker-jeopardy",
"id": "f3b1c3fc3a26dc7acef0f6b61a879149401d8d17",
"size": "1603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webservice/funcs/answer_question.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "49"
},
{
"name": "HTML",
"bytes": "2441"
},
{
"name": "JavaScript",
"bytes": "7658"
},
{
"name": "Python",
"bytes": "29104"
}
],
"symlink_target": ""
} |
from tastypie.authorization import Authorization
class OwnerBasedAuthorization(Authorization):
def read_list(self, object_list, bundle):
return object_list.filter(owner=bundle.request.user)
def read_detail(self, object_list, bundle):
return bundle.obj.owner == bundle.request.user
# TODO check tastypie sources, seems that to date tastypie
# does not use this method
def create_list(self, object_list, bundle):
return object_list.filter(owner=bundle.request.user)
def create_detail(self, object_list, bundle):
return bundle.obj.owner == bundle.request.user
# TODO check tastypie sources, seems that for now tastypie
# does not use this method
def update_list(self, object_list, bundle):
return object_list.filter(owner=bundle.request.user)
def update_detail(self, object_list, bundle):
return bundle.obj.owner == bundle.request.user
def delete_list(self, object_list, bundle):
return object_list.filter(owner=bundle.request.user)
def delete_detail(self, object_list, bundle):
return bundle.obj.owner == bundle.request.user
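# Illustrative wiring (hypothetical resource/model names, shown as a comment so
# this module keeps no extra imports):
#
#   from tastypie.resources import ModelResource
#
#   class TaskResource(ModelResource):
#       class Meta:
#           queryset = Task.objects.all()   # any model with an ``owner`` field
#           authorization = OwnerBasedAuthorization()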
| {
"content_hash": "962705a41e9716f945bd198e6b366851",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 62,
"avg_line_length": 36.903225806451616,
"alnum_prop": 0.708916083916084,
"repo_name": "pavelkuchin/tracktrains",
"id": "5d491eb1004194af4d0adc666f62e3ef32d2aa77",
"size": "1144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/authorization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "607"
},
{
"name": "Python",
"bytes": "103526"
},
{
"name": "Shell",
"bytes": "91"
}
],
"symlink_target": ""
} |
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
from typing import Dict, Optional, Tuple, Union, cast
from ._typing import IcmpReplies
class PingStats:
def __init__(self, *args, **kwargs) -> None:
self.__destination = kwargs.pop("destination", None)
self.__packet_transmit = kwargs.pop("packet_transmit", None)
self.__packet_receive = kwargs.pop("packet_receive", None)
self.__rtt_min = kwargs.pop("rtt_min", None)
self.__rtt_avg = kwargs.pop("rtt_avg", None)
self.__rtt_max = kwargs.pop("rtt_max", None)
self.__rtt_mdev = kwargs.pop("rtt_mdev", None)
self.__duplicates = kwargs.pop("duplicates", None)
self.__icmp_replies = kwargs.pop("icmp_replies", [])
@property
def destination(self) -> str:
"""
The ping destination.
Returns:
|str|:
"""
return self.__destination
@property
def packet_transmit(self) -> Optional[int]:
"""
Number of packets transmitted.
Returns:
|int|:
"""
return self.__packet_transmit
@property
def packet_receive(self) -> Optional[int]:
"""
Number of packets received.
Returns:
|int|:
"""
return self.__packet_receive
@property
def packet_loss_count(self) -> Optional[int]:
"""
Number of packet losses.
Returns:
|int|: |None| if the value is not a number.
"""
try:
return cast(int, self.packet_transmit) - cast(int, self.packet_receive)
except TypeError:
return None
@property
def packet_loss_rate(self) -> Optional[float]:
"""
Percentage of packet loss |percent_unit|.
Returns:
|float|: |None| if the value is not a number.
"""
try:
return (cast(int, self.packet_loss_count) / cast(int, self.packet_transmit)) * 100
except (TypeError, ZeroDivisionError, OverflowError):
return None
@property
def rtt_min(self) -> Optional[float]:
"""
Minimum round trip time of transmitted ICMP packets |msec_unit|.
Returns:
|float|:
"""
return self.__rtt_min
@property
def rtt_avg(self) -> Optional[float]:
"""
Average round trip time of transmitted ICMP packets |msec_unit|.
Returns:
|float|:
"""
return self.__rtt_avg
@property
def rtt_max(self) -> Optional[float]:
"""
Maximum round trip time of transmitted ICMP packets |msec_unit|.
Returns:
|float|:
"""
return self.__rtt_max
@property
def rtt_mdev(self) -> Optional[float]:
"""
Standard deviation of transmitted ICMP packets.
Returns:
|float|: |None| when parsing Windows ping result.
"""
return self.__rtt_mdev
@property
def packet_duplicate_count(self) -> Optional[int]:
"""
Number of duplicated packets.
Returns:
|int|: |None| when parsing Windows ping result.
"""
return self.__duplicates
@property
def packet_duplicate_rate(self) -> Optional[float]:
"""
Percentage of duplicated packets |percent_unit|.
Returns:
|float|: |None| if the value is not a number.
"""
try:
return (cast(int, self.packet_duplicate_count) / cast(int, self.packet_receive)) * 100
except (TypeError, ZeroDivisionError, OverflowError):
return None
@property
def icmp_replies(self) -> IcmpReplies:
"""
ICMP packet reply information.
        .. note::
``time<1ms`` considered as ``time=1``
Returns:
|list| of |dict|:
"""
return self.__icmp_replies
def is_empty(self):
return all(
[
self.destination is None,
self.packet_transmit is None,
self.packet_receive is None,
self.packet_loss_count is None,
self.packet_loss_rate is None,
self.packet_duplicate_count is None,
self.packet_duplicate_rate is None,
self.rtt_min is None,
self.rtt_avg is None,
self.rtt_max is None,
self.rtt_mdev is None,
not self.icmp_replies,
]
)
def as_dict(
self, include_icmp_replies: bool = False
) -> Dict[str, Union[str, int, float, IcmpReplies, None]]:
"""
ping statistics.
Returns:
|dict|:
Examples:
>>> import pingparsing
>>> parser = pingparsing.PingParsing()
>>> parser.parse(ping_result)
>>> parser.as_dict()
{
"destination": "google.com",
"packet_transmit": 60,
"packet_receive": 60,
"packet_loss_rate": 0.0,
"packet_loss_count": 0,
"rtt_min": 61.425,
"rtt_avg": 99.731,
"rtt_max": 212.597,
"rtt_mdev": 27.566,
"packet_duplicate_rate": 0.0,
"packet_duplicate_count": 0
}
"""
d: Dict[str, Union[str, int, float, IcmpReplies, None]] = {
"destination": self.destination,
"packet_transmit": self.packet_transmit,
"packet_receive": self.packet_receive,
"packet_loss_count": self.packet_loss_count,
"packet_loss_rate": self.packet_loss_rate,
"rtt_min": self.rtt_min,
"rtt_avg": self.rtt_avg,
"rtt_max": self.rtt_max,
"rtt_mdev": self.rtt_mdev,
"packet_duplicate_count": self.packet_duplicate_count,
"packet_duplicate_rate": self.packet_duplicate_rate,
}
if include_icmp_replies:
d["icmp_replies"] = self.icmp_replies
return d
def as_tuple(self) -> Tuple:
"""
ping statistics.
Returns:
|namedtuple|:
Examples:
>>> import pingparsing
>>> parser = pingparsing.PingParsing()
>>> parser.parse(ping_result)
>>> parser.as_tuple()
PingResult(destination='google.com', packet_transmit=60, packet_receive=60, packet_loss_rate=0.0, packet_loss_count=0, rtt_min=61.425, rtt_avg=99.731, rtt_max=212.597, rtt_mdev=27.566, packet_duplicate_rate=0.0, packet_duplicate_count=0)
""" # noqa
from collections import namedtuple
ping_result = self.as_dict()
return namedtuple("PingStatsTuple", ping_result.keys())(**ping_result) # type: ignore
| {
"content_hash": "d175831cd0f1992b91a9e6ae041bf4fa",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 249,
"avg_line_length": 27.48605577689243,
"alnum_prop": 0.5254384693433831,
"repo_name": "thombashi/pingparsing",
"id": "3801af2f1359cb45721ed1b22c8b155c2eab6b2c",
"size": "6899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pingparsing/_stats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "509"
},
{
"name": "Python",
"bytes": "100698"
},
{
"name": "Shell",
"bytes": "265"
}
],
"symlink_target": ""
} |
"""Test site urls.
URLs
----
project/<project_slug>/
content/<content_id>/
"""
from django.conf.urls import patterns, url
from apps.test_crawl import views
urlpatterns = patterns('',
url(r'^content/(?P<content_id>[\w-]+)/$', views.ContentView.as_view(),
name='content')
)
| {
"content_hash": "c153c6349063d755532b8117e2ea0c31",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 74,
"avg_line_length": 17.235294117647058,
"alnum_prop": 0.6382252559726962,
"repo_name": "0x0mar/memex-explorer",
"id": "d4eef146c9d0f82df85a5c6dc00c6b141170e59b",
"size": "293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/apps/test_crawl/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "40546"
},
{
"name": "HTML",
"bytes": "29000"
},
{
"name": "JavaScript",
"bytes": "75604"
},
{
"name": "Nginx",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "255801"
},
{
"name": "Python",
"bytes": "194008"
},
{
"name": "Ruby",
"bytes": "973"
},
{
"name": "Shell",
"bytes": "1552"
}
],
"symlink_target": ""
} |
from sys import argv
from sys import exit
import sys
import re
import os
from subprocess import Popen,PIPE
# read in command line args
params = list(argv)
del params[0]
imgToken = params[0:2]
annoToken = params[2:4]
queryFile = params[4:6]
useSemaphore = params[6:8]
serviceLocation = params[8:10]
dilXY = params[10:12]
dilZ = params[12:14]
thresh = params[14:16]
emCube = params[16:18]
tokenFile = params[18:20]
labelOut = params[20:22]
print labelOut
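# Each slice above keeps a (flag, value) pair from the command line, so a
# hypothetical invocation passes something like
#   ... <img flag> <imgToken> <anno flag> <annoToken> ... <label flag> <labelOut>
# and e.g. imgToken == ['<img flag>', '<token>'] (the actual flag names are not
# shown here and are left as-is).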
# get root directory of framework
frameworkRoot = os.getenv("CAJAL3D_LOCATION")
if frameworkRoot is None:
raise Exception('You must set the CAJAL3D_LOCATION environment variable so the wrapper knows where the framework is!')
# Gen path of matlab wrapper
wrapper = os.path.join(frameworkRoot, 'api', 'matlab','wrapper','basicWrapper.py')
# Build call to EM Cube Cutout
args = [wrapper] + ["packages/cubeCutout/cubeCutout.m"] + imgToken + queryFile + emCube + useSemaphore + ["-d", "0"] + serviceLocation + ["-b", "0"]
print args
# Call Cube Cutout
process = Popen(args, stdout=PIPE, stderr=PIPE)
output = process.communicate()
proc_error = output[1]
proc_output = output[0]
exit_code = process.wait()
# Write std out stream
print "#######################\n"
print "Output From EM Cube Cutout\n"
print "#######################\n\n\n"
print proc_output
# If exit code != 0 exit
if exit_code != 0:
# it bombed. Write out matlab errors and return error code
sys.stderr.write("Error from Cube Cutout:\n\n")
sys.stderr.write(proc_error)
exit(exit_code)
#emCube,dbToken, dilXY, dilZ, thresh,useSemaphore, errorPageLocation, serviceLocation, varargin
# Build call to Segment Watershed
args = [wrapper] + ["packages/segmentWatershed/segmentWatershedWrapper.m"] + emCube + annoToken + dilXY + dilZ + thresh + useSemaphore + ["-s", "/mnt/pipeline/errorPages"] + serviceLocation + labelOut + tokenFile #+ ["-b", "0"]
print 'made it'
print args
# Call Segment Watershed Detector
process = Popen(args, stdout=PIPE, stderr=PIPE)
output = process.communicate()
proc_error2 = output[1]
proc_output2 = output[0]
exit_code2 = process.wait()
# Write std out stream
print "########################################\n"
print "Output From Membrane Detector\n"
print "########################################\n\n\n"
print proc_output2
# If exit code != 0 exit
if exit_code2 != 0:
# it bombed. Write out matlab errors and return error code
sys.stderr.write("Error from Segment Watershd:\n\n")
sys.stderr.write(proc_error2)
exit(exit_code2)
| {
"content_hash": "cef0e6a93cdbfb385bb38d7a227b6247",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 228,
"avg_line_length": 30,
"alnum_prop": 0.6845238095238095,
"repo_name": "iscoe/cajal3d-i2g",
"id": "fb4388698c9fb9304973bda5effbf344892e83a4",
"size": "3932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/segment_watershed/segmentWatershed.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "582"
},
{
"name": "C",
"bytes": "149"
},
{
"name": "C++",
"bytes": "12750"
},
{
"name": "M",
"bytes": "92"
},
{
"name": "Makefile",
"bytes": "7251"
},
{
"name": "Matlab",
"bytes": "193247"
},
{
"name": "Python",
"bytes": "193988"
},
{
"name": "Shell",
"bytes": "239"
}
],
"symlink_target": ""
} |
"""
WSGI config for eeep project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eeep.settings")
application = get_wsgi_application()
| {
"content_hash": "8dbfe02df65ff7a01b1892a88eeb5f6c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.0625,
"alnum_prop": 0.7662337662337663,
"repo_name": "lowitty/eeep",
"id": "cceb1f396fa34fd7ad9d3d87d73c2fee84a5e2a5",
"size": "385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eeep/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1016253"
},
{
"name": "HTML",
"bytes": "459900"
},
{
"name": "JavaScript",
"bytes": "668361"
},
{
"name": "Python",
"bytes": "48455"
}
],
"symlink_target": ""
} |
from datetime import datetime
try:
import simplejson as json
except ImportError:
import json
from libcloud.utils.py3 import httplib
from libcloud.utils.misc import reverse_dict
from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm
from libcloud.loadbalancer.base import DEFAULT_ALGORITHM
from libcloud.common.types import LibcloudError
from libcloud.common.base import JsonResponse, PollingConnection
from libcloud.loadbalancer.types import State, MemberCondition
from libcloud.common.openstack import OpenStackBaseConnection,\
OpenStackDriverMixin
from libcloud.common.rackspace import AUTH_URL_US, AUTH_URL_UK
class RackspaceResponse(JsonResponse):
def parse_body(self):
if not self.body:
return None
return super(RackspaceResponse, self).parse_body()
def success(self):
return 200 <= int(self.status) <= 299
class RackspaceHealthMonitor(object):
"""
@param type: type of load balancer. currently CONNECT (connection
monitoring), HTTP, HTTPS (connection and HTTP
monitoring) are supported.
@type type: C{str}
@param delay: minimum seconds to wait before executing the health
monitor. (Must be between 1 and 3600)
@type delay: C{int}
@param timeout: maximum seconds to wait when establishing a
connection before timing out. (Must be between 1
and 3600)
@type timeout: C{int}
@param attempts_before_deactivation: Number of monitor failures
before removing a node from
rotation. (Must be between 1
and 10)
@type attempts_before_deactivation: C{int}
"""
def __init__(self, type, delay, timeout, attempts_before_deactivation):
self.type = type
self.delay = delay
self.timeout = timeout
self.attempts_before_deactivation = attempts_before_deactivation
def __repr__(self):
return ('<RackspaceHealthMonitor: type=%s, delay=%d, timeout=%d, '
'attempts_before_deactivation=%d>' %
(self.type, self.delay, self.timeout,
self.attempts_before_deactivation))
def _to_dict(self):
return {
'type': self.type,
'delay': self.delay,
'timeout': self.timeout,
'attemptsBeforeDeactivation': self.attempts_before_deactivation
}
class RackspaceHTTPHealthMonitor(RackspaceHealthMonitor):
"""
A HTTP health monitor adds extra features to a Rackspace health monitor.
@param path: the HTTP path to monitor.
@type path: C{str}
@param body_regex: Regular expression used to evaluate the body of
the HTTP response.
@type body_regex: C{str}
@param status_regex: Regular expression used to evaluate the HTTP
status code of the response.
@type status_regex: C{str}
"""
def __init__(self, type, delay, timeout, attempts_before_deactivation,
path, body_regex, status_regex):
super(RackspaceHTTPHealthMonitor, self).__init__(
type, delay, timeout, attempts_before_deactivation)
self.path = path
self.body_regex = body_regex
self.status_regex = status_regex
def __repr__(self):
return ('<RackspaceHTTPHealthMonitor: type=%s, delay=%d, timeout=%d, '
'attempts_before_deactivation=%d, path=%s, body_regex=%s, '
'status_regex=%s>' %
(self.type, self.delay, self.timeout,
self.attempts_before_deactivation, self.path, self.body_regex,
self.status_regex))
def _to_dict(self):
super_dict = super(RackspaceHTTPHealthMonitor, self)._to_dict()
super_dict['path'] = self.path
super_dict['statusRegex'] = self.status_regex
if self.body_regex:
super_dict['bodyRegex'] = self.body_regex
return super_dict
class RackspaceConnectionThrottle(object):
"""
@param min_connections: Minimum number of connections per IP address
before applying throttling.
@type min_connections: C{int}
    @param max_connections: Maximum number of connections per IP address.
(Must be between 0 and 100000, 0 allows an
unlimited number of connections.)
@type max_connections: C{int}
@param max_connection_rate: Maximum number of connections allowed
from a single IP address within the
given rate_interval_seconds. (Must be
between 0 and 100000, 0 allows an
unlimited number of connections.)
@type max_connection_rate: C{int}
@param rate_interval_seconds: Interval at which the
max_connection_rate is enforced.
(Must be between 1 and 3600.)
@type rate_interval_seconds: C{int}
"""
def __init__(self, min_connections, max_connections,
max_connection_rate, rate_interval_seconds):
self.min_connections = min_connections
self.max_connections = max_connections
self.max_connection_rate = max_connection_rate
self.rate_interval_seconds = rate_interval_seconds
def __repr__(self):
return ('<RackspaceConnectionThrottle: min_connections=%d, '
'max_connections=%d, max_connection_rate=%d, '
'rate_interval_seconds=%d>' %
(self.min_connections, self.max_connections,
self.max_connection_rate, self.rate_interval_seconds))
def _to_dict(self):
return {
'maxConnections': self.max_connections,
'minConnections': self.min_connections,
'maxConnectionRate': self.max_connection_rate,
'rateInterval': self.rate_interval_seconds
}
class RackspaceAccessRuleType(object):
ALLOW = 0
DENY = 1
_RULE_TYPE_STRING_MAP = {
ALLOW: 'ALLOW',
DENY: 'DENY'
}
class RackspaceAccessRule(object):
"""
An access rule allows or denies traffic to a Load Balancer based on the
incoming IPs.
@param id: Unique identifier to refer to this rule by.
@type id: C{str}
@param rule_type: RackspaceAccessRuleType.ALLOW or
RackspaceAccessRuleType.DENY.
@type id: C{int}
@param address: IP address or cidr (can be IPv4 or IPv6).
@type address: C{str}
"""
def __init__(self, id=None, rule_type=None, address=None):
self.id = id
self.rule_type = rule_type
self.address = address
def _to_dict(self):
type_string =\
RackspaceAccessRuleType._RULE_TYPE_STRING_MAP[self.rule_type]
as_dict = {
'type': type_string,
'address': self.address
}
if self.id is not None:
as_dict['id'] = self.id
return as_dict
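def _example_policy_payloads():
    """Illustrative only: shows the JSON-ready dicts produced by the helper
    classes above (field values are made up, not Rackspace defaults)."""
    monitor = RackspaceHTTPHealthMonitor(
        type='HTTP', delay=10, timeout=5, attempts_before_deactivation=2,
        path='/healthcheck', body_regex='', status_regex='^[23][0-9][0-9]$')
    throttle = RackspaceConnectionThrottle(
        min_connections=10, max_connections=200,
        max_connection_rate=50, rate_interval_seconds=60)
    rule = RackspaceAccessRule(
        rule_type=RackspaceAccessRuleType.DENY, address='203.0.113.0/24')
    return monitor._to_dict(), throttle._to_dict(), rule._to_dict()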
class RackspaceConnection(OpenStackBaseConnection, PollingConnection):
responseCls = RackspaceResponse
auth_url = AUTH_URL_US
poll_interval = 2
timeout = 80
def __init__(self, user_id, key, secure=True, ex_force_region='ord',
**kwargs):
super(RackspaceConnection, self).__init__(user_id, key, secure,
**kwargs)
self.api_version = 'v1.0'
self.accept_format = 'application/json'
self._ex_force_region = ex_force_region
def request(self, action, params=None, data='', headers=None,
method='GET'):
if not headers:
headers = {}
if not params:
params = {}
if method in ('POST', 'PUT'):
headers['Content-Type'] = 'application/json'
if method == 'GET':
self._add_cache_busting_to_params(params)
return super(RackspaceConnection, self).request(
action=action, params=params,
data=data, method=method, headers=headers)
def get_poll_request_kwargs(self, response, context, request_kwargs):
return {'action': request_kwargs['action'],
'method': 'GET'}
def has_completed(self, response):
state = response.object['loadBalancer']['status']
if state == 'ERROR':
raise LibcloudError("Load balancer entered an ERROR state.",
driver=self.driver)
return state == 'ACTIVE'
def get_endpoint(self):
"""
FIXME:
        Dirty, dirty hack. Load balancers do not show up in the auth 1.1
        service catalog, so we build the endpoint from the servers URL.
"""
if self._auth_version == "1.1":
ep = self.service_catalog.get_endpoint(name="cloudServers")
return self._construct_loadbalancer_endpoint_from_servers_endpoint(
ep)
elif "2.0" in self._auth_version:
ep = self.service_catalog.get_endpoint(name="cloudServers",
service_type="compute",
region=None)
return self._construct_loadbalancer_endpoint_from_servers_endpoint(
ep)
else:
raise LibcloudError(
"Auth version %s not supported" % self._auth_version)
def _construct_loadbalancer_endpoint_from_servers_endpoint(self, ep):
if 'publicURL' in ep:
loadbalancer_prefix = "%s.loadbalancers" % self._ex_force_region
return ep['publicURL'].replace("servers", loadbalancer_prefix)
else:
raise LibcloudError('Could not find specified endpoint')
class RackspaceUKConnection(RackspaceConnection):
auth_url = AUTH_URL_UK
class RackspaceLBDriver(Driver, OpenStackDriverMixin):
connectionCls = RackspaceConnection
api_name = 'rackspace_lb'
name = 'Rackspace LB'
website = 'http://www.rackspace.com/'
LB_STATE_MAP = {
'ACTIVE': State.RUNNING,
'BUILD': State.PENDING,
'ERROR': State.ERROR,
'DELETED': State.DELETED,
'PENDING_UPDATE': State.PENDING,
'PENDING_DELETE': State.PENDING
}
LB_MEMBER_CONDITION_MAP = {
'ENABLED': MemberCondition.ENABLED,
'DISABLED': MemberCondition.DISABLED,
'DRAINING': MemberCondition.DRAINING
}
CONDITION_LB_MEMBER_MAP = reverse_dict(LB_MEMBER_CONDITION_MAP)
_VALUE_TO_ALGORITHM_MAP = {
'RANDOM': Algorithm.RANDOM,
'ROUND_ROBIN': Algorithm.ROUND_ROBIN,
'LEAST_CONNECTIONS': Algorithm.LEAST_CONNECTIONS,
'WEIGHTED_ROUND_ROBIN': Algorithm.WEIGHTED_ROUND_ROBIN,
'WEIGHTED_LEAST_CONNECTIONS': Algorithm.WEIGHTED_LEAST_CONNECTIONS
}
_ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)
def __init__(self, *args, **kwargs):
OpenStackDriverMixin.__init__(self, *args, **kwargs)
self._ex_force_region = kwargs.pop('ex_force_region', None)
super(RackspaceLBDriver, self).__init__(*args, **kwargs)
def _ex_connection_class_kwargs(self):
kwargs = self.openstack_connection_kwargs()
if self._ex_force_region:
kwargs['ex_force_region'] = self._ex_force_region
return kwargs
def list_protocols(self):
return self._to_protocols(
self.connection.request('/loadbalancers/protocols').object)
def ex_list_protocols_with_default_ports(self):
"""
List protocols with default ports.
@rtype: C{list} of C{tuple}
@return: A list of protocols with default ports included.
"""
return self._to_protocols_with_default_ports(
self.connection.request('/loadbalancers/protocols').object)
def list_balancers(self, ex_member_address=None):
"""
@inherits: L{Driver.list_balancers}
@param ex_member_address: Optional IP address of the attachment member.
If provided, only the load balancers which
have this member attached will be returned.
@type ex_member_address: C{str}
"""
params = {}
if ex_member_address:
params['nodeaddress'] = ex_member_address
return self._to_balancers(
self.connection.request('/loadbalancers', params=params).object)
def create_balancer(self, name, members, protocol='http',
port=80, algorithm=DEFAULT_ALGORITHM):
return self.ex_create_balancer(name, members, protocol, port,
algorithm)
def ex_create_balancer(self, name, members, protocol='http',
port=80, algorithm=DEFAULT_ALGORITHM, vip='PUBLIC'):
"""
Creates a new load balancer instance
@param name: Name of the new load balancer (required)
@type name: C{str}
@param members: C{list} ofL{Member}s to attach to balancer
@type members: C{list} of L{Member}
@param protocol: Loadbalancer protocol, defaults to http.
@type protocol: C{str}
@param port: Port the load balancer should listen on, defaults to 80
@type port: C{str}
@param algorithm: Load balancing algorithm, defaults to
LBAlgorithm.ROUND_ROBIN
@type algorithm: L{Algorithm}
@param vip: Virtual ip type of PUBLIC, SERVICENET, or ID of a virtual
ip
@type vip: C{str}
@rtype: L{LoadBalancer}
"""
balancer_attrs = self._kwargs_to_mutable_attrs(
name=name,
protocol=protocol,
port=port,
algorithm=algorithm,
vip=vip)
balancer_attrs.update({
'nodes': [self._member_attributes(member) for member in members],
})
balancer_object = {"loadBalancer": balancer_attrs}
resp = self.connection.request('/loadbalancers',
method='POST',
data=json.dumps(balancer_object))
return self._to_balancer(resp.object['loadBalancer'])
def _member_attributes(self, member):
member_attributes = {'address': member.ip,
'port': member.port}
member_attributes.update(self._kwargs_to_mutable_member_attrs(
**member.extra))
# If the condition is not specified on the member, then it should be
# set to ENABLED by default
if 'condition' not in member_attributes:
member_attributes['condition'] =\
self.CONDITION_LB_MEMBER_MAP[MemberCondition.ENABLED]
return member_attributes
def destroy_balancer(self, balancer):
uri = '/loadbalancers/%s' % (balancer.id)
resp = self.connection.request(uri, method='DELETE')
return resp.status == httplib.ACCEPTED
def ex_destroy_balancers(self, balancers):
"""
Destroys a list of Balancers (the API supports up to 10).
@param balancers: A list of Balancers to destroy.
@type balancers: C{list} of L{LoadBalancer}
@return: Returns whether the destroy request was accepted.
@rtype: C{bool}
"""
ids = [('id', balancer.id) for balancer in balancers]
resp = self.connection.request('/loadbalancers',
method='DELETE',
params=ids)
return resp.status == httplib.ACCEPTED
def get_balancer(self, balancer_id):
uri = '/loadbalancers/%s' % (balancer_id)
resp = self.connection.request(uri)
return self._to_balancer(resp.object["loadBalancer"])
def balancer_attach_member(self, balancer, member):
member_object = {"nodes": [self._member_attributes(member)]}
uri = '/loadbalancers/%s/nodes' % (balancer.id)
resp = self.connection.request(uri, method='POST',
data=json.dumps(member_object))
return self._to_members(resp.object, balancer)[0]
def ex_balancer_attach_members(self, balancer, members):
"""
Attaches a list of members to a load balancer.
@param balancer: The Balancer to which members will be attached.
@type balancer: L{LoadBalancer}
@param members: A list of Members to attach.
@type members: C{list} of L{Member}
@rtype: C{list} of L{Member}
"""
member_objects = {"nodes": [self._member_attributes(member) for member
in members]}
uri = '/loadbalancers/%s/nodes' % (balancer.id)
resp = self.connection.request(uri, method='POST',
data=json.dumps(member_objects))
return self._to_members(resp.object, balancer)
def balancer_detach_member(self, balancer, member):
# Loadbalancer always needs to have at least 1 member.
# Last member cannot be detached. You can only disable it or destroy
# the balancer.
uri = '/loadbalancers/%s/nodes/%s' % (balancer.id, member.id)
resp = self.connection.request(uri, method='DELETE')
return resp.status == httplib.ACCEPTED
def ex_balancer_detach_members(self, balancer, members):
"""
Detaches a list of members from a balancer (the API supports up to 10).
This method blocks until the detach request has been processed and the
balancer is in a RUNNING state again.
@param balancer: The Balancer to detach members from.
@type balancer: L{LoadBalancer}
@param members: A list of Members to detach.
@type members: C{list} of L{Member}
@return: Updated Balancer.
@rtype: L{LoadBalancer}
"""
accepted = self.ex_balancer_detach_members_no_poll(balancer, members)
if not accepted:
msg = 'Detach members request was not accepted'
raise LibcloudError(msg, driver=self)
return self._get_updated_balancer(balancer)
def ex_balancer_detach_members_no_poll(self, balancer, members):
"""
Detaches a list of members from a balancer (the API supports up to 10).
This method returns immediately.
@param balancer: The Balancer to detach members from.
@type balancer: L{LoadBalancer}
@param members: A list of Members to detach.
@type members: C{list} of L{Member}
@return: Returns whether the detach request was accepted.
@rtype: C{bool}
"""
uri = '/loadbalancers/%s/nodes' % (balancer.id)
ids = [('id', member.id) for member in members]
resp = self.connection.request(uri, method='DELETE', params=ids)
return resp.status == httplib.ACCEPTED
def balancer_list_members(self, balancer):
uri = '/loadbalancers/%s/nodes' % (balancer.id)
data = self.connection.request(uri).object
return self._to_members(data, balancer)
def update_balancer(self, balancer, **kwargs):
attrs = self._kwargs_to_mutable_attrs(**kwargs)
resp = self.connection.async_request(
action='/loadbalancers/%s' % balancer.id,
method='PUT',
data=json.dumps(attrs))
return self._to_balancer(resp.object["loadBalancer"])
def ex_update_balancer_no_poll(self, balancer, **kwargs):
"""
Update balancer no poll.
@inherits: L{Driver.update_balancer}
"""
attrs = self._kwargs_to_mutable_attrs(**kwargs)
resp = self.connection.request(
action='/loadbalancers/%s' % balancer.id,
method='PUT',
data=json.dumps(attrs))
return resp.status == httplib.ACCEPTED
def ex_balancer_update_member(self, balancer, member, **kwargs):
"""
Updates a Member's extra attributes for a Balancer. The attributes can
include 'weight' or 'condition'. This method blocks until the update
request has been processed and the balancer is in a RUNNING state
again.
@param balancer: Balancer to update the member on.
@type balancer: L{LoadBalancer}
@param member: Member which should be used
@type member: L{Member}
@keyword **kwargs: New attributes. Should contain either 'weight'
or 'condition'. 'condition' can be set to 'ENABLED', 'DISABLED'.
or 'DRAINING'. 'weight' can be set to a positive integer between
1 and 100, with a higher weight indicating that the node will receive
more traffic (assuming the Balancer is using a weighted algorithm).
@type **kwargs: C{dict}
@return: Updated Member.
@rtype: L{Member}
"""
accepted = self.ex_balancer_update_member_no_poll(
balancer, member, **kwargs)
if not accepted:
msg = 'Update member attributes was not accepted'
raise LibcloudError(msg, driver=self)
balancer = self._get_updated_balancer(balancer)
members = balancer.extra['members']
updated_members = [m for m in members if m.id == member.id]
if not updated_members:
raise LibcloudError('Could not find updated member')
return updated_members[0]
def ex_balancer_update_member_no_poll(self, balancer, member, **kwargs):
"""
Updates a Member's extra attributes for a Balancer. The attribute can
include 'weight' or 'condition'. This method returns immediately.
@param balancer: Balancer to update the member on.
@type balancer: L{LoadBalancer}
@param member: Member which should be used
@type member: L{Member}
@keyword **kwargs: New attributes. Should contain either 'weight'
or 'condition'. 'condition' can be set to 'ENABLED', 'DISABLED'.
or 'DRAINING'. 'weight' can be set to a positive integer between
1 and 100, with a higher weight indicating that the node will receive
more traffic (assuming the Balancer is using a weighted algorithm).
@type **kwargs: C{dict}
@return: Returns whether the update request was accepted.
@rtype: C{bool}
"""
resp = self.connection.request(
action='/loadbalancers/%s/nodes/%s' % (balancer.id, member.id),
method='PUT',
data=json.dumps(self._kwargs_to_mutable_member_attrs(**kwargs))
)
return resp.status == httplib.ACCEPTED
def ex_list_algorithm_names(self):
"""
Lists algorithms supported by the API. Returned as strings because
this list may change in the future.
@rtype: C{list} of C{str}
"""
response = self.connection.request('/loadbalancers/algorithms')
return [a["name"].upper() for a in response.object["algorithms"]]
def ex_get_balancer_error_page(self, balancer):
"""
List error page configured for the specified load balancer.
@param balancer: Balancer which should be used
@type balancer: L{LoadBalancer}
@rtype: C{str}
"""
uri = '/loadbalancers/%s/errorpage' % (balancer.id)
resp = self.connection.request(uri)
return resp.object["errorpage"]["content"]
def ex_balancer_access_list(self, balancer):
"""
List the access list.
@param balancer: Balancer which should be used
@type balancer: L{LoadBalancer}
@rtype: C{list} of L{RackspaceAccessRule}
"""
uri = '/loadbalancers/%s/accesslist' % (balancer.id)
resp = self.connection.request(uri)
return [self._to_access_rule(el) for el in resp.object["accessList"]]
def _get_updated_balancer(self, balancer):
"""
Updating a balancer's attributes puts a balancer into
'PENDING_UPDATE' status. Wait until the balancer is
back in 'ACTIVE' status and then return the individual
balancer details call.
"""
resp = self.connection.async_request(
action='/loadbalancers/%s' % balancer.id,
method='GET')
return self._to_balancer(resp.object['loadBalancer'])
def ex_update_balancer_health_monitor(self, balancer, health_monitor):
"""
Sets a Balancer's health monitor. This method blocks until the update
request has been processed and the balancer is in a RUNNING state
again.
@param balancer: Balancer to update.
@type balancer: L{LoadBalancer}
@param health_monitor: Health Monitor for the balancer.
@type health_monitor: L{RackspaceHealthMonitor}
@return: Updated Balancer.
@rtype: L{LoadBalancer}
"""
accepted = self.ex_update_balancer_health_monitor_no_poll(
balancer, health_monitor)
if not accepted:
msg = 'Update health monitor request not accepted'
raise LibcloudError(msg, driver=self)
return self._get_updated_balancer(balancer)
def ex_update_balancer_health_monitor_no_poll(self, balancer,
health_monitor):
"""
Sets a Balancer's health monitor. This method returns immediately.
@param balancer: Balancer to update health monitor on.
@type balancer: L{LoadBalancer}
@param health_monitor: Health Monitor for the balancer.
@type health_monitor: L{RackspaceHealthMonitor}
@return: Returns whether the update request was accepted.
@rtype: C{bool}
"""
uri = '/loadbalancers/%s/healthmonitor' % (balancer.id)
resp = self.connection.request(
uri, method='PUT', data=json.dumps(health_monitor._to_dict()))
return resp.status == httplib.ACCEPTED
def ex_disable_balancer_health_monitor(self, balancer):
"""
Disables a Balancer's health monitor. This method blocks until the
disable request has been processed and the balancer is in a RUNNING
state again.
@param balancer: Balancer to disable health monitor on.
@type balancer: L{LoadBalancer}
@return: Updated Balancer.
@rtype: L{LoadBalancer}
"""
if not self.ex_disable_balancer_health_monitor_no_poll(balancer):
msg = 'Disable health monitor request not accepted'
raise LibcloudError(msg, driver=self)
return self._get_updated_balancer(balancer)
def ex_disable_balancer_health_monitor_no_poll(self, balancer):
"""
Disables a Balancer's health monitor. This method returns
immediately.
@param balancer: Balancer to disable health monitor on.
@type balancer: L{LoadBalancer}
@return: Returns whether the disable request was accepted.
@rtype: C{bool}
"""
uri = '/loadbalancers/%s/healthmonitor' % (balancer.id)
resp = self.connection.request(uri,
method='DELETE')
return resp.status == httplib.ACCEPTED
def ex_update_balancer_connection_throttle(self, balancer,
connection_throttle):
"""
Updates a Balancer's connection throttle. This method blocks until
the update request has been processed and the balancer is in a
RUNNING state again.
@param balancer: Balancer to update connection throttle on.
@type balancer: L{LoadBalancer}
@param connection_throttle: Connection Throttle for the balancer.
@type connection_throttle: L{RackspaceConnectionThrottle}
@return: Updated Balancer.
@rtype: L{LoadBalancer}
"""
accepted = self.ex_update_balancer_connection_throttle_no_poll(
balancer, connection_throttle)
if not accepted:
msg = 'Update connection throttle request not accepted'
raise LibcloudError(msg, driver=self)
return self._get_updated_balancer(balancer)
def ex_update_balancer_connection_throttle_no_poll(self, balancer,
connection_throttle):
"""
Sets a Balancer's connection throttle. This method returns
immediately.
@param balancer: Balancer to update connection throttle on.
@type balancer: L{LoadBalancer}
@param connection_throttle: Connection Throttle for the balancer.
@type connection_throttle: L{RackspaceConnectionThrottle}
@return: Returns whether the update request was accepted.
@rtype: C{bool}
"""
uri = '/loadbalancers/%s/connectionthrottle' % (balancer.id)
resp = self.connection.request(
uri, method='PUT',
data=json.dumps(connection_throttle._to_dict()))
return resp.status == httplib.ACCEPTED
def ex_disable_balancer_connection_throttle(self, balancer):
"""
Disables a Balancer's connection throttle. This method blocks until
the disable request has been processed and the balancer is in a RUNNING
state again.
@param balancer: Balancer to disable connection throttle on.
@type balancer: L{LoadBalancer}
@return: Updated Balancer.
@rtype: L{LoadBalancer}
"""
if not self.ex_disable_balancer_connection_throttle_no_poll(balancer):
msg = 'Disable connection throttle request not accepted'
raise LibcloudError(msg, driver=self)
return self._get_updated_balancer(balancer)
def ex_disable_balancer_connection_throttle_no_poll(self, balancer):
"""
Disables a Balancer's connection throttle. This method returns
immediately.
@param balancer: Balancer to disable connection throttle on.
@type balancer: L{LoadBalancer}
@return: Returns whether the disable request was accepted.
@rtype: C{bool}
"""
uri = '/loadbalancers/%s/connectionthrottle' % (balancer.id)
resp = self.connection.request(uri, method='DELETE')
return resp.status == httplib.ACCEPTED
def ex_enable_balancer_connection_logging(self, balancer):
"""
Enables connection logging for a Balancer. This method blocks until
the enable request has been processed and the balancer is in a RUNNING
state again.
@param balancer: Balancer to enable connection logging on.
@type balancer: L{LoadBalancer}
@return: Updated Balancer.
@rtype: L{LoadBalancer}
"""
if not self.ex_enable_balancer_connection_logging_no_poll(balancer):
msg = 'Enable connection logging request not accepted'
raise LibcloudError(msg, driver=self)
return self._get_updated_balancer(balancer)
def ex_enable_balancer_connection_logging_no_poll(self, balancer):
"""
Enables connection logging for a Balancer. This method returns
immediately.
@param balancer: Balancer to enable connection logging on.
@type balancer: L{LoadBalancer}
@return: Returns whether the enable request was accepted.
@rtype: C{bool}
"""
uri = '/loadbalancers/%s/connectionlogging' % (balancer.id)
resp = self.connection.request(
uri, method='PUT',
data=json.dumps({'connectionLogging': {'enabled': True}})
)
return resp.status == httplib.ACCEPTED
def ex_disable_balancer_connection_logging(self, balancer):
"""
Disables connection logging for a Balancer. This method blocks until
the enable request has been processed and the balancer is in a RUNNING
state again.
@param balancer: Balancer to disable connection logging on.
@type balancer: L{LoadBalancer}
@return: Updated Balancer.
@rtype: L{LoadBalancer}
"""
if not self.ex_disable_balancer_connection_logging_no_poll(balancer):
msg = 'Disable connection logging request not accepted'
raise LibcloudError(msg, driver=self)
return self._get_updated_balancer(balancer)
def ex_disable_balancer_connection_logging_no_poll(self, balancer):
"""
Disables connection logging for a Balancer. This method returns
immediately.
@param balancer: Balancer to disable connection logging on.
@type balancer: L{LoadBalancer}
@return: Returns whether the disable request was accepted.
@rtype: C{bool}
"""
uri = '/loadbalancers/%s/connectionlogging' % (balancer.id)
resp = self.connection.request(
uri, method='PUT',
data=json.dumps({'connectionLogging': {'enabled': False}})
)
return resp.status == httplib.ACCEPTED
def ex_enable_balancer_session_persistence(self, balancer):
"""
Enables session persistence for a Balancer by setting the persistence
type to 'HTTP_COOKIE'. This method blocks until the enable request
has been processed and the balancer is in a RUNNING state again.
@param balancer: Balancer to enable session persistence on.
@type balancer: L{LoadBalancer}
@return: Updated Balancer.
@rtype: L{LoadBalancer}
"""
if not self.ex_enable_balancer_session_persistence_no_poll(balancer):
msg = 'Enable session persistence request not accepted'
raise LibcloudError(msg, driver=self)
return self._get_updated_balancer(balancer)
def ex_enable_balancer_session_persistence_no_poll(self, balancer):
"""
Enables session persistence for a Balancer by setting the persistence
type to 'HTTP_COOKIE'. This method returns immediately.
@param balancer: Balancer to enable session persistence on.
@type balancer: L{LoadBalancer}
@return: Returns whether the enable request was accepted.
@rtype: C{bool}
"""
uri = '/loadbalancers/%s/sessionpersistence' % (balancer.id)
resp = self.connection.request(
uri, method='PUT',
data=json.dumps(
{'sessionPersistence': {'persistenceType': 'HTTP_COOKIE'}})
)
return resp.status == httplib.ACCEPTED
def ex_disable_balancer_session_persistence(self, balancer):
"""
Disables session persistence for a Balancer. This method blocks until
the disable request has been processed and the balancer is in a RUNNING
state again.
@param balancer: Balancer to disable session persistence on.
@type balancer: L{LoadBalancer}
@return: Updated Balancer.
@rtype: L{LoadBalancer}
"""
if not self.ex_disable_balancer_session_persistence_no_poll(balancer):
msg = 'Disable session persistence request not accepted'
raise LibcloudError(msg, driver=self)
return self._get_updated_balancer(balancer)
def ex_disable_balancer_session_persistence_no_poll(self, balancer):
"""
Disables session persistence for a Balancer. This method returns
immediately.
@param balancer: Balancer to disable session persistence for.
@type balancer: L{LoadBalancer}
@return: Returns whether the disable request was accepted.
@rtype: C{bool}
"""
uri = '/loadbalancers/%s/sessionpersistence' % (balancer.id)
resp = self.connection.request(uri, method='DELETE')
return resp.status == httplib.ACCEPTED
def ex_update_balancer_error_page(self, balancer, page_content):
"""
Updates a Balancer's custom error page. This method blocks until
the update request has been processed and the balancer is in a
RUNNING state again.
@param balancer: Balancer to update the custom error page for.
@type balancer: L{LoadBalancer}
@param page_content: HTML content for the custom error page.
@type page_content: C{str}
@return: Updated Balancer.
@rtype: L{LoadBalancer}
"""
accepted = self.ex_update_balancer_error_page_no_poll(balancer,
page_content)
if not accepted:
msg = 'Update error page request not accepted'
raise LibcloudError(msg, driver=self)
return self._get_updated_balancer(balancer)
def ex_update_balancer_error_page_no_poll(self, balancer, page_content):
"""
Updates a Balancer's custom error page. This method returns
immediately.
@param balancer: Balancer to update the custom error page for.
@type balancer: L{LoadBalancer}
@param page_content: HTML content for the custom error page.
@type page_content: C{str}
@return: Returns whether the update request was accepted.
@rtype: C{bool}
"""
uri = '/loadbalancers/%s/errorpage' % (balancer.id)
resp = self.connection.request(
uri, method='PUT',
data=json.dumps({'errorpage': {'content': page_content}})
)
return resp.status == httplib.ACCEPTED
def ex_disable_balancer_custom_error_page(self, balancer):
"""
Disables a Balancer's custom error page, returning its error page to
the Rackspace-provided default. This method blocks until the disable
request has been processed and the balancer is in a RUNNING state
again.
@param balancer: Balancer to disable the custom error page for.
@type balancer: L{LoadBalancer}
@return: Updated Balancer.
@rtype: L{LoadBalancer}
"""
if not self.ex_disable_balancer_custom_error_page_no_poll(balancer):
msg = 'Disable custom error page request not accepted'
raise LibcloudError(msg, driver=self)
return self._get_updated_balancer(balancer)
def ex_disable_balancer_custom_error_page_no_poll(self, balancer):
"""
Disables a Balancer's custom error page, returning its error page to
the Rackspace-provided default. This method returns immediately.
@param balancer: Balancer to disable the custom error page for.
@type balancer: L{LoadBalancer}
@return: Returns whether the disable request was accepted.
@rtype: C{bool}
"""
uri = '/loadbalancers/%s/errorpage' % (balancer.id)
resp = self.connection.request(uri, method='DELETE')
# Load Balancer API currently returns 200 OK on custom error page
# delete.
return resp.status == httplib.OK or resp.status == httplib.ACCEPTED
def ex_create_balancer_access_rule(self, balancer, rule):
"""
Adds an access rule to a Balancer's access list. This method blocks
until the update request has been processed and the balancer is in a
RUNNING state again.
@param balancer: Balancer to create the access rule for.
@type balancer: L{LoadBalancer}
@param rule: Access Rule to add to the balancer.
@type rule: L{RackspaceAccessRule}
@return: The created access rule.
@rtype: L{RackspaceAccessRule}
"""
accepted = self.ex_create_balancer_access_rule_no_poll(balancer, rule)
if not accepted:
msg = 'Create access rule not accepted'
raise LibcloudError(msg, driver=self)
balancer = self._get_updated_balancer(balancer)
access_list = balancer.extra['accessList']
created_rule = self._find_matching_rule(rule, access_list)
if not created_rule:
raise LibcloudError('Could not find created rule')
return created_rule
def ex_create_balancer_access_rule_no_poll(self, balancer, rule):
"""
Adds an access rule to a Balancer's access list. This method returns
immediately.
@param balancer: Balancer to create the access rule for.
@type balancer: L{LoadBalancer}
@param rule: Access Rule to add to the balancer.
@type rule: L{RackspaceAccessRule}
@return: Returns whether the create request was accepted.
@rtype: C{bool}
"""
uri = '/loadbalancers/%s/accesslist' % (balancer.id)
resp = self.connection.request(
uri, method='POST',
data=json.dumps({'networkItem': rule._to_dict()})
)
return resp.status == httplib.ACCEPTED
def ex_create_balancer_access_rules(self, balancer, rules):
"""
Adds a list of access rules to a Balancer's access list. This method
blocks until the update request has been processed and the balancer is
in a RUNNING state again.
@param balancer: Balancer to create the access rule for.
@type balancer: L{LoadBalancer}
@param rules: List of L{RackspaceAccessRule} to add to the balancer.
@type rules: C{list} of L{RackspaceAccessRule}
@return: The created access rules.
@rtype: L{RackspaceAccessRule}
"""
accepted = self.ex_create_balancer_access_rules_no_poll(balancer,
rules)
if not accepted:
msg = 'Create access rules not accepted'
raise LibcloudError(msg, driver=self)
balancer = self._get_updated_balancer(balancer)
access_list = balancer.extra['accessList']
created_rules = []
for r in rules:
matched_rule = self._find_matching_rule(r, access_list)
if matched_rule:
created_rules.append(matched_rule)
if len(created_rules) != len(rules):
raise LibcloudError('Could not find all created rules')
return created_rules
def _find_matching_rule(self, rule_to_find, access_list):
"""
        LB API does not return the ID for the newly created rules, so we have
        to search the access list for a rule with a matching rule type and
        address in order to return an object with the right identifier. The
        API enforces rule type and address uniqueness.
"""
for r in access_list:
if rule_to_find.rule_type == r.rule_type and\
rule_to_find.address == r.address:
return r
return None
def ex_create_balancer_access_rules_no_poll(self, balancer, rules):
"""
Adds a list of access rules to a Balancer's access list. This method
returns immediately.
@param balancer: Balancer to create the access rule for.
@type balancer: L{LoadBalancer}
@param rules: List of L{RackspaceAccessRule} to add to the balancer.
@type rules: C{list} of L{RackspaceAccessRule}
@return: Returns whether the create request was accepted.
@rtype: C{bool}
"""
uri = '/loadbalancers/%s/accesslist' % (balancer.id)
resp = self.connection.request(
uri, method='POST',
data=json.dumps({'accessList':
[rule._to_dict() for rule in rules]})
)
return resp.status == httplib.ACCEPTED
def ex_destroy_balancer_access_rule(self, balancer, rule):
"""
Removes an access rule from a Balancer's access list. This method
blocks until the update request has been processed and the balancer
is in a RUNNING state again.
@param balancer: Balancer to remove the access rule from.
@type balancer: L{LoadBalancer}
@param rule: Access Rule to remove from the balancer.
@type rule: L{RackspaceAccessRule}
@return: Updated Balancer.
@rtype: L{LoadBalancer}
"""
accepted = self.ex_destroy_balancer_access_rule_no_poll(balancer, rule)
if not accepted:
msg = 'Delete access rule not accepted'
raise LibcloudError(msg, driver=self)
return self._get_updated_balancer(balancer)
def ex_destroy_balancer_access_rule_no_poll(self, balancer, rule):
"""
Removes an access rule from a Balancer's access list. This method
returns immediately.
@param balancer: Balancer to remove the access rule from.
@type balancer: L{LoadBalancer}
@param rule: Access Rule to remove from the balancer.
@type rule: L{RackspaceAccessRule}
@return: Returns whether the destroy request was accepted.
@rtype: C{bool}
"""
uri = '/loadbalancers/%s/accesslist/%s' % (balancer.id, rule.id)
resp = self.connection.request(uri, method='DELETE')
return resp.status == httplib.ACCEPTED
def ex_destroy_balancer_access_rules(self, balancer, rules):
"""
Removes a list of access rules from a Balancer's access list. This
method blocks until the update request has been processed and the
balancer is in a RUNNING state again.
@param balancer: Balancer to remove the access rules from.
@type balancer: L{LoadBalancer}
@param rules: List of L{RackspaceAccessRule} objects to remove from the
balancer.
@type rules: C{list} of L{RackspaceAccessRule}
@return: Updated Balancer.
@rtype: L{LoadBalancer}
"""
accepted = self.ex_destroy_balancer_access_rules_no_poll(
balancer, rules)
if not accepted:
msg = 'Destroy access rules request not accepted'
raise LibcloudError(msg, driver=self)
return self._get_updated_balancer(balancer)
def ex_destroy_balancer_access_rules_no_poll(self, balancer, rules):
"""
Removes a list of access rules from a Balancer's access list. This
method returns immediately.
@param balancer: Balancer to remove the access rules from.
@type balancer: L{LoadBalancer}
@param rules: List of L{RackspaceAccessRule} objects to remove from the
balancer.
@type rules: C{list} of L{RackspaceAccessRule}
@return: Returns whether the destroy request was accepted.
@rtype: C{bool}
"""
ids = [('id', rule.id) for rule in rules]
uri = '/loadbalancers/%s/accesslist' % balancer.id
resp = self.connection.request(uri,
method='DELETE',
params=ids)
return resp.status == httplib.ACCEPTED
def ex_list_current_usage(self, balancer):
"""
Return current load balancer usage report.
@param balancer: Balancer to remove the access rules from.
@type balancer: L{LoadBalancer}
@return: Raw load balancer usage object.
@rtype: C{dict}
"""
uri = '/loadbalancers/%s/usage/current' % (balancer.id)
resp = self.connection.request(uri, method='GET')
return resp.object
def _to_protocols(self, object):
protocols = []
for item in object["protocols"]:
protocols.append(item['name'].lower())
return protocols
def _to_protocols_with_default_ports(self, object):
protocols = []
for item in object["protocols"]:
name = item['name'].lower()
port = int(item['port'])
protocols.append((name, port))
return protocols
def _to_balancers(self, object):
return [self._to_balancer(el) for el in object["loadBalancers"]]
def _to_balancer(self, el):
ip = None
port = None
sourceAddresses = {}
if 'port' in el:
port = el["port"]
if 'sourceAddresses' in el:
sourceAddresses = el['sourceAddresses']
extra = {
"ipv6PublicSource": sourceAddresses.get("ipv6Public"),
"ipv4PublicSource": sourceAddresses.get("ipv4Public"),
"ipv4PrivateSource": sourceAddresses.get("ipv4Servicenet"),
}
if 'virtualIps' in el:
ip = el['virtualIps'][0]['address']
extra['virtualIps'] = el['virtualIps']
if 'protocol' in el:
extra['protocol'] = el['protocol']
if 'algorithm' in el and \
el["algorithm"] in self._VALUE_TO_ALGORITHM_MAP:
extra["algorithm"] = self._value_to_algorithm(el["algorithm"])
if 'healthMonitor' in el:
health_monitor = self._to_health_monitor(el)
if health_monitor:
extra["healthMonitor"] = health_monitor
if 'connectionThrottle' in el:
extra["connectionThrottle"] = self._to_connection_throttle(el)
if 'sessionPersistence' in el:
persistence = el["sessionPersistence"]
extra["sessionPersistenceType"] =\
persistence.get("persistenceType")
if 'connectionLogging' in el:
logging = el["connectionLogging"]
extra["connectionLoggingEnabled"] = logging.get("enabled")
if 'nodes' in el:
extra['members'] = self._to_members(el)
if 'created' in el:
extra['created'] = self._iso_to_datetime(el['created']['time'])
if 'updated' in el:
extra['updated'] = self._iso_to_datetime(el['updated']['time'])
if 'accessList' in el:
extra['accessList'] = [self._to_access_rule(rule)
for rule in el['accessList']]
return LoadBalancer(id=el["id"],
name=el["name"],
state=self.LB_STATE_MAP.get(
el["status"], State.UNKNOWN),
ip=ip,
port=port,
driver=self.connection.driver,
extra=extra)
def _to_members(self, object, balancer=None):
return [self._to_member(el, balancer) for el in object["nodes"]]
def _to_member(self, el, balancer=None):
extra = {}
if 'weight' in el:
extra['weight'] = el["weight"]
if 'condition' in el and\
el['condition'] in self.LB_MEMBER_CONDITION_MAP:
extra['condition'] =\
self.LB_MEMBER_CONDITION_MAP.get(el["condition"])
if 'status' in el:
extra['status'] = el["status"]
lbmember = Member(id=el["id"],
ip=el["address"],
port=el["port"],
balancer=balancer,
extra=extra)
return lbmember
def _protocol_to_value(self, protocol):
non_standard_protocols = {'imapv2': 'IMAPv2', 'imapv3': 'IMAPv3',
'imapv4': 'IMAPv4'}
protocol_name = protocol.lower()
if protocol_name in non_standard_protocols:
protocol_value = non_standard_protocols[protocol_name]
else:
protocol_value = protocol.upper()
return protocol_value
def _kwargs_to_mutable_attrs(self, **attrs):
update_attrs = {}
if "name" in attrs:
update_attrs['name'] = attrs['name']
if "algorithm" in attrs:
algorithm_value = self._algorithm_to_value(attrs['algorithm'])
update_attrs['algorithm'] = algorithm_value
if "protocol" in attrs:
update_attrs['protocol'] =\
self._protocol_to_value(attrs['protocol'])
if "port" in attrs:
update_attrs['port'] = int(attrs['port'])
if "vip" in attrs:
if attrs['vip'] == 'PUBLIC' or attrs['vip'] == 'SERVICENET':
update_attrs['virtualIps'] = [{'type': attrs['vip']}]
else:
update_attrs['virtualIps'] = [{'id': attrs['vip']}]
return update_attrs
def _kwargs_to_mutable_member_attrs(self, **attrs):
update_attrs = {}
if 'condition' in attrs:
update_attrs['condition'] =\
self.CONDITION_LB_MEMBER_MAP.get(attrs['condition'])
if 'weight' in attrs:
update_attrs['weight'] = attrs['weight']
return update_attrs
def _to_health_monitor(self, el):
health_monitor_data = el["healthMonitor"]
type = health_monitor_data.get("type")
delay = health_monitor_data.get("delay")
timeout = health_monitor_data.get("timeout")
attempts_before_deactivation =\
health_monitor_data.get("attemptsBeforeDeactivation")
if type == "CONNECT":
return RackspaceHealthMonitor(
type=type, delay=delay, timeout=timeout,
attempts_before_deactivation=attempts_before_deactivation)
if type == "HTTP" or type == "HTTPS":
return RackspaceHTTPHealthMonitor(
type=type, delay=delay, timeout=timeout,
attempts_before_deactivation=attempts_before_deactivation,
path=health_monitor_data.get("path"),
status_regex=health_monitor_data.get("statusRegex"),
body_regex=health_monitor_data.get("bodyRegex", ''))
return None
def _to_connection_throttle(self, el):
connection_throttle_data = el["connectionThrottle"]
min_connections = connection_throttle_data.get("minConnections")
max_connections = connection_throttle_data.get("maxConnections")
max_connection_rate = connection_throttle_data.get("maxConnectionRate")
rate_interval = connection_throttle_data.get("rateInterval")
return RackspaceConnectionThrottle(
min_connections=min_connections,
max_connections=max_connections,
max_connection_rate=max_connection_rate,
rate_interval_seconds=rate_interval)
def _to_access_rule(self, el):
return RackspaceAccessRule(
id=el.get("id"),
rule_type=self._to_access_rule_type(el.get("type")),
address=el.get("address"))
def _to_access_rule_type(self, type):
if type == "ALLOW":
return RackspaceAccessRuleType.ALLOW
elif type == "DENY":
return RackspaceAccessRuleType.DENY
def _iso_to_datetime(self, isodate):
date_formats = ('%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S%z')
date = None
for date_format in date_formats:
try:
date = datetime.strptime(isodate, date_format)
except ValueError:
pass
if date:
break
return date
class RackspaceUKLBDriver(RackspaceLBDriver):
connectionCls = RackspaceUKConnection
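if __name__ == '__main__':
    # Illustrative sketch only, not part of the driver: how the blocking ex_*
    # helpers defined above might be combined. The credentials and the CIDR
    # below are hypothetical placeholders.
    driver = RackspaceLBDriver('username', 'api key')
    lb = driver.list_balancers()[0]
    # Blocking variants poll until the balancer is back in a RUNNING state.
    lb = driver.ex_enable_balancer_connection_logging(lb)
    rule = RackspaceAccessRule(rule_type=RackspaceAccessRuleType.DENY,
                               address='192.0.2.0/24')
    driver.ex_create_balancer_access_rule(lb, rule)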
| {
"content_hash": "851a607a6ddc6b85849c464878f17e86",
"timestamp": "",
"source": "github",
"line_count": 1523,
"max_line_length": 79,
"avg_line_length": 36.181221273801704,
"alnum_prop": 0.6027874564459931,
"repo_name": "Jc2k/libcloud",
"id": "1aa7fd51563cb8c3c76d7920c8a59778bd632abd",
"size": "55886",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "libcloud/loadbalancer/drivers/rackspace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2274647"
},
{
"name": "Shell",
"bytes": "13009"
}
],
"symlink_target": ""
} |
"""
Module with NMF algorithm in concentric annuli for ADI/RDI.
"""
__author__ = 'Valentin Christiaens'
__all__ = ['nmf_annular']
import numpy as np
from multiprocessing import cpu_count
from sklearn.decomposition import NMF
from ..preproc import cube_derotate, cube_collapse, check_pa_vector
from ..preproc.derotation import _find_indices_adi, _define_annuli
from ..var import get_annulus_segments, matrix_scaling
from ..config import timing, time_ini
from ..config.utils_conf import pool_map, iterable
def nmf_annular(cube, angle_list, cube_ref=None, radius_int=0, fwhm=4, asize=4,
n_segments=1, delta_rot=(0.1, 1), ncomp=1, init_svd='nndsvd',
nproc=1, min_frames_lib=2, max_frames_lib=200, scaling=None,
imlib='vip-fft', interpolation='lanczos4', collapse='median',
full_output=False, verbose=True, theta_init=0, weights=None,
cube_sig=None, handle_neg='mask', max_iter=1000,
random_state=None, nmf_args={}, **rot_options):
""" Non Negative Matrix Factorization in concentric annuli, for ADI/RDI
sequences. Alternative to the annular ADI-PCA processing that does not rely
on SVD or ED for obtaining a low-rank approximation of the datacube.
This function embeds the scikit-learn NMF algorithm solved through either
the coordinate descent or the multiplicative update method.
Parameters
----------
cube : numpy ndarray, 3d
Input cube.
angle_list : numpy ndarray, 1d
Corresponding parallactic angle for each frame.
cube_ref : numpy ndarray, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
radius_int : int, optional
The radius of the innermost annulus. By default is 0, if >0 then the
central circular region is discarded.
fwhm : float, optional
        Size of the FWHM in pixels. Default is 4.
asize : float, optional
The size of the annuli, in pixels.
n_segments : int or list of ints or 'auto', optional
The number of segments for each annulus. When a single integer is given
it is used for all annuli. When set to 'auto', the number of segments is
automatically determined for every annulus, based on the annulus width.
delta_rot : float or tuple of floats, optional
Factor for adjusting the parallactic angle threshold, expressed in
        FWHM. Default is 1 (excludes 1 FWHM on each side of the considered
frame). If a tuple of two floats is provided, they are used as the lower
and upper intervals for the threshold (grows linearly as a function of
the separation). !!! Important: this is used even if a reference cube
is provided for RDI. This is to allow ARDI (PCA library built from both
science and reference cubes). If you want to do pure RDI, set delta_rot
to an arbitrarily high value such that the condition is never fulfilled
for science frames to make it in the PCA library.
ncomp : int, optional
        How many components are used for the low-rank approximation of the
        datacube.
scaling : {None, 'temp-mean', 'spat-mean', 'temp-standard', 'spat-standard'}
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done, with
"spat-mean" then the spatial mean is subtracted, with "temp-standard"
temporal mean centering plus scaling to unit variance is done and with
"spat-standard" spatial mean centering plus scaling to unit variance is
performed.
    max_iter : int, optional
        Maximum number of iterations for the NMF solver.
    random_state : int or None, optional
        Controls the seed for the Pseudo Random Number generator.
    nproc : None or int, optional
        Number of processes for parallel computing. If None, it is set to
        half the number of available CPUs.
    min_frames_lib : int, optional
        Minimum number of frames in the reference library. Default is 2.
    max_frames_lib : int, optional
        Maximum number of frames in the reference library. The more distant
        (more decorrelated) frames are discarded first. Default is 200.
    theta_init : float, optional
        Initial azimuth [degrees] of the first annulus segment.
    weights : 1d numpy array or list, optional
        Weights applied during the temporal collapse of the derotated
        residuals (passed to ``cube_collapse``).
    cube_sig : numpy ndarray, 3d, optional
        Cube with the estimated circumstellar signal, subtracted from the
        science frames before building the reference library and fitting the
        NMF model.
    handle_neg : {'mask', 'null', 'subtr_min'}, str optional
        Determines how negative values are handled before the NMF: 'mask'
        discards the affected pixels, 'null' sets them to zero and
        'subtr_min' offsets the data by its minimum.
    init_svd: str, optional {'nndsvd', 'nndsvda', 'random'}
Method used to initialize the iterative procedure to find H and W.
'nndsvd': non-negative double SVD recommended for sparseness
'nndsvda': NNDSVD where zeros are filled with the average of cube;
recommended when sparsity is not desired
'random': random initial non-negative matrix
collapse : {'median', 'mean', 'sum', 'trimmean'}, str optional
Sets the way of collapsing the frames for producing a final image.
full_output: boolean, optional
Whether to return the final median combined image only or with other
intermediate arrays.
verbose : {True, False}, bool optional
If True prints intermediate info and timing.
nmf_args: dictionary, optional
Additional arguments for scikit-learn NMF algorithm. See:
https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.NMF.html
rot_options: dictionary, optional
Dictionary with optional keyword values for "imlib", "interpolation,
"border_mode", "mask_val", "edge_blend", "interp_zeros", "ker" (see
documentation of ``vip_hci.preproc.frame_rotate``)
Returns
-------
    If full_output is False, only the final frame is returned. If True, the
    algorithm returns the cube of residuals, the derotated cube of residuals,
    the reconstructed cube, the reshaped NMF components and the final frame.
"""
if verbose:
global start_time
start_time = time_ini()
array = cube
if array.ndim != 3:
raise TypeError('Input array is not a cube or 3d array')
if array.shape[0] != angle_list.shape[0]:
raise TypeError('Input vector or parallactic angles has wrong length')
n, y, _ = array.shape
angle_list = check_pa_vector(angle_list)
n_annuli = int((y / 2 - radius_int) / asize)
if isinstance(delta_rot, tuple):
delta_rot = np.linspace(delta_rot[0], delta_rot[1], num=n_annuli)
elif isinstance(delta_rot, (int, float)):
delta_rot = [delta_rot] * n_annuli
if isinstance(n_segments, int):
n_segments = [n_segments for _ in range(n_annuli)]
elif n_segments == 'auto':
n_segments = list()
n_segments.append(2) # for first annulus
n_segments.append(3) # for second annulus
ld = 2 * np.tan(360 / 4 / 2) * asize
for i in range(2, n_annuli): # rest of annuli
radius = i * asize
ang = np.rad2deg(2 * np.arctan(ld / (2 * radius)))
n_segments.append(int(np.ceil(360 / ang)))
if verbose:
msg = 'N annuli = {}, FWHM = {:.3f}'
print(msg.format(n_annuli, fwhm))
print('NMF per annulus (or annular sectors):')
if nproc is None: # Hyper-threading "duplicates" the cores -> cpu_count/2
nproc = cpu_count() // 2
# how to handle negative values
if handle_neg == 'null':
array[np.where(array < 0)] = 0
elif handle_neg == 'subtr_min':
array -= np.amin(array)
elif not handle_neg == 'mask':
raise ValueError("Mode to handle neg. pixels not recognized")
# The annuli are built, and the corresponding PA thresholds for frame
# rejection are calculated (at the center of the annulus)
cube_out = np.zeros_like(array)
cube_recon = np.zeros_like(array)
H_comps = np.zeros([ncomp, array.shape[1], array.shape[2]])
if cube_ref is None:
strict = False
else:
strict = True
for ann in range(n_annuli):
if isinstance(ncomp, tuple) or isinstance(ncomp, np.ndarray):
if len(ncomp) == n_annuli:
ncompann = ncomp[ann]
else:
raise TypeError('If `ncomp` is a tuple, it must match the '
'number of annuli')
else:
ncompann = ncomp
n_segments_ann = n_segments[ann]
res_ann_par = _define_annuli(angle_list, ann, n_annuli, fwhm,
radius_int, asize, delta_rot[ann],
n_segments_ann, verbose, strict)
pa_thr, inner_radius, ann_center = res_ann_par
indices = get_annulus_segments(array[0], inner_radius, asize,
n_segments_ann, theta_init)
# Library matrix is created for each segment and scaled if needed
for j in range(n_segments_ann):
yy = indices[j][0]
xx = indices[j][1]
if handle_neg == 'mask':
npts = range(len(yy))
if cube_sig is not None:
yp = [yy[i] for i in npts if np.amin(
array[:, yy[i], xx[i]]-np.abs(cube_sig[:, yy[i], xx[i]])) > 0]
xp = [xx[i] for i in npts if np.amin(
array[:, yy[i], xx[i]]-np.abs(cube_sig[:, yy[i], xx[i]])) > 0]
else:
yp = [yy[i]
for i in npts if np.amin(array[:, yy[i], xx[i]]) > 0]
xp = [xx[i]
for i in npts if np.amin(array[:, yy[i], xx[i]]) > 0]
yy = tuple(yp)
xx = tuple(xp)
matrix_segm = array[:, yy, xx] # shape [nframes x npx_segment]
matrix_segm = matrix_scaling(matrix_segm, scaling)
if cube_ref is not None:
matrix_segm_ref = cube_ref[:, yy, xx]
matrix_segm_ref = matrix_scaling(matrix_segm_ref, scaling)
else:
matrix_segm_ref = None
if cube_sig is not None:
matrix_sig_segm = cube_sig[:, yy, xx]
else:
matrix_sig_segm = None
res = pool_map(nproc, do_nmf_patch, matrix_segm, iterable(range(n)),
angle_list, fwhm, pa_thr, ann_center, ncompann,
max_iter, random_state, init_svd, min_frames_lib,
max_frames_lib, matrix_segm_ref, matrix_sig_segm,
handle_neg)
res = np.array(res, dtype=object)
residuals = np.array(res[:, 0])
# ncomps = res[:, 1]
# nfrslib = res[:, 2]
recon = np.array(res[:, 1])
H = np.array(res[:, 2])
for fr in range(n):
cube_out[fr][yy, xx] = residuals[fr]
cube_recon[fr][yy, xx] = recon[fr]
for pp in range(ncomp):
H_comps[pp][yy, xx] = H[0][pp] # just save H inferred for fr=0
if verbose == 2:
timing(start_time)
# Cube is derotated according to the parallactic angle and collapsed
cube_der = cube_derotate(cube_out, angle_list, nproc=nproc, **rot_options)
frame = cube_collapse(cube_der, mode=collapse, w=weights)
if verbose:
print('Done derotating and combining.')
timing(start_time)
if full_output:
return cube_out, cube_der, cube_recon, H_comps, frame
else:
return frame
def do_nmf_patch(matrix, frame, angle_list, fwhm, pa_threshold, ann_center,
ncomp, max_iter, random_state, init_svd, min_frames_lib,
max_frames_lib, matrix_ref, matrix_sig_segm, handle_neg,
**kwargs):
""" Solves the NMF for each frame patch (small matrix). For each frame we
find the frames to be rejected depending on the amount of rotation. The
library is also truncated on the other end (frames too far or which have
rotated more) which are more decorrelated to keep the computational cost
lower. This truncation is done on the annuli after 10*FWHM and the goal is
to keep min(num_frames/2, 200) in the library.
"""
# Note: blocks below allow the possibility of ARDI
if pa_threshold != 0:
indices_left = _find_indices_adi(angle_list, frame, pa_threshold,
truncate=True,
max_frames=max_frames_lib)
msg = 'Too few frames left in the PCA library. '
msg += 'Accepted indices length ({:.0f}) less than {:.0f}. '
msg += 'Try decreasing either delta_rot or min_frames_lib.'
try:
if matrix_sig_segm is not None:
data_ref = matrix[indices_left]-matrix_sig_segm[indices_left]
else:
data_ref = matrix[indices_left]
except IndexError:
if matrix_ref is None:
raise RuntimeError(msg.format(0, min_frames_lib))
data_ref = None
        # check matrix_ref first: data_ref can be None when an IndexError was
        # caught above and a reference library is available
        if matrix_ref is None and data_ref.shape[0] < min_frames_lib:
            raise RuntimeError(msg.format(data_ref.shape[0], min_frames_lib))
elif pa_threshold == 0:
if matrix_sig_segm is not None:
data_ref = matrix-matrix_sig_segm
else:
data_ref = matrix
if matrix_ref is not None:
if data_ref is not None:
data_ref = np.vstack((matrix_ref, data_ref))
else:
data_ref = matrix_ref
# to avoid bug, just consider positive values
if np.median(data_ref) < 0:
raise ValueError("Mostly negative values in the cube")
else:
# how to handle negative values
if handle_neg == 'null':
data_ref[np.where(data_ref < 0)] = 0
elif handle_neg == 'subtr_min':
data_ref -= np.amin(data_ref)
else: # 'mask'
zp = np.nonzero(np.amin(data_ref, axis=0) > 0)
solver = 'mu'
# if init_svd == 'nndsvda':
# solver = 'mu'
# else:
# solver = 'cd'
mod = NMF(n_components=ncomp, solver=solver, init=init_svd,
max_iter=max_iter, random_state=random_state, **kwargs)
curr_frame = matrix[frame] # current frame
if matrix_sig_segm is not None:
curr_frame_emp = matrix[frame]-matrix_sig_segm[frame]
else:
curr_frame_emp = curr_frame.copy()
# how to handle negative values
if handle_neg == 'null':
curr_frame_emp[np.where(curr_frame_emp < 0)] = 0
elif handle_neg == 'subtr_min':
curr_frame_emp -= np.amin(curr_frame_emp)
else: # 'mask'
zzp = np.nonzero(curr_frame_emp > 0)
pos_p = np.intersect1d(zp[0], zzp[0])
curr_frame_emp = curr_frame_emp[pos_p]
data_ref = data_ref[:, pos_p]
H = mod.fit(data_ref).components_
W = mod.transform(curr_frame_emp[np.newaxis, ...])
reconstructed = np.dot(W, H)
# if masked neg values, reshape
if handle_neg == 'mask': # 'mask'
recon = np.zeros(matrix.shape[1])
recon[pos_p] = reconstructed
reconstructed = recon.copy()
H_tmp = np.zeros([ncomp, matrix.shape[1]])
for pp in range(ncomp):
H_tmp[pp, pos_p] = H[pp]
H = H_tmp.copy()
residuals = curr_frame - reconstructed
return residuals, reconstructed, H
# return residuals, V.shape[0], data_ref.shape[0]
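if __name__ == '__main__':
    # Minimal, illustrative usage sketch on a synthetic ADI cube. The array
    # shapes, angles and parameter values below are arbitrary assumptions for
    # demonstration, not a recommended reduction recipe.
    import numpy as np
    cube = np.random.rand(30, 101, 101)      # 30 positive frames of 101x101 px
    angs = np.linspace(0, 60, 30)            # fake parallactic angles [deg]
    frame = nmf_annular(cube, angs, ncomp=5, fwhm=4, asize=4,
                        delta_rot=(0.1, 1), init_svd='nndsvd', verbose=False)
    print(frame.shape)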
| {
"content_hash": "2f38bae3d675dc113971fa42ca8bc105",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 88,
"avg_line_length": 44.973837209302324,
"alnum_prop": 0.5997026695106974,
"repo_name": "vortex-exoplanet/VIP",
"id": "530f9785fe31fe4de1944251d4dab12458b04453",
"size": "15495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vip_hci/psfsub/nmf_local.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1552"
},
{
"name": "Python",
"bytes": "1518173"
},
{
"name": "TeX",
"bytes": "62923"
}
],
"symlink_target": ""
} |
from pprint import pprint
from libcloud.autoscale.providers import get_driver as as_get_driver
from libcloud.autoscale.types import Provider as as_provider
ACCESS_ID = 'your access id'
SECRET_KEY = 'your secret key'
# Initialize the drivers
as_driver = as_get_driver(as_provider.AWS_AUTOSCALE)(ACCESS_ID, SECRET_KEY)
groups = as_driver.list_auto_scale_groups()
pprint(groups)
| {
"content_hash": "4e38f03165fadc8fd0cdcf9a6c898ce4",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 75,
"avg_line_length": 29.23076923076923,
"alnum_prop": 0.781578947368421,
"repo_name": "Cloud-Elasticity-Services/as-libcloud",
"id": "2135595cf7ed9ed1a6986ab1755cd32edc3cfe03",
"size": "380",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "docs/examples/autoscale/aws/list_auto_scale_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "Python",
"bytes": "3871921"
},
{
"name": "Shell",
"bytes": "13868"
}
],
"symlink_target": ""
} |
"""
Created on Jan 21, 2020
@author: alfoa, wangc
Least Angle Regression model
"""
#Internal Modules (Lazy Importer)--------------------------------------------------------------------
#Internal Modules (Lazy Importer) End----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
from numpy import finfo
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from ....SupervisedLearning.ScikitLearn import ScikitLearnBase
from ....utils import InputData, InputTypes
#Internal Modules End--------------------------------------------------------------------------------
class Lars(ScikitLearnBase):
"""
Least Angle Regression model
"""
info = {'problemtype':'regression', 'normalize':False}
def __init__(self):
"""
Constructor that will appropriately initialize a supervised learning object
@ In, None
@ Out, None
"""
super().__init__()
import sklearn
import sklearn.linear_model
self.model = sklearn.linear_model.Lars
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super(Lars, cls).getInputSpecification()
specs.description = r"""The \xmlNode{Lars} (\textit{Least Angle Regression model})
is a regression algorithm for high-dimensional data.
The LARS algorithm provides a means of producing an estimate of which variables
to include, as well as their coefficients, when a response variable is
determined by a linear combination of a subset of potential covariates.
\zNormalizationNotPerformed{Lars}
"""
specs.addSub(InputData.parameterInputFactory("eps", contentType=InputTypes.FloatType,
descr=r"""The machine-precision regularization in the computation of the Cholesky
diagonal factors. Increase this for very ill-conditioned systems. Unlike the tol
parameter in some iterative optimization-based algorithms, this parameter does not
control the tolerance of the optimization.""", default=finfo(float).eps))
specs.addSub(InputData.parameterInputFactory("fit_intercept", contentType=InputTypes.BoolType,
descr=r"""Whether the intercept should be estimated or not. If False,
the data is assumed to be already centered.""", default=True))
specs.addSub(InputData.parameterInputFactory("precompute", contentType=InputTypes.StringType,
descr=r"""Whether to use a precomputed Gram matrix to speed up calculations.
For sparse input this option is always True to preserve sparsity.""", default='auto'))
specs.addSub(InputData.parameterInputFactory("normalize", contentType=InputTypes.BoolType,
descr=r"""This parameter is ignored when fit_intercept is set to False. If True,
the regressors X will be normalized before regression by subtracting the mean and
dividing by the l2-norm.""", default=True))
specs.addSub(InputData.parameterInputFactory("n_nonzero_coefs", contentType=InputTypes.IntegerType,
descr=r"""Target number of non-zero coefficients.""", default=500))
# new in sklearn version 0.23
# specs.addSub(InputData.parameterInputFactory("jitter", contentType=InputTypes.FloatType,
# descr=r"""Upper bound on a uniform noise parameter to be added to the
# y values, to satisfy the model’s assumption of one-at-a-time computations.
# Might help with stability.""", default=None))
specs.addSub(InputData.parameterInputFactory("verbose", contentType=InputTypes.BoolType,
descr=r"""Sets the verbosity amount.""", default=False))
specs.addSub(InputData.parameterInputFactory("fit_path", contentType=InputTypes.BoolType,
descr=r"""If True the full path is stored in the coef_path_ attribute.
If you compute the solution for a large problem or many targets,
setting fit_path to False will lead to a speedup, especially with a
small alpha.""", default=True))
return specs
def _handleInput(self, paramInput):
"""
Function to handle the common parts of the distribution parameter input.
@ In, paramInput, ParameterInput, the already parsed input.
@ Out, None
"""
super()._handleInput(paramInput)
settings, notFound = paramInput.findNodesAndExtractValues(['eps','precompute', 'fit_intercept',
'normalize','n_nonzero_coefs', 'verbose',
'fit_path'])
# notFound must be empty
assert(not notFound)
self.initializeModel(settings)
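if __name__ == '__main__':
  # Illustrative only: exercise the scikit-learn estimator that this RAVEN
  # class wraps (sklearn.linear_model.Lars) directly on toy data. The numbers
  # below are arbitrary.
  import numpy
  from sklearn import linear_model
  X = numpy.array([[-1., 1.], [0., 0.], [1., 1.]])
  y = numpy.array([-1.1111, 0., -1.1111])
  reg = linear_model.Lars(n_nonzero_coefs=1)
  reg.fit(X, y)
  print(reg.coef_)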
| {
"content_hash": "80a57af7364e4ad3c41fabc8cd8759f8",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 135,
"avg_line_length": 61.7319587628866,
"alnum_prop": 0.5362391449565799,
"repo_name": "idaholab/raven",
"id": "47e0df48ba702b4a8c8d37c4a553a2549cc2e419",
"size": "6579",
"binary": false,
"copies": "2",
"ref": "refs/heads/devel",
"path": "ravenframework/SupervisedLearning/ScikitLearn/LinearModel/Lars.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1556316"
},
{
"name": "Batchfile",
"bytes": "1095"
},
{
"name": "C",
"bytes": "148504"
},
{
"name": "C++",
"bytes": "48279546"
},
{
"name": "CMake",
"bytes": "9998"
},
{
"name": "Jupyter Notebook",
"bytes": "84202"
},
{
"name": "MATLAB",
"bytes": "202335"
},
{
"name": "Makefile",
"bytes": "2399"
},
{
"name": "Perl",
"bytes": "1297"
},
{
"name": "Python",
"bytes": "7004752"
},
{
"name": "R",
"bytes": "67"
},
{
"name": "SWIG",
"bytes": "8622"
},
{
"name": "Shell",
"bytes": "124289"
},
{
"name": "TeX",
"bytes": "479725"
}
],
"symlink_target": ""
} |
from traceback import print_exc
from pprint import pformat
import cStringIO
from flask import request
from werkzeug import Response
def handle_traceback(exc):
out = cStringIO.StringIO()
print_exc(file=out)
formatted_environ = pformat(request.environ)
response = Response(
'%s\n%s\n' % (out.getvalue(), formatted_environ),
status=500
)
return response
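if __name__ == '__main__':
    # Illustrative wiring only, not part of the appliance: attach the handler
    # to a throwaway Flask app so a server error returns the traceback and the
    # WSGI environ as a plain-text 500 response. The route name is made up.
    from flask import Flask
    app = Flask(__name__)
    app.errorhandler(500)(handle_traceback)
    @app.route('/boom')
    def boom():
        raise RuntimeError('intentional failure for demonstration')
    app.run()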
| {
"content_hash": "4acff4d111de95d1790bd617595beccd",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 57,
"avg_line_length": 23.176470588235293,
"alnum_prop": 0.700507614213198,
"repo_name": "dreamhost/akanda-appliance",
"id": "cb272b6b4be0d6e96eae9b3d31d705fc8c570e8f",
"size": "1002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "akanda/router/debug.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "25"
},
{
"name": "Python",
"bytes": "214760"
},
{
"name": "Shell",
"bytes": "14294"
}
],
"symlink_target": ""
} |
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = '@1-e&noh8w5@=lzhi^9szjd&_a^ai6y@#n)bx-=&($ldg8q7gm'
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_gears',
'ytlist',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ytlist.urls'
WSGI_APPLICATION = 'ytlist.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-za'
TIME_ZONE = 'Africa/Johannesburg'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
GEARS_COMPILERS = {
'.less': 'gears_less.LESSCompiler',
}
| {
"content_hash": "a929d4b2ca505ae240158d543c66ed48",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 65,
"avg_line_length": 22.87272727272727,
"alnum_prop": 0.6868044515103339,
"repo_name": "fluffels/ytlist",
"id": "4f2b18df34f3aea40415e6e2c38ce624c5460f80",
"size": "1258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ytlist/ytlist/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "953"
},
{
"name": "JavaScript",
"bytes": "2910"
},
{
"name": "Python",
"bytes": "8353"
}
],
"symlink_target": ""
} |
import pytest
def setup_module(mod):
mod.nose = pytest.importorskip("nose")
def test_nose_setup(testdir):
p = testdir.makepyfile(
"""
values = []
from nose.tools import with_setup
@with_setup(lambda: values.append(1), lambda: values.append(2))
def test_hello():
assert values == [1]
def test_world():
assert values == [1,2]
test_hello.setup = lambda: values.append(1)
test_hello.teardown = lambda: values.append(2)
"""
)
result = testdir.runpytest(p, "-p", "nose")
result.assert_outcomes(passed=2)
def test_setup_func_with_setup_decorator():
from _pytest.nose import call_optional
values = []
class A:
@pytest.fixture(autouse=True)
def f(self):
values.append(1)
call_optional(A(), "f")
assert not values
def test_setup_func_not_callable():
from _pytest.nose import call_optional
class A:
f = 1
call_optional(A(), "f")
def test_nose_setup_func(testdir):
p = testdir.makepyfile(
"""
from nose.tools import with_setup
values = []
def my_setup():
a = 1
values.append(a)
def my_teardown():
b = 2
values.append(b)
@with_setup(my_setup, my_teardown)
def test_hello():
print(values)
assert values == [1]
def test_world():
print(values)
assert values == [1,2]
"""
)
result = testdir.runpytest(p, "-p", "nose")
result.assert_outcomes(passed=2)
def test_nose_setup_func_failure(testdir):
p = testdir.makepyfile(
"""
from nose.tools import with_setup
values = []
my_setup = lambda x: 1
my_teardown = lambda x: 2
@with_setup(my_setup, my_teardown)
def test_hello():
print(values)
assert values == [1]
def test_world():
print(values)
assert values == [1,2]
"""
)
result = testdir.runpytest(p, "-p", "nose")
result.stdout.fnmatch_lines(["*TypeError: <lambda>()*"])
def test_nose_setup_func_failure_2(testdir):
testdir.makepyfile(
"""
values = []
my_setup = 1
my_teardown = 2
def test_hello():
assert values == []
test_hello.setup = my_setup
test_hello.teardown = my_teardown
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_nose_setup_partial(testdir):
pytest.importorskip("functools")
p = testdir.makepyfile(
"""
from functools import partial
values = []
def my_setup(x):
a = x
values.append(a)
def my_teardown(x):
b = x
values.append(b)
my_setup_partial = partial(my_setup, 1)
my_teardown_partial = partial(my_teardown, 2)
def test_hello():
print(values)
assert values == [1]
def test_world():
print(values)
assert values == [1,2]
test_hello.setup = my_setup_partial
test_hello.teardown = my_teardown_partial
"""
)
result = testdir.runpytest(p, "-p", "nose")
result.stdout.fnmatch_lines(["*2 passed*"])
def test_module_level_setup(testdir):
testdir.makepyfile(
"""
from nose.tools import with_setup
items = {}
def setup():
items[1]=1
def teardown():
del items[1]
def setup2():
items[2] = 2
def teardown2():
del items[2]
def test_setup_module_setup():
assert items[1] == 1
@with_setup(setup2, teardown2)
def test_local_setup():
assert items[2] == 2
assert 1 not in items
"""
)
result = testdir.runpytest("-p", "nose")
result.stdout.fnmatch_lines(["*2 passed*"])
def test_nose_style_setup_teardown(testdir):
testdir.makepyfile(
"""
values = []
def setup_module():
values.append(1)
def teardown_module():
del values[0]
def test_hello():
assert values == [1]
def test_world():
assert values == [1]
"""
)
result = testdir.runpytest("-p", "nose")
result.stdout.fnmatch_lines(["*2 passed*"])
def test_nose_setup_ordering(testdir):
testdir.makepyfile(
"""
def setup_module(mod):
mod.visited = True
class TestClass(object):
def setup(self):
assert visited
def test_first(self):
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_apiwrapper_problem_issue260(testdir):
    # this would end up trying to call an optional teardown on the class
    # for plain unittests we don't want nose behaviour
testdir.makepyfile(
"""
import unittest
class TestCase(unittest.TestCase):
def setup(self):
#should not be called in unittest testcases
assert 0, 'setup'
def teardown(self):
#should not be called in unittest testcases
assert 0, 'teardown'
def setUp(self):
print('setup')
def tearDown(self):
print('teardown')
def test_fun(self):
pass
"""
)
result = testdir.runpytest()
result.assert_outcomes(passed=1)
def test_setup_teardown_linking_issue265(testdir):
# we accidentally didn't integrate nose setupstate with normal setupstate
# this test ensures that won't happen again
testdir.makepyfile(
'''
import pytest
class TestGeneric(object):
def test_nothing(self):
"""Tests the API of the implementation (for generic and specialized)."""
@pytest.mark.skipif("True", reason=
"Skip tests to check if teardown is skipped as well.")
class TestSkipTeardown(TestGeneric):
def setup(self):
"""Sets up my specialized implementation for $COOL_PLATFORM."""
raise Exception("should not call setup for skipped tests")
def teardown(self):
"""Undoes the setup."""
raise Exception("should not call teardown for skipped tests")
'''
)
reprec = testdir.runpytest()
reprec.assert_outcomes(passed=1, skipped=1)
def test_SkipTest_during_collection(testdir):
p = testdir.makepyfile(
"""
import nose
raise nose.SkipTest("during collection")
def test_failing():
assert False
"""
)
result = testdir.runpytest(p)
result.assert_outcomes(skipped=1)
def test_SkipTest_in_test(testdir):
testdir.makepyfile(
"""
import nose
def test_skipping():
raise nose.SkipTest("in test")
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(skipped=1)
def test_istest_function_decorator(testdir):
p = testdir.makepyfile(
"""
import nose.tools
@nose.tools.istest
def not_test_prefix():
pass
"""
)
result = testdir.runpytest(p)
result.assert_outcomes(passed=1)
def test_nottest_function_decorator(testdir):
testdir.makepyfile(
"""
import nose.tools
@nose.tools.nottest
def test_prefix():
pass
"""
)
reprec = testdir.inline_run()
assert not reprec.getfailedcollections()
calls = reprec.getreports("pytest_runtest_logreport")
assert not calls
def test_istest_class_decorator(testdir):
p = testdir.makepyfile(
"""
import nose.tools
@nose.tools.istest
class NotTestPrefix(object):
def test_method(self):
pass
"""
)
result = testdir.runpytest(p)
result.assert_outcomes(passed=1)
def test_nottest_class_decorator(testdir):
testdir.makepyfile(
"""
import nose.tools
@nose.tools.nottest
class TestPrefix(object):
def test_method(self):
pass
"""
)
reprec = testdir.inline_run()
assert not reprec.getfailedcollections()
calls = reprec.getreports("pytest_runtest_logreport")
assert not calls
def test_skip_test_with_unicode(testdir):
testdir.makepyfile(
"""\
import unittest
class TestClass():
def test_io(self):
raise unittest.SkipTest('😊')
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* 1 skipped *"])
| {
"content_hash": "187f526d5a98b972ee1333f582f45e29",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 88,
"avg_line_length": 23.477453580901855,
"alnum_prop": 0.5445712348887132,
"repo_name": "tomviner/pytest",
"id": "16d8d1fc0e088e1131c49c0d31547ef8405abf92",
"size": "8854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/test_nose.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "607"
},
{
"name": "Python",
"bytes": "1945670"
},
{
"name": "Shell",
"bytes": "374"
}
],
"symlink_target": ""
} |
class BeansException(Exception):
code = ''
message = ''
def __init__(self, error):
if 'code' in error:
self.code = error['code']
if 'message' in error:
self.message = error['message']
Exception.__init__(self, self.message) | {
"content_hash": "9e5688afe5963ada4da3329c049aaa3d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 46,
"avg_line_length": 23.75,
"alnum_prop": 0.5333333333333333,
"repo_name": "trybeans/Beans-API-Python",
"id": "2f0bb9cf083c7bda30415fa85187026a235fe93d",
"size": "285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beans/exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4351"
}
],
"symlink_target": ""
} |
import os
import sys
import pandas as pd
from glob import glob
import subprocess
def main():
    if len(sys.argv) != 3:
        print("Usage: extract_logs_from_nextflow.py <nextflow_trace_file> <task_tag>:<stderr|stdout|both>", file=sys.stderr)
        sys.exit(1)
# Load Nextflow trace
nextflow_trace = pd.read_csv(sys.argv[1],sep="\t")
#Get task you are interested in and whether you want stderr/stdout/both
# Format <task>:<stderr|stdout|both>
task_id,output_type = sys.argv[2].split(":")
#Create dirs
out_dir="nextflow_logs/{}".format(task_id)
os.makedirs(out_dir)
# Subset tasks of interest
my_tasks = list(nextflow_trace[ (nextflow_trace.process == task_id) ][["hash","tag","status"]].itertuples(index=False,name=None))
if len(my_tasks) == 0:
print("No tasks were found",file=sys.stderr)
# Iterate through tasks
for t_hash,t_tag,t_status in my_tasks:
task_dir= get_hash_directory(t_hash)
if not task_dir:
print("Error: work/{}* directory was not found".format(t_hash))
continue
print("{}: {}".format(t_tag,task_dir))
out_prefix="{}_{}_{}".format(t_tag,t_status[0],t_hash.replace("/","_"))
if output_type != "stderr":
copy_file_into_dir("{}/.command.out".format(task_dir),out_dir,prefix=out_prefix)
if output_type != "stdout":
copy_file_into_dir("{}/.command.err".format(task_dir),out_dir,prefix=out_prefix)
# Helper functions
def get_hash_directory( h ):
my_task_dir = None
matching_dirs = glob("work/{}*".format(h))
if len(matching_dirs) == 1:
my_task_dir = matching_dirs[0]
return my_task_dir
def copy_file_into_dir(my_file,my_dir,prefix=""):
print("\t{}".format(my_file))
subprocess.check_call(["cp",my_file,"{}/{}.{}".format(my_dir,prefix,my_file[-3:])])
if __name__ == '__main__':
main()
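# Example invocation (the trace file and task name below are hypothetical):
#   python extract_logs_from_nextflow.py pipeline_trace.txt BWA_ALIGN:both
# This would copy .command.out and .command.err of every BWA_ALIGN task found
# in the trace into nextflow_logs/BWA_ALIGN/.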
| {
"content_hash": "273c63a1a65db840be72e1fcec01202a",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 133,
"avg_line_length": 34.36363636363637,
"alnum_prop": 0.6126984126984127,
"repo_name": "maubarsom/biotico-tools",
"id": "5923ee6d9a18e47688fc8ccfb2e8e576c26d1c03",
"size": "1913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/extract_logs_from_nextflow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "317"
},
{
"name": "HTML",
"bytes": "3250417"
},
{
"name": "Java",
"bytes": "5267"
},
{
"name": "Jupyter Notebook",
"bytes": "914252"
},
{
"name": "Makefile",
"bytes": "32185"
},
{
"name": "Python",
"bytes": "112838"
},
{
"name": "R",
"bytes": "3701"
},
{
"name": "Shell",
"bytes": "7169"
}
],
"symlink_target": ""
} |
import Evtx.Evtx as evtx
import Evtx.Views as e_views
def main():
import argparse
parser = argparse.ArgumentParser(
description="Print the structure of an EVTX record's template.")
parser.add_argument("evtx", type=str,
help="Path to the Windows EVTX file")
parser.add_argument("record", type=int,
help="Record number")
args = parser.parse_args()
with evtx.Evtx(args.evtx) as log:
r = log.get_record(args.record)
if r is None:
print("error: record not found")
return -1
else:
print(e_views.evtx_template_readable_view(r.root()))
if __name__ == "__main__":
main()
| {
"content_hash": "abdded46e06428bf09fe3b48e071df84",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 72,
"avg_line_length": 27.384615384615383,
"alnum_prop": 0.5758426966292135,
"repo_name": "williballenthin/python-evtx",
"id": "29f21678b31441f8ce8d1df6559f0b10927193c3",
"size": "736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/evtx_record_template.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "151514"
}
],
"symlink_target": ""
} |
"""
Copyright (C) 2014 Maruf Maniruzzaman
Website: http://cosmosframework.com
Author: Maruf Maniruzzaman
License :: OSI Approved :: MIT License
""" | {
"content_hash": "1f80575cadb3609199e4c5ef4ebc5b64",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 40,
"avg_line_length": 25.5,
"alnum_prop": 0.7254901960784313,
"repo_name": "kuasha/cosmos",
"id": "27874dd85d78bd83881b7941bc6ccea7fe693d60",
"size": "153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cosmos/rbac/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2191509"
}
],
"symlink_target": ""
} |
'''
author:Crisschan
time:2016-6-30
'''
from mitmproxy import controller, proxy,flow
from mitmproxy.proxy import ProxyServer,ProxyConfig
import os
import sys
import pdb
import requests
import datetime
from F2requests import F2requests
from Cfg import Cfg
class FriedRing(controller.Master):
#fscript =
def __init__(self, server,fnamescript):
curpath = os.path.abspath(os.curdir)
fscriptsolutionpath = os.path.join(curpath,fnamescript)
if not os.path.isdir(fscriptsolutionpath):
os.makedirs(fscriptsolutionpath)
else:
fnamescript=fnamescript+str(datetime.datetime.now().microsecond)
fscriptsolutionpath = os.path.join(curpath,fnamescript)
os.makedirs(fscriptsolutionpath)
Cfg(fscriptsolutionpath)
self.fnamescript=str(fscriptsolutionpath)+'/test_scripts/script.py'
        print 'script solution path (includes script files, config files and results): '+str(fscriptsolutionpath)
controller.Master.__init__(self, server)
self.f2r = F2requests(self.fnamescript)
#def shutdown(self):
# self.shutdown()
def run(self):
try:
return controller.Master.run(self)
except KeyboardInterrupt:
self.shutdown()
def handle_request(self, msg):
#print msg
req=msg.request
print str(req.host)+str(req.path)
self.f2r.F2Req(req)
msg.reply()
def handle_response(self, msgg):
#print msg
msgg.reply()
res = msgg.response
'''
print res.status_code
print res.headers
print res.content+'\n'
print res.reason+'\n'
print res.timestamp_start+'\n'
print res.timestamp_end+'\n'
print '--------------------------------------\n'
'''
'''if __name__ == '__main__':
config = proxy.ProxyConfig(
cadir = os.path.expanduser("~/.mitmproxy/"),
port=8888
)
server = proxy.ProxyServer(config)
m = FriedRing(server)
m.run()'''
| {
"content_hash": "e4602b8987d515ddbe362fdba492e5d7",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 109,
"avg_line_length": 30.313432835820894,
"alnum_prop": 0.6193993106843919,
"repo_name": "crisschan/FriedRing",
"id": "6f5a4478c98856b348f9aea130dfff8fd5efe296",
"size": "2067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/lib/FriedRing/FriedRing.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "55561"
}
],
"symlink_target": ""
} |
'''
A SPARQL driver
Copyright 2012-2020 Codinuum Software Lab <https://codinuum.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# Fortran namespaces added by Masatomo Hashimoto <[email protected]>
import logging
from .siteconf import SPARQL_ENDPOINT
from .virtuoso import (ODBCDriver, VIRTUOSO_PW, VIRTUOSO_PORT,
get_odbc_connect_string)
from . import ns
from cca.factutil.const import (ENTITY_NS, VARIANT_NS, SVNREV_NS, GITREV_NS,
RELEASE_NS)
from .common import setup_logger
logger = logging.getLogger()
NAMESPACES = {
'xsd': ns.XSD_NS,
'owl': ns.OWL_NS,
'rdf': ns.RDF_NS,
'fb': ns.FB_NS,
'src': ns.SRC_NS,
'ver': ns.VER_NS,
'chg': ns.CHG_NS,
'git': ns.GIT_NS,
'ent': ENTITY_NS,
'variant': VARIANT_NS,
'svnrev': SVNREV_NS,
'gitrev': GITREV_NS,
'rel': RELEASE_NS,
'f': ns.F_NS,
'pa': ns.PA_NS,
'fjpa': ns.FJPA_NS,
'fpt': ns.FPT_NS,
'fjpadata': ns.PREFIX_TBL['fjpadata'],
'entpair': ns.PREFIX_TBL['entpair'],
'chgpat': ns.PREFIX_TBL['chgpat'],
'chginst': ns.PREFIX_TBL['chginst'],
}
def get_localname(s):
res = s
if s:
try:
if s.startswith('http://'):
res = (s.split('/'))[-1].split('#')[-1]
except Exception as e:
logger.warning(str(e))
return res
class Driver(object):
def __init__(self):
self._ns_tbl = {}
for (n, p) in NAMESPACES.items():
self._ns_tbl[p] = n
def to_prefixed_form(self, v):
r = v
if v:
try:
for p in self._ns_tbl.keys():
if str(v).startswith(p):
r = '%s:%s' % (self._ns_tbl[p], v[len(p):])
break
except Exception as e:
logger.warning('"%s": %s' % (v, e))
return r
def execute(self, q):
pass
def query(self, q, abbrev=False):
return None
def fetchone(self, q, abbrev=False):
return None
class VirtuosoODBCDriver(ODBCDriver, Driver):
def __init__(self, pw=VIRTUOSO_PW, port=VIRTUOSO_PORT):
connect_string = get_odbc_connect_string(pwd=pw, port=port)
ODBCDriver.__init__(self, connect_string)
Driver.__init__(self)
def conv_row(self, row, abbrev=False):
if row and abbrev:
for (k, v) in row.items():
row[k] = self.to_prefixed_form(v)
return row
def query(self, q, abbrev=False):
# logger.debug('query:\n{}'.format(q))
for qvs, row in ODBCDriver.query(self, 'SPARQL\n'+q):
yield qvs, self.conv_row(row, abbrev)
def execute(self, q):
ODBCDriver.execute(self, 'SPARQL\n'+q)
def fetchone(self, q, abbrev=False):
r = ODBCDriver.fetchone(self, 'SPARQL\n'+q)
if r:
r = self.conv_row(r, abbrev)
return r
class VirtuosoHTTPDriver(Driver):
def __init__(self, endpoint=SPARQL_ENDPOINT):
self._endpoint = endpoint
def conv_binding(self, b, abbrev=False):
d = {}
for k in b.keys():
data = b[k]
v = str(data['value'])
ty = data['type']
if ty == 'typed-literal':
dty = self.to_prefixed_form(data['datatype'])
logger.debug('%s (%s)' % (v, dty))
if dty == 'xsd:decimal':
v = float(v)
elif dty == 'xsd:integer':
v = int(v)
if abbrev:
if ty == 'uri':
v = self.to_prefixed_form(v)
d[k] = v
return d
def _exec(self, q, limit=-1):
import json
from urllib.parse import urlencode
from urllib.request import Request, urlopen
format = 'application/json'
if limit < 0:
maxrows = ''
else:
maxrows = str(limit)
params = {
'query': q,
'format': format,
'maxrows': maxrows,
}
qpart = urlencode(params)
        req = Request(self._endpoint, qpart.encode('utf-8'))
response = urlopen(req).read()
result = json.loads(response)
return result
def execute(self, q):
self._exec(q)
def fetchone(self, q, abbrev=False):
row = None
try:
r = self._exec(q, limit=1)
b = r['results']['bindings'][0]
row = self.conv_binding(b, abbrev)
except Exception:
pass
return row
def query(self, q, abbrev=False, limit=-1):
result = self._exec(q, limit)
for b in result['results']['bindings']:
qvs = [str(v) for v in result['head']['vars']]
yield qvs, self.conv_binding(b, abbrev)
def get_driver(method='http', pw=VIRTUOSO_PW, port=VIRTUOSO_PORT):
driver = None
if method == 'http':
driver = VirtuosoHTTPDriver()
elif method == 'odbc':
driver = VirtuosoODBCDriver(pw=pw, port=port)
else:
logger.error('unknown method: "%s"' % method)
return driver
def main():
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description='Execute SPARQL Query',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('query_file', type=str, help='query file')
parser.add_argument('-d', '--debug', dest='debug', action='store_true',
help='enable debug printing')
parser.add_argument('--port', dest='port', default=VIRTUOSO_PORT,
metavar='PORT', type=int, help='set port number')
parser.add_argument('--pw', dest='pw', metavar='PASSWORD',
default=VIRTUOSO_PW,
help='set password to access DB')
parser.add_argument('-m', '--method', dest='method', default='odbc',
metavar='METHOD', type=str,
help='execute query via METHOD (http|odbc)')
args = parser.parse_args()
log_level = logging.INFO
if args.debug:
log_level = logging.DEBUG
setup_logger(logger, log_level)
qfile = args.query_file
logger.info('method: "%s"' % args.method)
logger.info('query: "%s"' % qfile)
driver = get_driver(args.method, pw=args.pw, port=args.port)
count = 0
try:
f = open(qfile, 'r')
q = f.read()
f.close()
for vs, r in driver.query(q, abbrev=True):
row = []
for v in vs:
row.append(' %s="%s"' % (v, r[v]))
print('* row[%d]' % count)
print('\n'.join(row))
count += 1
except Exception: # as e:
# logger.error(str(e))
raise
print('%d rows' % count)
def test():
# sparql = VirtuosoODBCDriver()
sparql = VirtuosoHTTPDriver()
q = 'DEFINE input:inference "ont.cpi" SELECT ?s ?p ?o WHERE { ?s ?p ?o } LIMIT 10'
for r in sparql.query(q):
print(r)
if __name__ == '__main__':
main()
| {
"content_hash": "c6bfe30239e12f7b3ba264a13f8bd854",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 86,
"avg_line_length": 26.764912280701754,
"alnum_prop": 0.5384111169375984,
"repo_name": "codinuum/cca",
"id": "33ae152bbabe4ed80c590a6167698df987833f0b",
"size": "7653",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/src/cca/ccautil/sparql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "14313"
},
{
"name": "Dockerfile",
"bytes": "2875"
},
{
"name": "HTML",
"bytes": "3595"
},
{
"name": "JavaScript",
"bytes": "97522"
},
{
"name": "Makefile",
"bytes": "48422"
},
{
"name": "OCaml",
"bytes": "8499239"
},
{
"name": "Python",
"bytes": "379461"
},
{
"name": "Shell",
"bytes": "13648"
},
{
"name": "Standard ML",
"bytes": "84941"
},
{
"name": "Verilog",
"bytes": "129"
}
],
"symlink_target": ""
} |
from ds.vortex.core import baseNode
from ds.vortex.core import plug as plugs
class ScalarNode(baseNode.BaseNode):
def __init__(self, name):
"""
:param name: str, the name of the node
"""
baseNode.BaseNode.__init__(self, name)
def initialize(self):
baseNode.BaseNode.initialize(self)
self.outputPlug_ = plugs.OutputPlug("output", self)
self.valuePlug_ = plugs.InputPlug("value", self, value=0)
self.addPlug(self.outputPlug_, clean=True)
self.addPlug(self.valuePlug_, clean=True)
self.plugAffects(self.valuePlug_, self.outputPlug_)
def compute(self, requestPlug):
baseNode.BaseNode.compute(self, requestPlug=requestPlug)
if requestPlug != self.outputPlug_:
return
result = float(self.valuePlug_.value)
requestPlug.value = result
requestPlug.dirty = False
return result
def getNode():
    """General function that returns our node class, used to create our node via the UI etc.
:return: Node instance
"""
return ScalarNode
| {
"content_hash": "80042715010b4c4101a9ceaf5957fa26",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 85,
"avg_line_length": 31.13888888888889,
"alnum_prop": 0.6244424620874219,
"repo_name": "dsparrow27/vortex",
"id": "ad8d1207529a612ca9a6cb05f3f2534eb9db3598",
"size": "1121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ds/vortex/nodes/constants/scalar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "139103"
}
],
"symlink_target": ""
} |
import os
import hashlib
import gzip
import time
from sys import argv
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from config import (AWS_KEY, AWS_SECRET_KEY, AWS_BUCKET, AWS_DIRECTORY,
HTML_EXPIRES, STATIC_EXPIRES, IGNORE_DIRECTORIES, IGNORE_FILES,
IGNORE_FILE_TYPES)
content_types = {
'.css': 'text/css',
'.js': 'text/javascript',
'.png': 'image/png',
'.jpg': 'image/jpeg',
'.bmp': 'image/bmp',
'.gif': 'image/gif',
'.ico': 'image/ico',
'.csv': 'text/csv',
'.html': 'text/html',
'.svg': 'image/svg+xml',
'.pdf': 'application/pdf',
'.json': 'text/json'
}
def directory_list(argv, directory='app/build'):
    """Creates a list of all non-excluded files in the given directory
    and below"""
    if len(argv) > 1:
        IGNORE_DIRS = IGNORE_DIRECTORIES + argv[1:]
    else:
        IGNORE_DIRS = IGNORE_DIRECTORIES
file_list = []
for root, dirs, files in os.walk(directory):
for d in IGNORE_DIRS:
if d in dirs:
dirs.remove(d)
for f in IGNORE_FILES:
if f in files:
files.remove(f)
        # Iterate over a copy so that removing entries doesn't skip files.
        for f in list(files):
            ext = os.path.splitext(f)[1]
            if ext in IGNORE_FILE_TYPES:
                files.remove(f)
file_list.append((root, files))
return file_list
def s3_filename():
"""Takes list of files to be uploaded and modifies the names so that they
will be served properly from s3"""
file_list = directory_list(argv)
s3_list = []
for f in file_list:
for i in f[1]:
ext = os.path.splitext(i)[1]
if ext in IGNORE_FILE_TYPES:
pass
else:
                if f[0] != '.':
s3_list.append(f[0][10:] + '/' + i)
else:
s3_list.append(i)
return s3_list
def set_metadata():
"""Take a list of files to be uploaded to s3 and gzip CSS, JS, and HTML,
setting metadata for all files including an 'expires' header defined
at the beginning of the file. HTML expires after 1 hour."""
s3_list = s3_filename()
conn = S3Connection(AWS_KEY, AWS_SECRET_KEY)
mybucket = conn.get_bucket(AWS_BUCKET)
expires = time.time() + STATIC_EXPIRES
expires_header = time.strftime("%a, %d-%b-%Y %T GMT", time.gmtime(expires))
for filename in s3_list:
k = Key(mybucket)
ext = os.path.splitext(filename)[1]
if ext == '':
ext = '.html'
if ext == '.html': # deletes '.html' from s3 key so no ext on url
local_name = os.path.splitext(filename)[0]
if local_name == '/index':
local_name = '/index.html'
if local_name[0] != '/': # if file within child dir
k.key = AWS_DIRECTORY + '/' + local_name
else: # if file in top level dir
k.key = AWS_DIRECTORY + local_name
k.set_metadata('Expires', time.time() + HTML_EXPIRES)
else:
k.key = AWS_DIRECTORY + '/' + filename # strip leading 0
k.set_metadata('Expires', expires_header)
if ext == '.css' or ext == '.js' or ext == '.html':
build_file = 'app/build/' + filename
f_in = open(build_file, 'rb')
with gzip.open(build_file + '.gz', 'w+') as f:
f.writelines(f_in)
f_in.close()
f = build_file + '.gz'
k.set_metadata('Content-Encoding', 'gzip')
else:
f = 'app/build/' + filename
print k.key
k.set_metadata('Content-Type', content_types[ext])
etag_hash = hashlib.sha1(f + str(time.time())).hexdigest()
k.set_metadata('ETag', etag_hash)
k.set_contents_from_filename(f)
k.make_public()
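# Minimal invocation sketch (not part of the original script, which defines the
# functions above but never calls them): a deploy step would presumably just
# call set_metadata(); any extra command-line arguments are treated as
# additional directory names to exclude from the upload.
if __name__ == '__main__':
    # e.g. `python upload_s3.py drafts tmp` also skips the "drafts" and "tmp"
    # directories under app/build.
    set_metadata()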
| {
"content_hash": "467f4ca03921eccc7936c6aa094c16c6",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 79,
"avg_line_length": 31.596638655462186,
"alnum_prop": 0.5444148936170212,
"repo_name": "vprnet/dorothy-2013",
"id": "3265447e95db06d8738a2cd1d84b38d059110383",
"size": "3784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "upload_s3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "63434"
},
{
"name": "JavaScript",
"bytes": "1153"
},
{
"name": "Python",
"bytes": "8591"
}
],
"symlink_target": ""
} |
import sys, os, re
import urllib
import json
import random
def results(parsed, original_query):
number = parsed.get('number', '')
number = re.sub(r'[^0-9]', '', number)
html = open("kuaidi.html").read().replace("<!--NUMBER-->", number)
return {
"title": u'快递 100 搜索 "%s" 结果' % number,
"html": html,
"run_args": [number],
"webview_user_agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 7_0 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11A465 Safari/9537.53",
"webview_links_open_in_browser": True,
}
def run(number):
if number:
os.system('open "http://m.kuaidi100.com/result.jsp?nu={0}"'.format(urllib.quote(number)))
| {
"content_hash": "6a6d7abdfb9bbea63159d9f24c5021e6",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 168,
"avg_line_length": 35.75,
"alnum_prop": 0.6125874125874126,
"repo_name": "allenhsu/FlashlightPlugins",
"id": "0da71c2516ca088b3f1fb95e5f3eb6a90a26ae36",
"size": "770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kuaidi.bundle/plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "16104"
},
{
"name": "Python",
"bytes": "5441"
}
],
"symlink_target": ""
} |
try:
import simplejson as json
except ImportError:
import json
from flask import Flask, Response, render_template, request
from werkzeug.contrib.cache import SimpleCache
import requests
app = Flask('roundup')
app.config.from_object('settings')
# TODO: In a production environment, you would never use an in-memory cache like
# this. You should instead use something like Redis or Memcached.
cache = SimpleCache()
def get_top_posts(*args, **kwargs):
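    """Fetch realtime top posts for every API key in the PARSELY config, tag
    each post with its apikey and logo_url, and return the `limit` posts with
    the most hits (the `_hits` field) across all keys combined."""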
kwargs['time'] = kwargs.get('time') or '6h'
url = '{}/realtime/posts'.format(app.config['PARSELY']['api_base_url'])
posts = []
for apikey, config in app.config['PARSELY']['apikeys'].iteritems():
kwargs['apikey'] = apikey
kwargs['secret'] = config['secret']
apikey_posts = requests.get(url, params=kwargs).json()['data']
for post in apikey_posts:
post['apikey'] = apikey
post['logo_url'] = config.get('logo_url')
posts.extend(apikey_posts)
posts = sorted(posts, key=lambda x: x['_hits'], reverse=True)
return posts[:kwargs['limit']]
@app.route('/top_posts')
def top_posts():
limit = request.args.get('limit', default=app.config['NUMBER_OF_POSTS'],
type=int)
if limit <= 0:
limit = app.config['NUMBER_OF_POSTS']
posts = cache.get('top_posts')
if posts is None:
posts = get_top_posts(limit=limit)
cache.set('top_posts', posts, timeout=app.config['CACHE_EXPIRY'])
return Response(json.dumps(posts), content_type='application/json')
@app.route('/')
def home():
return render_template('home.jinja2.html')
if __name__ == '__main__':
app.run()
| {
"content_hash": "71834cdc0df16c9a619783c1f6196df3",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 80,
"avg_line_length": 28.47457627118644,
"alnum_prop": 0.6351190476190476,
"repo_name": "Parsely/api-examples",
"id": "92f9fa618fcd16d593f4d8c9d8b5288df5077672",
"size": "1680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roundup/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1351"
},
{
"name": "JavaScript",
"bytes": "248387"
},
{
"name": "Python",
"bytes": "2266"
}
],
"symlink_target": ""
} |
from MorcoWrapper2D.mw2_Application import *
from ProjectExample.ExampleScene import *
from ProjectExample.ExampleManager import *
mw2_Application.Settings.width = 400
mw2_Application.Settings.height = 400
mw2_Application.Settings.icon = "../__Resources/Pong/pong_logo.png"
mw2_Application.Settings.title = "Example Project"
mw2_Application.Settings.fonts["../__Resources/Example/Sketch_Block.ttf"] = "Sketch Block"
mw2_Application.Settings.fonts["../__Resources/Example/BrushHandNew.ttf"] = "Brush Hand New"
mw2_Application.Initialize()
mw2_Application.LoadScene( ExampleScene(), ExampleManager() )
mw2_Application.StartGame() | {
"content_hash": "d8e4dcc11617d99a041bb44e460040ad",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 92,
"avg_line_length": 42,
"alnum_prop": 0.7904761904761904,
"repo_name": "MorcoFreeCode/2014__MorcoWrapper2D",
"id": "1e029acfa86632ae4cc23c3bfd219d4706c09bbd",
"size": "630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "full_project/2D/6. Cocos2D [Python2]/mainExample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "115874"
},
{
"name": "C++",
"bytes": "128881"
},
{
"name": "HTML",
"bytes": "8095"
},
{
"name": "Java",
"bytes": "54197"
},
{
"name": "JavaScript",
"bytes": "54258"
},
{
"name": "Lua",
"bytes": "103402"
},
{
"name": "Python",
"bytes": "100813"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.utils.html import format_html, format_html_join
from wagtail.wagtailcore import hooks
from . import urls
@hooks.register('register_admin_urls')
def register_admin_urls():
return [
url(r'^readability/', include(urls)),
]
@hooks.register('insert_editor_js')
def editor_js():
js_files = ['hallo_custombuttons.js']
js_includes = format_html_join(
'\n',
'<script src="{0}{1}"></script>',
((settings.STATIC_URL, filename) for filename in js_files))
return js_includes + format_html("""
<script>registerHalloPlugin('wagtailReadabilityScore');</script>""")
| {
"content_hash": "2a38c524c06f15286bec80f73dc4ed2a",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 27.51851851851852,
"alnum_prop": 0.676985195154778,
"repo_name": "takeflight/wagtail-readability",
"id": "e2df64be967cb638e0fce3ac08ee3eaa15fc58e8",
"size": "743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtailreadability/wagtail_hooks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "618"
},
{
"name": "JavaScript",
"bytes": "5217"
},
{
"name": "Python",
"bytes": "30042"
}
],
"symlink_target": ""
} |
import sys
import time
import csv
from itertools import combinations
# Join n-item set itself and generate (n+1)-item set
# Then prune all the set that are not frequent
def join_prune(k):
rt = {}
if not any(k):
return
keys = k.keys()
if len(keys) < 2:
return
leng = len(keys[0])
candidates = []
for i in range(len(keys)):
for j in range(i+1, len(keys)):
union = set(keys[i] + keys[j])
if len(union) > leng+1:
continue
new_key = tuple(sorted(union))
if new_key not in rt:
prute_yes_no = True
for m in range(len(new_key)):
sub_key = new_key[:m]+ new_key[m+1:]
if sub_key not in k:
prute_yes_no = False
break
if prute_yes_no:
rt[new_key] = 0
return rt
# Join n-item set itself and generate (n+1)-item set
def join_set(k):
rt = {}
if not any(k):
return
keys = k.keys()
if len(keys) < 2:
return
leng = len(keys[0])
candidates = []
for i in range(len(keys)):
for j in range(i+1, len(keys)):
union = set(keys[i] + keys[j])
if len(union) > leng+1:
continue
new_key = tuple(sorted(union))
if new_key not in rt:
rt[new_key] = 0
return rt
# Main function
def solution(data_file, result_file, sigma, row_size=0):
# Remove lines containing fewer than 3 items
matrix = []
    if row_size != 0:
stop = 0
for row in open(data_file):
if stop == row_size:
break
item = row.strip().split(' ')
if len(item)>=3:
item =[int(one_item) for one_item in item]
matrix.append(item)
stop += 1
else:
for row in open(data_file):
item = row.strip().split(' ')
if len(item)>=3:
item =[int(one_item) for one_item in item]
matrix.append(item)
avg_row_leng = 1 + sum([len(row) for row in matrix])/(len(matrix))
with open(result_file, 'w') as csvfile:
fieldnames = ['set_size', 'frequency', 'items']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
# 1st scan and generate 1-item set
L1,C1 = {},{}
start_time = time.time()
for transaction in matrix:
for item in transaction:
item_key = (item,)
if item_key not in C1:
C1[item_key] = 1
else:
C1[item_key] += 1
for item in C1:
if C1[item]>=sigma:
L1[item] = C1[item]
print "round 1"
print "1-item set length:", len(L1.keys())
Lk = L1
Ck = {}
del L1, C1
k = 1
skip_trans = {}
# Loop and generate all frequent itemsets
while any(Lk):
k += 1
print "round %d "%k
if k<=2:
Ck = join_set(Lk)
else:
Ck = join_prune(Lk)
Lk = {}
if Ck is None:
break
        # Optimize for speed: either test each candidate against the transaction,
        # or enumerate the transaction's k-item combinations, whichever is cheaper
len_candidate = len(Ck.keys())
possible_group = 1
for counter in range(k):
possible_group *= avg_row_leng
avg_row_leng -= 1
if possible_group > 2* len_candidate:
for transaction_idx in range(len(matrix)):
if transaction_idx in skip_trans:
continue
transaction = set(matrix[transaction_idx])
for one_candidate in Ck:
set_one_candtdate = set(one_candidate)
if set_one_candtdate.issubset(transaction):
Ck[one_candidate] += 1
for candidate in Ck:
if Ck[candidate] >= sigma:
Lk[candidate] = Ck[candidate]
del Ck
else:
for transaction_idx in range(len(matrix)):
if transaction_idx in skip_trans:
continue
transaction = matrix[transaction_idx]
for one_candidate in combinations(transaction, k):
if one_candidate in Ck:
Ck[one_candidate] += 1
for candidate in Ck:
if Ck[candidate] >= sigma:
Lk[candidate] = Ck[candidate]
del Ck
        # Mark transactions that contain no frequent k-itemset so they can be skipped in later passes
for transaction_idx in range(len(matrix)):
if transaction_idx in skip_trans:
continue
trans_exist = True
transaction = set(matrix[transaction_idx])
for one_candidate in Lk:
set_one_candtdate = set(one_candidate)
if set_one_candtdate.issubset(transaction):
trans_exist = False
break
if trans_exist:
skip_trans[transaction_idx] = 1
print "%d-item frequent set length:%d "%(k,len(Lk))
if k>2:
with open(result_file, 'a') as csvfile:
fieldnames = ['set_size', 'frequency', 'items']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
for key in Lk:
writer.writerow({'set_size': k,
'frequency': Lk[key],
'items': ' '.join([str(item) for item in key])
})
print("--- %s seconds ---" % (time.time() - start_time))
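# A small usage sketch (not part of the original script): it writes a toy
# transaction file, runs the miner with a minimum support of 2, and prints the
# resulting CSV. The file names and threshold here are illustrative only.
def example_run():
    import tempfile
    transactions = ["1 2 3", "1 2 4", "1 2 3 4", "2 3 4"]
    tmp = tempfile.NamedTemporaryFile('w', suffix='.dat', delete=False)
    tmp.write('\n'.join(transactions))
    tmp.close()
    # Frequent 3-item sets here are (1 2 3), (1 2 4) and (2 3 4), each with
    # frequency 2; sets of size <= 2 are not written to the CSV by design.
    solution(tmp.name, 'example_result.csv', sigma=2)
    print open('example_result.csv').read()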
if __name__ == '__main__':
result_file = 'result.csv'
sigma = 4
row_size = 0
data_file = 'input_file_example.dat'
if len(sys.argv) >2 :
try:
data_file = sys.argv[1]
result_file = sys.argv[2]
sigma = int(sys.argv[3])
row_size = int(sys.argv[4])
except IndexError:
pass
solution(data_file, result_file, sigma, row_size)
| {
"content_hash": "cab9276a7596f84477d11cee01c08835",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 71,
"avg_line_length": 23.86868686868687,
"alnum_prop": 0.6214557765552264,
"repo_name": "HaoLyu/Association-rule-learning",
"id": "c43c3b4b9d90479aa0ac7422e6dd1cce51cd9068",
"size": "5139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Apriori.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11326"
}
],
"symlink_target": ""
} |
import sys
import argparse
from cyber_py.record import RecordReader
from modules.control.proto import control_cmd_pb2
from modules.planning.proto import planning_pb2
from modules.canbus.proto import chassis_pb2
from modules.drivers.proto import pointcloud_pb2
from modules.perception.proto import perception_obstacle_pb2
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Recode Analyzer is a tool to analyze record files.",
prog="main.py")
parser.add_argument(
"-f", "--file", action="store", type=str, required=True,
help="Specify the record file for message dumping.")
parser.add_argument(
"-m", "--message", action="store", type=str, required=True,
help="Specify the message topic for dumping.")
parser.add_argument(
"-t", "--timestamp", action="store", type=float, required=True,
help="Specify the timestamp for dumping.")
args = parser.parse_args()
record_file = args.file
reader = RecordReader(record_file)
for msg in reader.read_messages():
timestamp = msg.timestamp / float(1e9)
if msg.topic == args.message and abs(timestamp - args.timestamp) <=1:
if msg.topic == "/apollo/perception/obstacles":
perception_obstacles = \
perception_obstacle_pb2.PerceptionObstacles()
perception_obstacles.ParseFromString(msg.message)
with open('perception_obstacles.txt', 'w') as f:
f.write(str(perception_obstacles))
print str(perception_obstacles)
break
| {
"content_hash": "5188166d1368276eee1c7ab90887cff6",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 77,
"avg_line_length": 38.642857142857146,
"alnum_prop": 0.6481823783117683,
"repo_name": "wanglei828/apollo",
"id": "da9525214c62b9e51f26933760fe72253ec73bc3",
"size": "2407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/tools/record_analyzer/tools/dump_message.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1922"
},
{
"name": "Batchfile",
"bytes": "791"
},
{
"name": "C",
"bytes": "22662"
},
{
"name": "C++",
"bytes": "17378263"
},
{
"name": "CMake",
"bytes": "3600"
},
{
"name": "CSS",
"bytes": "40785"
},
{
"name": "Cuda",
"bytes": "97324"
},
{
"name": "Dockerfile",
"bytes": "11960"
},
{
"name": "GLSL",
"bytes": "7000"
},
{
"name": "HTML",
"bytes": "21068"
},
{
"name": "JavaScript",
"bytes": "364183"
},
{
"name": "Makefile",
"bytes": "6626"
},
{
"name": "Python",
"bytes": "1902086"
},
{
"name": "Shell",
"bytes": "302902"
},
{
"name": "Smarty",
"bytes": "33258"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from Dbg import Dbg
from Tst import Tst
from Stencil import Stencil
import os, sys, time, json
dbg = Dbg()
def find_py_file(name):
for path in sys.path:
fn = os.path.join(path, name)
if (os.path.isfile(fn)):
return fn
class JobSubmitBase(object):
def __init__(self, masterTbl):
self.__funcT = {
'CWD' : self.CWD,
'findcmd': self.findcmd,
'mpr' : self.mpr,
'queue' : self.queue,
'submit' : self.submit,
}
self.__masterTbl = masterTbl
self.resultMaxLen = masterTbl['resultMaxLen']
baseFn = find_py_file("DefaultSystems.py")
derivedFn = find_py_file("Systems.py")
namespace = {}
exec(open(baseFn).read(), namespace)
exec(open(derivedFn).read(), namespace)
Systems = namespace['Systems']
DefaultSystems = namespace['DefaultSystems']
self.__batchTbl = {}
batch_hostname = os.environ.get("BATCH_HOSTNAME","INTERACTIVE")
if (batch_hostname == "INTERACTIVE"):
self.__batchTbl = DefaultSystems['INTERACTIVE']
else:
for k in Systems:
if (batch_hostname in Systems[k]):
self.__batchTbl = DefaultSystems[k].copy()
self.__batchTbl.update(Systems[k][batch_hostname])
if (not self.__batchTbl):
      raise RuntimeError("Unable to find BatchSystems entry for %s" % batch_hostname)
def has_function(self, name):
return name in self.__funcT
def funcT(self, name, argA, argT, envTbl, funcTbl):
bound = self.__funcT[name].__get__(self, type(self))
s = bound(argA, argT, envTbl, funcTbl)
return s
def batchTbl(self):
return self.__batchTbl
def masterTbl(self):
return self.__masterTbl
@staticmethod
def build(name, masterTbl):
if (name.lower() == "interactive"):
obj = Interactive(masterTbl)
else:
obj = Batch(masterTbl)
return obj
def formatMsg(self, result, iTest, passed, failed, num_tests, ident):
blank = " "
r = result or "failed"
blankLen = self.resultMaxLen - len(r)
#msg = "{}{} : {} tst: {}/{} P/F: {}:{}, {}".format(
msg = "%s%s : %s tst: %d/%d P/F: %d:%d, %s" % (
blank*(blankLen),
result,
time.strftime("%X",time.localtime(time.time())),
iTest, num_tests,
passed, failed,
ident)
return msg
def msg(self, messageStr, iTest, num_tests, ident, resultFn, background):
if (messageStr != "Started" and background):
print("")
return
masterTbl = self.__masterTbl
msgExtra = ""
if (messageStr != "Started" and not background):
msgExtra = "\n"
resultT = json.loads(open(resultFn).read())
myResult = resultT['testresult']
if (myResult == "passed"):
masterTbl['passed'] += 1
else:
masterTbl['failed'] += 1
messageStr = myResult
print(self.formatMsg(messageStr, iTest, masterTbl['passed'],
masterTbl['failed'], num_tests, ident), msgExtra)
def CWD(self, argA, argT, envTbl, funcTbl):
batchTbl = self.batchTbl()
return batchTbl['CurrentWD']
def findcmd(self, argA, argT, envTbl, funcTbl):
result = None
cmd = argT.get('cmd',"")
    pathA = (argT.get('path') or os.environ.get('PATH',"")).split(":")
for path in pathA:
fn = os.path.join(path, cmd)
if (os.path.exists(fn)):
result = fn
break
    return result
def mpr(self, argA, argT, envTbl, funcTbl):
batchTbl = self.batchTbl()
stencil = Stencil(argA = argA, tbl=argT, envTbl=envTbl, funcTbl=funcTbl)
return stencil.expand(batchTbl['mprCmd'])
def queue(self,argA, argT, envTbl, funcTbl):
return ""
def submit(self, argA, argT, envTbl, funcTbl):
batchTbl = self.batchTbl()
stencil = Stencil(argA = argA, tbl=argT, envTbl=envTbl, funcTbl=funcTbl)
s = stencil.expand(batchTbl['submitHeader'])
dbg.print("submit: s:\n",s,"\n")
return s
class Batch(JobSubmitBase):
def __init__(self, masterTbl):
super(Batch, self).__init__(masterTbl)
def queue(self, argA, argT, envTbl, funcTbl):
batchTbl = self.batchTbl()
queueT = batchTbl['queueTbl']
name = argT.get('name',"")
return queueT.get(name) or name
def runtest(self, **kw):
masterTbl = self.masterTbl()
batchTbl = self.batchTbl()
logFileNm = masterTbl.get('batchLog') or kw['idtag'] + ".log"
sA = []
sA.append(batchTbl.get('submitCmd') or "")
sA.append(kw['scriptFn'])
sA.append(">>")
sA.append(logFileNm)
sA.append("2>&1 < /dev/null")
s = " ".join(sA)
os.system(s)
class Interactive(JobSubmitBase):
def __init__(self, masterTbl):
super(Interactive, self).__init__(masterTbl)
def runtest(self, **kw):
sA = []
sA.append("./" + kw['scriptFn'])
sA.append(">")
sA.append(kw['idtag'] + ".log")
sA.append("2>&1 < /dev/null")
if (kw['background']):
sA.append("&")
s = " ".join(sA)
os.system(s)
| {
"content_hash": "469c589ba5d6314e80d897c4ab90dd44",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 77,
"avg_line_length": 28.296703296703296,
"alnum_prop": 0.5753398058252427,
"repo_name": "rtmclay/Themis",
"id": "06db117ba15d8de2392e7382a48ce6414712f921",
"size": "5150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rtm/JobSubmitBase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3170"
},
{
"name": "Makefile",
"bytes": "5769"
},
{
"name": "Python",
"bytes": "75215"
},
{
"name": "Shell",
"bytes": "1067"
}
],
"symlink_target": ""
} |
import logging
import logging.handlers
import sys
from opennode.cli.config import get_config
_configured = False
def get_logger(level=None):
logger = logging.getLogger('opennode-tui')
if level is not None:
logger.setLevel(level)
return logger
def _configure():
logger = get_logger()
global _configured
if not getattr(logger, '_configured', False) and not _configured:
conf_level = get_config().getstring('general', 'loglevel', 'INFO')
level = logging._levelNames.get(conf_level.upper())
if level is None:
level = logging.INFO
logger.setLevel(level)
fh = logging.handlers.WatchedFileHandler(get_config().getstring('general', 'log-location',
'/var/log/opennode-tui.log'))
format_str = '%(asctime)s %(levelname)7s %(module)10s:%(lineno)s:%(funcName)s - %(message)s'
fhformatter = logging.Formatter(format_str)
fh.setFormatter(fhformatter)
logger.addHandler(fh)
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.INFO)
sh.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(sh)
sherr = logging.StreamHandler()
sherr.setLevel(logging.ERROR)
sherr.setFormatter(logging.Formatter('%(module)10s:%(lineno)s:%(funcName)s - %(message)s'))
logger.addHandler(sherr)
_configured = True
_configure()
| {
"content_hash": "b6d94de29480016ffd8bedbe5aa864f8",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 101,
"avg_line_length": 30.20408163265306,
"alnum_prop": 0.620945945945946,
"repo_name": "tsudmi/opennode-tui",
"id": "f381eb52dbaf4a8f9609cc4f6d90783ff0770a46",
"size": "1480",
"binary": false,
"copies": "1",
"ref": "refs/heads/tui-separation",
"path": "opennode-tui/opennode/cli/log.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "96341"
},
{
"name": "Shell",
"bytes": "9797"
}
],
"symlink_target": ""
} |
from tempest.api.network import base
from networking_fortinet.tests.tempest_plugin.tests import fwaas_client
class BaseFWaaSTest(fwaas_client.FWaaSClientMixin, base.BaseNetworkTest):
_delete_wrapper = base.BaseNetworkTest._try_delete_resource
| {
"content_hash": "8c09fac0a342d84b5eeb0377ec23f1d6",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 73,
"avg_line_length": 32.375,
"alnum_prop": 0.7953667953667953,
"repo_name": "samsu/networking-fortinet",
"id": "3618b82bb31e0f1e35a739a199822ae5896a0766",
"size": "905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networking_fortinet/tests/tempest_plugin/tests/api/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "367"
},
{
"name": "Python",
"bytes": "1744303"
},
{
"name": "Shell",
"bytes": "13362"
}
],
"symlink_target": ""
} |
from grow.common import utils
from grow.pods import storage
import jinja2
import logging
import os
import webapp2
import webob
import werkzeug
_root = os.path.join(utils.get_grow_dir(), 'server', 'templates')
_loader = storage.FileStorage.JinjaLoader(_root)
_env = jinja2.Environment(loader=_loader, autoescape=True, trim_blocks=True,
extensions=['jinja2.ext.i18n'])
class BaseHandler(webapp2.RequestHandler):
def handle_exception(self, exception, debug):
if debug:
logging.exception(exception)
else:
logging.error(str(exception))
template = _env.get_template('error.html')
html = template.render({'error': {'title': str(exception)}})
if isinstance(exception, webob.exc.HTTPException):
self.response.set_status(exception.code)
else:
self.response.set_status(500)
self.response.write(html)
def respond_with_controller(self, controller):
headers = controller.get_http_headers()
self.response.headers.update(headers)
if 'X-AppEngine-BlobKey' in self.response.headers:
return
return self.response.out.write(controller.render())
class PodHandler(BaseHandler):
def get(self):
pod = self.app.registry['pod']
try:
controller = pod.routes.match(self.request.path, self.request.environ)
self.respond_with_controller(controller)
except werkzeug.routing.RequestRedirect as e:
self.redirect(e.new_url)
| {
"content_hash": "30ee5cf19692670a69778d5856ebffad",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 76,
"avg_line_length": 30.48936170212766,
"alnum_prop": 0.7083042568039079,
"repo_name": "vitorio/pygrow",
"id": "602e1e03db850faa33300d3230c30e472ed407b2",
"size": "1433",
"binary": false,
"copies": "1",
"ref": "refs/heads/objstoreurls2",
"path": "grow/server/handlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "177"
},
{
"name": "HTML",
"bytes": "4287"
},
{
"name": "Python",
"bytes": "243109"
},
{
"name": "Shell",
"bytes": "2577"
}
],
"symlink_target": ""
} |
"""
# Meta-info
Author: Nelson Brochado
Created: 10/09/2016
Updated: 07/03/2018
# Description
The forward Euler's method is the easiest method for approximately solving an
initial value ODE problem. In practice, it's used as a vehicle for studying
several important and basic notions on numerical ODE methods.
## Euler's method derivation
We first consider finding an approximate solution for a scalar initial value ODE
problem at equidistant abscissae. Thus, we define the points:
t₀ = a
tᵢ = a + i * h,
where h = (b - a) / N is the step size, for i = 0, 1, 2, ... N. t here is called
an "independent variable", a and b are respectively the beginning and end of the
interval at which we're trying to numerical approximate y(t).
We denote the approximate solution to y(tᵢ) by yᵢ.
Note: in general, the step size h could be variable, i.e. we could have a hᵢ,
but we keep it constant in this explanation and implementation.
Consider the following "forward difference" formula:
y'(tᵢ) = (y(tᵢ₊₁) - y(tᵢ)) / h - h / 2 * y''(Ζᵢ)
By the ODE, y'(tᵢ) = f(tᵢ, y(tᵢ)). So, we manipulate the expression above to
have y(tᵢ₊₁) isolated in one side. We start by multiplying both sides by h:
h * y'(tᵢ) = y(tᵢ₊₁) - y(tᵢ) - (h² / 2 * y''(Ζᵢ))
or, if we rearrange the entries,
h * y'(tᵢ) + y(tᵢ) + (h² / 2 * y''(Ζᵢ)) = y(tᵢ₊₁)
we further rearrange the entries so that what we're looking for is on the left
side of the equals:
y(tᵢ₊₁) = y(tᵢ) + h * y'(tᵢ) + (h² / 2 * y''(Ζᵢ))
and we replace y'(tᵢ) by f(tᵢ, y(tᵢ))
y(tᵢ₊₁) = y(tᵢ) + h * f(tᵢ, y(tᵢ)) + (h² / 2 * y''(Ζᵢ))
dropping the truncation term, i.e. (h² / 2 * y''(Ζᵢ)), we obtain the forward
Euler method, which defines the approximate solution (yᵢ){ᵢ₌₀}^{N} by:
y₀ = c
where c is the initial value.
y(tᵢ₊₁) = y(tᵢ) + h * f(tᵢ, y(tᵢ))
for i = 0, ..., N - 1.
This simple formula allows us to march forward in t.
### Notes
In the following implementation, we assume that f and all other parameters are
specified.
## Explicit vs Implicit methods
What happens if we replace the forward difference formula with the backward
difference formula below?
    y'(tᵢ₊₁) ~ (y(tᵢ₊₁) - y(tᵢ)) / h
This leads, by the same reasoning as above, to the backward Euler method:
y₀ = c
yᵢ₊₁ = yᵢ + h * f(tᵢ₊₁, yᵢ₊₁)
for i = 0, ..., N - 1.
There's actually a big difference between this new method and the previous one:
here the formula for yᵢ₊₁ depends implicitly on yᵢ₊₁ itself.
In general, if the formula for yᵢ₊₁ depends implicitly on yᵢ₊₁, the method is
called an implicit method, whereas the forward Euler method is an explicit
method. (A small sketch of the backward Euler method is appended at the end of
this module.)
# References
- First Course in Numerical Methods, chapter 16, by Uri M. Ascher and C. Greif
- https://www.khanacademy.org/math/ap-calculus-bc/diff-equations-bc/eulers-method-bc/v/eulers-method-program-code
"""
__all__ = ["forward_euler", "forward_euler_approx"]
from numpy import arange, zeros
def forward_euler(a: float, b: float, n: int, c: float, f: callable) -> tuple:
"""Forward Euler method, with y = f(x, y), with initial value c, and range
[a, b]. n is the number of times to split the range [a, b], and is thus used
to calculate the step size h.
It returns a tuple, whose first element is the array of abscissas, i.e. the
values of t during the iterations, and the second element is the array of
ordinates, i.e. the values of y during the iterations."""
if a is None or b is None or n is None or c is None:
raise ValueError("a, b, n and c must not be None.")
if b < a:
raise ValueError("b < a, but it should be a <= b.")
if not callable(f):
raise TypeError("f should be a callable object.")
h = (b - a) / n
# t is an array of abscissas.
t = arange(a, b, h)
# y is an array of ordinates.
y = zeros(n)
y[0] = c
for i in range(n - 1):
y[i + 1] = y[i] + h * f(t[i], y[i])
return t, y
def forward_euler_approx(a: float,
b: float,
n: int,
c: float,
f: callable) -> float:
"""Forward Euler method, with y = f(x, y), with initial value c, and range
[a, b]. n is the number of times to split the range [a, b], and is thus used
to calculate the step size h.
It returns just y[b].
Use this function in case space requirements are a must."""
if a is None or b is None or n is None or c is None:
raise ValueError("a, b, n and c must not be None.")
if b < a:
raise ValueError("b < a, but it should be a <= b")
if not callable(f):
raise TypeError("f should be a callable object")
t = a
y = c
h = (b - a) / n
for _ in range(n - 1):
y += h * f(t, y)
t += h
return y
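# The module docstring above derives the backward (implicit) Euler method, but
# the original module only implements the forward variant. The function below
# is a small illustrative sketch, not part of the original API (and deliberately
# left out of __all__): it solves the implicit update with a fixed number of
# fixed-point iterations, which is one of several possible choices (Newton's
# method would be another).
def backward_euler_approx(a: float,
                          b: float,
                          n: int,
                          c: float,
                          f: callable,
                          fixed_point_iterations: int = 10) -> float:
    """Backward (implicit) Euler method, with y' = f(t, y), initial value c,
    and range [a, b]. n is the number of times to split the range [a, b].
    At each step the implicit equation yᵢ₊₁ = yᵢ + h * f(tᵢ₊₁, yᵢ₊₁) is solved
    approximately by fixed-point iteration, starting from the forward Euler
    prediction. It returns just y[b], like forward_euler_approx."""
    if a is None or b is None or n is None or c is None:
        raise ValueError("a, b, n and c must not be None.")
    if b < a:
        raise ValueError("b < a, but it should be a <= b.")
    if not callable(f):
        raise TypeError("f should be a callable object.")
    h = (b - a) / n
    t = a
    y = c
    for _ in range(n - 1):
        t_next = t + h
        # Predictor: explicit forward Euler guess for the next ordinate.
        y_next = y + h * f(t, y)
        # Corrector: fixed-point iterations on the implicit equation.
        for _ in range(fixed_point_iterations):
            y_next = y + h * f(t_next, y_next)
        y = y_next
        t = t_next
    return y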
| {
"content_hash": "7c85bb8f03df01854b5a831c4c09f41e",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 113,
"avg_line_length": 29.26993865030675,
"alnum_prop": 0.6285893942569691,
"repo_name": "nbro/ands",
"id": "8c92084241ad3f00e412ac20da30221f4b620b60",
"size": "5004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ands/algorithms/ode/forward_euler.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "374024"
}
],
"symlink_target": ""
} |
import sys
import textwrap
from math import sqrt, ceil
from itertools import zip_longest
s = input().strip()
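# The encryption scheme: drop all whitespace, lay the characters out in rows of
# width ceil(sqrt(len(s))), then read the grid column by column. zip_longest
# pads the final short row with None, which is filtered out before joining.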
s = ''.join(s.split())
w = ceil(sqrt(len(s)))
for word in zip_longest(*textwrap.wrap(s, width=w)):
print(''.join([i for i in word if i]), end=' ')
| {
"content_hash": "4bce8ca6bfb954e7de05ae08b8e786e7",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 52,
"avg_line_length": 21.916666666666668,
"alnum_prop": 0.6577946768060836,
"repo_name": "avtomato/HackerRank",
"id": "6c7f72fffa3daf2725ef08521a4f819f48800db4",
"size": "279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Algorithms/_02_Implementation/_43_Encryption/solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42781"
},
{
"name": "Shell",
"bytes": "1075"
}
],
"symlink_target": ""
} |
import string
def prep_string(sentence):
    # replace punctuation with white space so it doesn't stick to words
for char in string.punctuation:
sentence = sentence.replace(char, " ")
return sentence.split()
def word_count(sample):
process_sample = prep_string(sample)
# print(process_sample)
count = {}
for word in process_sample:
# print(word)
if word in count.keys():
count[word] += 1
else:
count[word] = 1
return count
#test_sample = "one fish two fish red fish blue fish"
#print(word_count(test_sample))
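# A couple of illustrative checks (not part of the original exercise solution).
# Note that because prep_string() turns every punctuation character into a
# space, an apostrophe splits a word: "can't" is counted as "can" and "t".
if __name__ == '__main__':
    print(word_count("one fish two fish red fish blue fish"))
    print(word_count("Joe can't tell between large and large."))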
| {
"content_hash": "0fd7525bbe4c790fc2ba1cd6b1f48e37",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 71,
"avg_line_length": 22.035714285714285,
"alnum_prop": 0.6110210696920584,
"repo_name": "developerQuinnZ/this_will_work",
"id": "115b9b49368d33242510bc70860e43cc176dcd19",
"size": "617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "student-work/DennisHemken/exercism/python/word-count/word_count.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1748"
},
{
"name": "Jupyter Notebook",
"bytes": "9569022"
},
{
"name": "Makefile",
"bytes": "6783"
},
{
"name": "PLpgSQL",
"bytes": "11955896"
},
{
"name": "Python",
"bytes": "290864"
},
{
"name": "Shell",
"bytes": "5873"
}
],
"symlink_target": ""
} |
import pkg_resources
__version__ = pkg_resources.get_distribution("kinto_remote_settings").version
def includeme(config):
config.include("kinto_remote_settings.changes")
config.include("kinto_remote_settings.signer")
| {
"content_hash": "131c2e45a44e77c8a8aa8bfde18a14af",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 77,
"avg_line_length": 25.444444444444443,
"alnum_prop": 0.7554585152838428,
"repo_name": "mozilla-services/kinto-dist",
"id": "8c1dda1d66daafb39cbbc4a5847b946c5205f1fe",
"size": "229",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "kinto-remote-settings/src/kinto_remote_settings/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2392"
},
{
"name": "Makefile",
"bytes": "1749"
},
{
"name": "Python",
"bytes": "277732"
},
{
"name": "Shell",
"bytes": "2140"
}
],
"symlink_target": ""
} |
__author__ = 'thomros'
from ckan.lib.base import (BaseController, render)
class FgpController(BaseController):
def ramp_view(self):
return render('ramp.html') | {
"content_hash": "9c806d8c7768b9a15a719aab4f462f9e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 50,
"avg_line_length": 24.571428571428573,
"alnum_prop": 0.7034883720930233,
"repo_name": "open-data/ckanext-fgp",
"id": "5e8d83339b7fd35c16948d3deb2ca35eb28bad38",
"size": "172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ckanext/fgp/controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2309"
},
{
"name": "Shell",
"bytes": "224"
}
],
"symlink_target": ""
} |
"""Tests for the multi-processing base process."""
import unittest
from plaso.engine import configurations
from plaso.multi_processing import base_process
from tests import test_lib as shared_test_lib
class TestProcess(base_process.MultiProcessBaseProcess):
"""Implementation of the multi-processing base process for testing."""
def _GetStatus(self):
"""Returns status information.
Returns:
dict[str, object]: status attributes, indexed by name.
"""
# TODO: implement.
return {}
def _Main(self):
"""The process main loop.
This method is called when the process is ready to start. A sub class
should override this method to do the necessary actions in the main loop.
"""
# TODO: implement.
return
def SignalAbort(self):
"""Signals the process to abort."""
# TODO: implement.
return
class MultiProcessBaseProcessTest(shared_test_lib.BaseTestCase):
"""Tests the multi-processing base process."""
# pylint: disable=protected-access
def testInitialization(self):
"""Tests the initialization."""
configuration = configurations.ProcessingConfiguration()
test_process = TestProcess(configuration, name='TestBase')
self.assertIsNotNone(test_process)
# TODO: add test for name property.
# TODO: add test for _OnCriticalError.
# TODO: add test for _SigSegvHandler.
# TODO: add test for _SigTermHandler.
# TODO: add test for _StartProcessStatusRPCServer.
# TODO: add test for _StopProcessStatusRPCServer.
# TODO: add test for _WaitForStatusNotRunning.
# TODO: add test for run.
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "ed605d71705f813889d380956937e5a5",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 77,
"avg_line_length": 26.83606557377049,
"alnum_prop": 0.7098350641417227,
"repo_name": "Onager/plaso",
"id": "6b788afa0daa5389ec92b2de876c5abc2a5b95d2",
"size": "1684",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/multi_processing/base_process.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1172"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1270"
},
{
"name": "Python",
"bytes": "4816953"
},
{
"name": "Shell",
"bytes": "22891"
}
],
"symlink_target": ""
} |
import logging
# RESTServer imports
from MiniREST.RESTServer import RESTServer, responseCodes, responseTypes
class RESTCommunicator(RESTServer):
"""RESTCommunicator - creates a new RESTCommunicator instance.
Extends RESTServer with custom functions.
"""
def __init__(self, rcubic, bind='0.0.0.0', port=8002, *args, **kwargs):
"""Create a RESTCommunicator. Call 'start' to start the server.
Keyword arguments:
bind -- the address to which the server binds (default '0.0.0.0')
port -- the port on which the server listens (default 8002)
portRange -- choose first available port to listen on
"""
super(RESTCommunicator, self).__init__(bind, port, *args, **kwargs)
self.registerFunction('progress', self._progress, token=True)
self.registerFunction('reclone', self._reclone, token=True)
self.registerFunction('cancel', self._cancel, token=True)
self.registerFunction('reschedule', self._reschedule, token=True)
self.registerFunction('manualOverride', self._manualOverride, token=True)
self.registerFunction('supported', self._supported, token=True)
self.features = ['progress', 'reclone', 'reschedule', 'manualOverride', 'cancel']
self.rcubic = rcubic
    def _progress(self, env, start_response, post):
        """Responds to a 'progress' request and calls rcubic.updateProgress(...)
Keyword arguments:
        env -- expects a 'data' list TODO: parameters
"""
logging.debug("Received Progress report for {0}: {1}".format(post['scriptName'], post['message']))
resp = self.rcubic.updateProgress(post['scriptName'], post['message'])
start_response(responseCodes[200], responseTypes['plaintext'])
return str(resp)
def _reclone(self, env, start_response, post):
"""Responds to a 'reclone' request and calls rcubic._initGit()
Keyword arguments:
        env -- doesn't expect any parameters
"""
logging.info("Received reclone request")
resp = self.rcubic._initGit()
start_response(responseCodes[200], responseTypes['plaintext'])
return str(resp)
    def _reschedule(self, env, start_response, post):
        """Responds to a 'reschedule' request and calls rcubic.reschedule(scriptName)
Keyword argument:
env -- expects a 'scriptName'
"""
scriptName = post['scriptName']
logging.info("Received reschedule request for {0}.".format(scriptName))
resp = self.rcubic.reschedule(scriptName)
start_response(responseCodes[200], responseTypes['plaintext'])
if not resp:
logging.warning("Reschedule request for {0} failed.".format(post['scriptName']))
return str(bool(resp))
def _manualOverride(self, env, start_response, post):
"""Responds to a 'manualOverride' request and calls rcubic.manualOverride(scriptName)
Keyword argument:
env -- expects a scriptName
"""
scriptName = post['scriptName']
logging.info("Received override request for {0}.".format(scriptName))
resp = self.rcubic.manualOverride(scriptName)
start_response(responseCodes[200], responseTypes['plaintext'])
if not resp:
logging.warning("Override request for {0} failed.".format(scriptName))
return str(bool(resp))
    def _supported(self, env, start_response, post):
        """Responds to a request asking whether a feature is supported
Keyword argument:
env -- expects a 'feature'
"""
feature = post['feature']
start_response(responseCodes[200], responseTypes['plaintext'])
return str(feature in self.features)
def _cancel(self, env, start_response, post):
"""Responds to a 'cancel' request and calls rcubic.abort()
Keyword arguments:
        env -- doesn't expect any parameters
"""
logging.info("Received cancel request")
resp = self.rcubic.abort()
start_response(responseCodes[200], responseTypes['plaintext'])
return str(resp)
| {
"content_hash": "b69ac616449505bf4b0dda988d61929c",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 106,
"avg_line_length": 38.63551401869159,
"alnum_prop": 0.6473149492017417,
"repo_name": "amplify-education/rcubic",
"id": "ead0551e3570b71b1ff7d9789c9b0f6e9dd5dd1e",
"size": "5304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RCubic/RESTCommunicator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "32625"
},
{
"name": "JavaScript",
"bytes": "89827"
},
{
"name": "Python",
"bytes": "153817"
},
{
"name": "Shell",
"bytes": "18857"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class HDInsightManagementClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for HDInsightManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The subscription credentials which uniquely identify Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call. Required.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2021-06-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "TokenCredential", subscription_id: str, **kwargs: Any) -> None:
super(HDInsightManagementClientConfiguration, self).__init__(**kwargs)
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", "2021-06-01")
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "mgmt-hdinsight/{}".format(VERSION))
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(
self.credential, *self.credential_scopes, **kwargs
)
| {
"content_hash": "60e58d2e9b428ec8738300e5c4b75d91",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 108,
"avg_line_length": 52.261538461538464,
"alnum_prop": 0.7218133647335885,
"repo_name": "Azure/azure-sdk-for-python",
"id": "fb8ddebc34b6cc8286cd49172c7f0f23e5b6f079",
"size": "3865",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/hdinsight/azure-mgmt-hdinsight/azure/mgmt/hdinsight/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from .util import readline_google_store
| {
"content_hash": "35d845fdc82b45c97a930629e5c823b2",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 39,
"avg_line_length": 40,
"alnum_prop": 0.825,
"repo_name": "xtc283/Ngrams",
"id": "9a48636f17d4808a556abca515d3a1796bc660a6",
"size": "40",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12527"
}
],
"symlink_target": ""
} |
from distutils.core import setup
setup(name='sane3d',
version='0.1',
description='A sane interface to mplot3d',
author='Johannes Kulick',
author_email='[email protected]',
url='http://github.com/hildensia/sane3d',
install_requires=['matplotlib'],
packages=['sane3d']
)
| {
"content_hash": "a79b568d39c8fc0b32782ec6f8a13ecf",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 48,
"avg_line_length": 28.727272727272727,
"alnum_prop": 0.6518987341772152,
"repo_name": "hildensia/sane3d",
"id": "a07cbe137287cb7f718ff8d8d8d775fe04f2441b",
"size": "339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "3515"
}
],
"symlink_target": ""
} |
import abc
import subprocess
import json
import logging
#
# This is a base command class.
# Subclasses can override the exec_impl method for handling messages.
# Message responses are automatically sent and the base class can
# do some error handling.
#
class Command(object):
__metaclass__ = abc.ABCMeta
# logging.basicConfig(level=logging.DEBUG)
#
# Create a new Command base class.
#
def __init__(self, flow, cmd_name, params):
self.flow = flow
self.cmd_name = cmd_name
self.params = params
self.response = None
#
# Clients call this to perform some operation
#
def exec_cmd(self):
try:
self.exec_impl()
except subprocess.CalledProcessError as e:
#
# Failure path
#
logging.debug("Error executing: %s %s" % (e.cmd, e.output));
if self.flow is not None:
self.flow.send_message(self.cmd_name + "_response",
{ 'success': False,
'message': 'Error executing command: %s' % (e.output) })
return
#
# Success path
#
logging.debug("Sending response: %s" % (self.response))
if self.flow is not None:
self.flow.send_message(self.cmd_name + "_response", self.response)
self.post_exec()
#
# Helper to execute subprocess commands
#
def shell_helper(self, cmd_arr):
output = subprocess.check_output(cmd_arr, stderr=subprocess.STDOUT)
return output
#
# Get the response object
#
def get_response(self):
return self.response
#
# Override this to perform some subclass specific operation
#
@abc.abstractmethod
def exec_impl(self):
""" Subclasses implement this method to perform specific operations """
return
#
# Override this to perform some subclass specific operation after
# exec_impl is called and response is sent.
#
def post_exec(self):
return
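#
# A minimal illustrative subclass (not part of the original module). It shows
# the override points described above: exec_impl runs a command via
# shell_helper and stores a response dict, which exec_cmd then sends (when a
# flow is attached) or leaves available through get_response(). The command
# name "echo" and the 'text' parameter are made up for this example.
#
class EchoCommand(Command):
    #
    # Echo the 'text' parameter back as the command response.
    #
    def exec_impl(self):
        output = self.shell_helper(["echo", str(self.params.get("text", ""))])
        self.response = {"success": True, "message": output.strip()}
#
# Example usage with no flow attached:
#   cmd = EchoCommand(None, "echo", {"text": "hello"})
#   cmd.exec_cmd()
#   print(cmd.get_response())
#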
| {
"content_hash": "534e5595a17476b5cf46f7ef8fdb22ff",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 80,
"avg_line_length": 25.77777777777778,
"alnum_prop": 0.5838122605363985,
"repo_name": "manylabs/flow",
"id": "c7059a1b10962967548ffdbc8bacdf039eca9e43",
"size": "2088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flow/commands/command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "92403"
},
{
"name": "Shell",
"bytes": "146"
}
],
"symlink_target": ""
} |
import fnmatch
import os
import jsonpath_rw
from oslo_config import cfg
from oslo_utils import timeutils
import six
import yaml
from ceilometer.event.storage import models
from ceilometer.i18n import _
from ceilometer.openstack.common import log
OPTS = [
cfg.StrOpt('definitions_cfg_file',
default="event_definitions.yaml",
help="Configuration file for event definitions."
),
cfg.BoolOpt('drop_unmatched_notifications',
default=False,
help='Drop notifications if no event definition matches. '
'(Otherwise, we convert them with just the default traits)'),
]
cfg.CONF.register_opts(OPTS, group='event')
LOG = log.getLogger(__name__)
class EventDefinitionException(Exception):
def __init__(self, message, definition_cfg):
super(EventDefinitionException, self).__init__(message)
self.definition_cfg = definition_cfg
def __str__(self):
return '%s %s: %s' % (self.__class__.__name__,
self.definition_cfg, self.message)
class TraitDefinition(object):
def __init__(self, name, trait_cfg, plugin_manager):
self.cfg = trait_cfg
self.name = name
type_name = trait_cfg.get('type', 'text')
if 'plugin' in trait_cfg:
plugin_cfg = trait_cfg['plugin']
if isinstance(plugin_cfg, six.string_types):
plugin_name = plugin_cfg
plugin_params = {}
else:
try:
plugin_name = plugin_cfg['name']
except KeyError:
raise EventDefinitionException(
_('Plugin specified, but no plugin name supplied for '
'trait %s') % name, self.cfg)
plugin_params = plugin_cfg.get('parameters')
if plugin_params is None:
plugin_params = {}
try:
plugin_ext = plugin_manager[plugin_name]
except KeyError:
raise EventDefinitionException(
_('No plugin named %(plugin)s available for '
'trait %(trait)s') % dict(plugin=plugin_name,
trait=name), self.cfg)
plugin_class = plugin_ext.plugin
self.plugin = plugin_class(**plugin_params)
else:
self.plugin = None
if 'fields' not in trait_cfg:
raise EventDefinitionException(
_("Required field in trait definition not specified: "
"'%s'") % 'fields',
self.cfg)
fields = trait_cfg['fields']
if not isinstance(fields, six.string_types):
# NOTE(mdragon): if not a string, we assume a list.
if len(fields) == 1:
fields = fields[0]
else:
fields = '|'.join('(%s)' % path for path in fields)
try:
self.fields = jsonpath_rw.parse(fields)
except Exception as e:
raise EventDefinitionException(
_("Parse error in JSONPath specification "
"'%(jsonpath)s' for %(trait)s: %(err)s")
% dict(jsonpath=fields, trait=name, err=e), self.cfg)
self.trait_type = models.Trait.get_type_by_name(type_name)
if self.trait_type is None:
raise EventDefinitionException(
_("Invalid trait type '%(type)s' for trait %(trait)s")
% dict(type=type_name, trait=name), self.cfg)
def _get_path(self, match):
if match.context is not None:
for path_element in self._get_path(match.context):
yield path_element
yield str(match.path)
def to_trait(self, notification_body):
values = [match for match in self.fields.find(notification_body)
if match.value is not None]
if self.plugin is not None:
value_map = [('.'.join(self._get_path(match)), match.value) for
match in values]
value = self.plugin.trait_value(value_map)
else:
value = values[0].value if values else None
if value is None:
return None
# NOTE(mdragon): some openstack projects (mostly Nova) emit ''
# for null fields for things like dates.
if self.trait_type != models.Trait.TEXT_TYPE and value == '':
return None
value = models.Trait.convert_value(self.trait_type, value)
return models.Trait(self.name, self.trait_type, value)
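# Illustrative sketch (hypothetical trait config, not part of this module):
# a definition such as
#
#   TraitDefinition('size',
#                   {'type': 'int',
#                    'fields': ['payload.size', 'payload.volume_size']},
#                   plugin_manager)
#
# compiles the field list into the jsonpath union
# '(payload.size)|(payload.volume_size)', so to_trait() on a notification
# body like {'payload': {'volume_size': 10}} yields an int-typed Trait named
# 'size' with value 10, while a body matching neither path returns None and
# the trait is simply omitted from the resulting event.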
class EventDefinition(object):
DEFAULT_TRAITS = dict(
service=dict(type='text', fields='publisher_id'),
request_id=dict(type='text', fields='_context_request_id'),
tenant_id=dict(type='text', fields=['payload.tenant_id',
'_context_tenant']),
)
def __init__(self, definition_cfg, trait_plugin_mgr):
self._included_types = []
self._excluded_types = []
self.traits = dict()
self.cfg = definition_cfg
try:
event_type = definition_cfg['event_type']
traits = definition_cfg['traits']
except KeyError as err:
raise EventDefinitionException(
_("Required field %s not specified") % err.args[0], self.cfg)
if isinstance(event_type, six.string_types):
event_type = [event_type]
for t in event_type:
if t.startswith('!'):
self._excluded_types.append(t[1:])
else:
self._included_types.append(t)
if self._excluded_types and not self._included_types:
self._included_types.append('*')
for trait_name in self.DEFAULT_TRAITS:
self.traits[trait_name] = TraitDefinition(
trait_name,
self.DEFAULT_TRAITS[trait_name],
trait_plugin_mgr)
for trait_name in traits:
self.traits[trait_name] = TraitDefinition(
trait_name,
traits[trait_name],
trait_plugin_mgr)
def included_type(self, event_type):
for t in self._included_types:
if fnmatch.fnmatch(event_type, t):
return True
return False
def excluded_type(self, event_type):
for t in self._excluded_types:
if fnmatch.fnmatch(event_type, t):
return True
return False
def match_type(self, event_type):
return (self.included_type(event_type)
and not self.excluded_type(event_type))
@property
def is_catchall(self):
return '*' in self._included_types and not self._excluded_types
@staticmethod
def _extract_when(body):
"""Extract the generated datetime from the notification."""
        # NOTE: I am keeping the logic the same as it was in the collector.
        # However, *ALL* notifications should have a 'timestamp' field; it's
        # part of the notification envelope spec. If this was put here because
        # some openstack project is generating notifications without a
        # timestamp, then that needs to be filed as a bug with the offending
        # project (mdragon)
when = body.get('timestamp', body.get('_context_timestamp'))
if when:
return timeutils.normalize_time(timeutils.parse_isotime(when))
return timeutils.utcnow()
def to_event(self, notification_body):
event_type = notification_body['event_type']
message_id = notification_body['message_id']
when = self._extract_when(notification_body)
traits = (self.traits[t].to_trait(notification_body)
for t in self.traits)
# Only accept non-None value traits ...
traits = [trait for trait in traits if trait is not None]
event = models.Event(message_id, event_type, when, traits)
return event
class NotificationEventsConverter(object):
"""Notification Event Converter
The NotificationEventsConverter handles the conversion of Notifications
from openstack systems into Ceilometer Events.
The conversion is handled according to event definitions in a config file.
    The config is a list of event definitions. Order is significant: a
    notification will be processed according to the LAST definition that
    matches its event_type. (We use the last matching definition because that
    allows you to use YAML merge syntax in the definitions file.)
Each definition is a dictionary with the following keys (all are
required):
- event_type: this is a list of notification event_types this definition
will handle. These can be wildcarded with unix shell glob (not regex!)
wildcards.
An exclusion listing (starting with a '!') will exclude any types listed
from matching. If ONLY exclusions are listed, the definition will match
anything not matching the exclusions.
      This item can also be a string, which will be taken as equivalent to a
      1-item list.
Examples:
* ['compute.instance.exists'] will only match
          compute.instance.exists notifications
* "compute.instance.exists" Same as above.
* ["image.create", "image.delete"] will match
image.create and image.delete, but not anything else.
* "compute.instance.*" will match
compute.instance.create.start but not image.upload
* ['*.start','*.end', '!scheduler.*'] will match
compute.instance.create.start, and image.delete.end,
but NOT compute.instance.exists or
scheduler.run_instance.start
* '!image.*' matches any notification except image
notifications.
* ['*', '!image.*'] same as above.
- traits: (dict) The keys are trait names, the values are the trait
definitions. Each trait definition is a dictionary with the following
keys:
- type (optional): The data type for this trait. (as a string)
Valid options are: 'text', 'int', 'float' and 'datetime', defaults to
'text' if not specified.
- fields: a path specification for the field(s) in the notification you
      wish to extract. The paths can be specified with a dot syntax
      (e.g. 'payload.host'); dictionary syntax (e.g. 'payload[host]') is
      also supported.
In either case, if the key for the field you are looking for contains
special characters, like '.', it will need to be quoted (with double
or single quotes) like so::
"payload.image_meta.'org.openstack__1__architecture'"
The syntax used for the field specification is a variant of JSONPath,
and is fairly flexible.
(see: https://github.com/kennknowles/python-jsonpath-rw for more info)
      Specifications can be written to match multiple possible fields; the
      value for the trait will be derived from the matching fields that
      exist and have non-null (i.e. not None) values in the
      notification.
By default the value will be the first such field. (plugins can alter
that, if they wish)
      This configuration value is normally a string; for convenience, it can
      be specified as a list of specifications, which will be OR'ed together
      (a union query in jsonpath terms).
- plugin (optional): (dictionary) with the following keys:
- name: (string) name of a plugin to load
- parameters: (optional) Dictionary of keyword args to pass
to the plugin on initialization. See documentation on each plugin to
see what arguments it accepts.
For convenience, this value can also be specified as a string, which is
interpreted as a plugin name, which will be loaded with no parameters.
"""
def __init__(self, events_config, trait_plugin_mgr, add_catchall=True):
self.definitions = [
EventDefinition(event_def, trait_plugin_mgr)
for event_def in reversed(events_config)]
if add_catchall and not any(d.is_catchall for d in self.definitions):
event_def = dict(event_type='*', traits={})
self.definitions.append(EventDefinition(event_def,
trait_plugin_mgr))
def to_event(self, notification_body):
event_type = notification_body['event_type']
message_id = notification_body['message_id']
edef = None
for d in self.definitions:
if d.match_type(event_type):
edef = d
break
if edef is None:
msg = (_('Dropping Notification %(type)s (uuid:%(msgid)s)')
% dict(type=event_type, msgid=message_id))
if cfg.CONF.event.drop_unmatched_notifications:
LOG.debug(msg)
else:
# If drop_unmatched_notifications is False, this should
# never happen. (mdragon)
LOG.error(msg)
return None
return edef.to_event(notification_body)
def get_config_file():
config_file = cfg.CONF.event.definitions_cfg_file
if not os.path.exists(config_file):
config_file = cfg.CONF.find_file(config_file)
return config_file
def setup_events(trait_plugin_mgr):
"""Setup the event definitions from yaml config file."""
config_file = get_config_file()
if config_file is not None:
LOG.debug(_("Event Definitions configuration file: %s"), config_file)
with open(config_file) as cf:
config = cf.read()
try:
events_config = yaml.safe_load(config)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
errmsg = (_("Invalid YAML syntax in Event Definitions file "
"%(file)s at line: %(line)s, column: %(column)s.")
% dict(file=config_file,
line=mark.line + 1,
column=mark.column + 1))
else:
errmsg = (_("YAML error reading Event Definitions file "
"%(file)s")
% dict(file=config_file))
LOG.error(errmsg)
raise
else:
LOG.debug(_("No Event Definitions configuration file found!"
" Using default config."))
events_config = []
LOG.info(_("Event Definitions: %s"), events_config)
allow_drop = cfg.CONF.event.drop_unmatched_notifications
return NotificationEventsConverter(events_config,
trait_plugin_mgr,
add_catchall=not allow_drop)
| {
"content_hash": "8ca7e4634e608ac4da730ddc28b564be",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 78,
"avg_line_length": 39.29842931937173,
"alnum_prop": 0.5839994670929922,
"repo_name": "Juniper/ceilometer",
"id": "4ba177e3eefc010480bcd8586b26ae4cf5c2c53e",
"size": "15598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/event/converter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "6027"
},
{
"name": "Python",
"bytes": "2857750"
},
{
"name": "Shell",
"bytes": "4136"
}
],
"symlink_target": ""
} |
import json
import pyrax
pyrax.set_environment("scale12x")
pyrax.keyring_auth()
cf = pyrax.cloudfiles
try:
cf.delete_container("python-demo", del_objects=True)
print "Container deleted."
except pyrax.exceptions.NoSuchContainer:
print "Container does not exist."
| {
"content_hash": "87288f21f48014853f34056e77528f02",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 56,
"avg_line_length": 23,
"alnum_prop": 0.7536231884057971,
"repo_name": "rackerlabs/sdk_workshops",
"id": "39de3156ee64da8f51e9257546ad7933182ad8e4",
"size": "322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/files/cleanup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "86528"
},
{
"name": "PHP",
"bytes": "6033"
},
{
"name": "Python",
"bytes": "12094"
}
],
"symlink_target": ""
} |
"""
Module to hold application settings.
"""
import os
from time import strftime
#: Width of cells in pixels
CELL_WIDTH = 32
#: Height of cells in pixels
CELL_HEIGHT = 32
#: Max draw FPS of the simulation
MAX_FPS = 60
#: Time per simulation step in milliseconds (a lower step time results in a faster simulation, 0 = equal to draw speed)
SIMULATION_STEP_TIME = 50
#: Port at which the internal web-server listens
WEB_LISTEN_PORT = 8080
# Do not edit below this line.
ROOT_DIR = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))
AGENT_DIR = os.path.join(ROOT_DIR, "_agents")
WORLD_DIR = os.path.join(ROOT_DIR, "_worlds")
EXPERIMENT_DIR = os.path.join(ROOT_DIR, "_experiments")
RESULTS_DIR = os.path.join(ROOT_DIR, "_results", strftime("%Y%m%dT%H%M%S"))
SIMULATIONS_RENDERS_DIR = os.path.join(ROOT_DIR, "_renders")
SIMULATION_RENDERS_DIR = os.path.join(SIMULATIONS_RENDERS_DIR, strftime("%Y%m%dT%H%M%S"))
WEBROOT_DIR = os.path.join(ROOT_DIR, "webroot")
WEBROOT_DATA_DIR = os.path.join(WEBROOT_DIR, "data")
| {
"content_hash": "68c19df78c0a90354842355f9e4b8394",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 118,
"avg_line_length": 34.733333333333334,
"alnum_prop": 0.7130518234165067,
"repo_name": "Beskhue/enactive-agents",
"id": "e28a0b336a16a37ff00446108f28774391858d85",
"size": "1042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enactiveagents/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1016"
},
{
"name": "HTML",
"bytes": "760"
},
{
"name": "JavaScript",
"bytes": "2678"
},
{
"name": "Python",
"bytes": "148456"
}
],
"symlink_target": ""
} |
"""
Cuteness Delivery System.
This program requests the top links from various subreddits of cute animals
and emails them to participants.
"""
from __future__ import unicode_literals, absolute_import
import re
import sys
import praw
import django
import calendar
import requests
from pytz import timezone
from bs4 import BeautifulSoup
from operator import attrgetter
from datetime import date, datetime
from constants import LIMIT, EMAIL
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
django.setup()
from subscribers.models import Subscriber
USER_AGENT = 'python:deliver_cute:v1.0 (by /u/____OOOO____)'
TXT_CONTENT = 'Plain text message.'
EMAIL_SUBJECT_TEMPLATE = '{debug}Cute Pics for {d}, {m} {i} {y}'
FROM_NAME = 'Deliver Cute'
PIC_WIDTH = '400'
YT_PAT = re.compile(r'.*(youtu\.be|youtube\.com).*')
SRC_PAT = re.compile(r'http(s)?://i\.(imgur|reddituploads|redd).*\.[a-z]{3,4}')
def main(debug=False):
"""Gather then email top cute links."""
subscribers = subscribers_for_now(debug)
if not subscribers:
print('0 subscribers want cute delivered at {}'.format(get_now_hour()))
return 0
subject = get_email_subject(debug)
reddit = praw.Reddit(user_agent=USER_AGENT)
sent_count = 0
post_map = {}
found_posts = set()
for subscriber in subscribers:
posts_to_send = []
for name in subscriber.subreddit_names():
try:
posts_to_send.extend(post_map[name])
except KeyError:
posts = get_posts_from_reddit(reddit, name, LIMIT)
posts = fix_image_links(posts)
posts = dedupe_posts(posts, found_posts)
posts = list(posts)
post_map[name] = posts
posts_to_send.extend(posts)
posts_to_send = sort_posts(posts_to_send)
posts_to_send = htmlize_posts(posts_to_send)
body = get_email_body(subscriber, posts_to_send)
sent_count += send_email(subject, subscriber, body)
print('Email sent to {}...'.format(subscriber.email))
return sent_count
def get_now_hour():
"""Return an integer of the current hour in Pacific Standard Time."""
now = datetime.now(tz=timezone('US/Pacific'))
return now.hour
def subscribers_for_now(debug):
"""Collect subscribers with send_hour set to current time."""
if debug:
return Subscriber.objects.filter(email=EMAIL)
return Subscriber.objects.filter(send_hour=get_now_hour())
def get_posts_from_reddit(reddit, subreddit_name, limit):
"""Get subreddit names from given subreddit name."""
subreddit = reddit.get_subreddit(subreddit_name)
return subreddit.get_top_from_day(limit=limit)
def get_relevant_posts(post_map, subscriber):
"""Filter only those posts selected by the current subscriber."""
for subreddit_name in subscriber.subreddit_names():
for post in post_map[subreddit_name]:
yield post
def dedupe_posts(posts, found_posts):
"""Generate posts where duplicates have been removed by comparing url."""
for post in posts:
if post.url not in found_posts:
yield post
found_posts.add(post.url)
else:
print('Omitting duplicate {}'.format(post.url))
def sort_posts(posts):
"""Generate posts sorted by their upvote count."""
for post in sorted(posts, key=attrgetter('score'), reverse=True):
yield post
def fix_image_links(posts):
"""Make sure that each imgur link is directly to the content."""
for post in posts:
link = post.url
if YT_PAT.match(link):
continue
        # Temporary measure until able to display gifv and gfycat properly
if link.endswith('gifv') or link.endswith('mp4') or 'gfycat' in link:
continue
if not SRC_PAT.match(link):
try:
link = find_source_link(link)
except AttributeError as e:
print('Error trying to get img src at {}: {}'.format(link, e))
continue
link = re.sub(r'^//', 'http://', link)
post.url = link
yield post
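# Illustrative examples of the patterns above (the URLs are hypothetical):
# a direct link such as 'https://i.imgur.com/abc123.jpg' already matches
# SRC_PAT and is yielded unchanged, while a page link such as
# 'http://imgur.com/gallery/abc123' does not match and is handed to
# find_source_link() below to scrape the real image source. Links matching
# YT_PAT (e.g. 'https://youtu.be/abc123') are skipped entirely, so those
# posts never reach the email.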
def find_source_link(link):
"""Scrape the direct source link from imgur or other website."""
# Currently only works for imgur
response = requests.get(link)
html = BeautifulSoup(response.text, 'html.parser')
div = html.find('div', class_='post-image')
img = div.find('img')
return img.attrs['src']
def htmlize_posts(posts):
"""Generate each link as an html-ized image element."""
for post in posts:
subreddit = post.subreddit.display_name
context = {
'subreddit': subreddit,
'subreddit_name': '/r/' + subreddit,
'subreddit_url': 'https://www.reddit.com/r/' + subreddit,
'title': post.title,
'url': post.url,
'permalink': post.permalink,
'width': PIC_WIDTH,
}
yield render_to_string('image.html', context=context)
def get_email_body(subscriber, posts):
"""Format posts into HTML."""
context = {
'posts': posts,
'subscriber': subscriber,
'site_url': settings.SITE_URL,
}
return render_to_string('daily_email.html', context=context)
def get_email_subject(debug):
"""Format today's date into the email subject."""
today = date.today()
day_name = calendar.day_name[today.weekday()]
month_name = calendar.month_name[today.month]
return EMAIL_SUBJECT_TEMPLATE.format(
debug='DEBUG ' * debug,
d=day_name,
m=month_name,
i=today.day,
y=today.year,
)
def send_email(subject, subscriber, body):
"""Return number of emails sent using django mail with project specs."""
print('Sending email to {}...'.format(subscriber.email))
return send_mail(
subject,
TXT_CONTENT,
EMAIL,
[subscriber.email],
html_message=body,
fail_silently=False,
)
if __name__ == '__main__':
try:
debug = bool(sys.argv[1])
except IndexError:
debug = False
print('Debug is {}'.format(debug))
print('{} email sent.'.format(main(debug)))
| {
"content_hash": "b938ad6dcdf3e8708748db5c07fc01a8",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 79,
"avg_line_length": 30.40776699029126,
"alnum_prop": 0.6256385696040868,
"repo_name": "WillWeatherford/deliver-cute",
"id": "727fb766226b105566d1d1e5469343df80ea35af",
"size": "6282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "on_schedule.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1353"
},
{
"name": "Python",
"bytes": "41284"
}
],
"symlink_target": ""
} |
import os
from unittest import mock
import tempfile
from cinderclient import exceptions as cinder_exception
import eventlet
from oslo_concurrency import processutils
from oslo_serialization import jsonutils
from ec2api.api import image as image_api
from ec2api import exception
from ec2api.tests.unit import base
from ec2api.tests.unit import fakes
from ec2api.tests.unit import matchers
from ec2api.tests.unit import tools
AMI_MANIFEST_XML = """<?xml version="1.0" ?>
<manifest>
<version>2011-06-17</version>
<bundler>
<name>test-s3</name>
<version>0</version>
<release>0</release>
</bundler>
<machine_configuration>
<architecture>x86_64</architecture>
<block_device_mapping>
<mapping>
<virtual>ami</virtual>
<device>sda1</device>
</mapping>
<mapping>
<virtual>root</virtual>
<device>/dev/sda1</device>
</mapping>
<mapping>
<virtual>ephemeral0</virtual>
<device>sda2</device>
</mapping>
<mapping>
<virtual>swap</virtual>
<device>sda3</device>
</mapping>
</block_device_mapping>
<kernel_id>%(aki-id)s</kernel_id>
<ramdisk_id>%(ari-id)s</ramdisk_id>
</machine_configuration>
<image>
<ec2_encrypted_key>foo</ec2_encrypted_key>
<user_encrypted_key>foo</user_encrypted_key>
<ec2_encrypted_iv>foo</ec2_encrypted_iv>
<parts count="1">
<part index="0">
<filename>foo</filename>
</part>
</parts>
</image>
</manifest>
""" % {'aki-id': fakes.ID_EC2_IMAGE_AKI_1,
'ari-id': fakes.ID_EC2_IMAGE_ARI_1}
FILE_MANIFEST_XML = """<?xml version="1.0" ?>
<manifest>
<image>
<ec2_encrypted_key>foo</ec2_encrypted_key>
<user_encrypted_key>foo</user_encrypted_key>
<ec2_encrypted_iv>foo</ec2_encrypted_iv>
<parts count="1">
<part index="0">
<filename>foo</filename>
</part>
</parts>
</image>
</manifest>
"""
class ImageTestCase(base.ApiTestCase):
@mock.patch('ec2api.api.instance._is_ebs_instance')
def _test_create_image(self, instance_status, no_reboot, is_ebs_instance):
self.set_mock_db_items(fakes.DB_INSTANCE_2)
os_instance = mock.MagicMock()
os_instance.configure_mock(id=fakes.ID_OS_INSTANCE_2,
status=instance_status)
stop_called = iter([False, True])
os_instance.stop.side_effect = lambda: next(stop_called)
os_instance.get.side_effect = lambda: (setattr(os_instance, 'status',
'SHUTOFF')
if next(stop_called) else None)
image_id = fakes.random_ec2_id('ami')
os_image_id = fakes.random_os_id()
os_instance.create_image.return_value = os_image_id
self.glance.images.get.return_value = fakes.OSImage(
{'id': os_image_id},
from_get=True)
self.nova.servers.get.return_value = os_instance
is_ebs_instance.return_value = True
self.db_api.add_item.side_effect = tools.get_db_api_add_item(image_id)
resp = self.execute('CreateImage',
{'InstanceId': fakes.ID_EC2_INSTANCE_2,
'Name': 'fake_name',
'Description': 'fake desc',
'NoReboot': str(no_reboot)})
self.assertEqual({'imageId': image_id},
resp)
self.db_api.get_item_by_id.assert_called_once_with(
mock.ANY, fakes.ID_EC2_INSTANCE_2)
self.nova.servers.get.assert_called_once_with(fakes.ID_OS_INSTANCE_2)
is_ebs_instance.assert_called_once_with(mock.ANY, os_instance.id)
expected_image = {'is_public': False,
'description': 'fake desc'}
if no_reboot:
expected_image['os_id'] = os_image_id
self.db_api.add_item.assert_called_once_with(
mock.ANY, 'ami', expected_image)
if not no_reboot:
eventlet.sleep()
if not no_reboot:
os_instance.stop.assert_called_once_with()
os_instance.get.assert_called_once_with()
os_instance.start.assert_called_once_with()
if no_reboot:
os_instance.create_image.assert_called_once_with('fake_name')
else:
os_instance.create_image.assert_called_once_with(
'fake_name', metadata={'ec2_id': image_id})
self.db_api.update_item.assert_called_once_with(
mock.ANY, {'id': image_id,
'is_public': False,
'description': 'fake desc',
'os_id': os_image_id,
'vpc_id': None})
self.db_api.reset_mock()
self.nova.servers.reset_mock()
def test_create_image(self):
self._test_create_image('ACTIVE', False)
self._test_create_image('SHUTOFF', True)
@mock.patch('ec2api.api.instance._is_ebs_instance')
def test_register_image_by_url(self, is_ebs_instance):
self.set_mock_db_items(fakes.DB_INSTANCE_2)
is_ebs_instance.return_value = True
# Setup the mock parameters
image_id = fakes.random_ec2_id('ami')
os_image_id = fakes.random_os_id()
self.glance.images.create.return_value = fakes.OSImage(
{'id': os_image_id},
from_get=True)
self.db_api.add_item.side_effect = tools.get_db_api_add_item(image_id)
# Setup Import Command
import_command = 'RegisterImage'
# Setup the import arguments
args = {
'Name': 'TestImage123',
'ImageLocation':
fakes.LOCATION_IMAGE_2,
'Architecture': 'x86_64'
}
# Execute the import image process
resp = self.execute(import_command, args)
# Assert that the image returned is equal to what was expected
self.assertEqual({'imageId': image_id}, resp)
# Assert that Glance Image Create was called
self.glance.images.create.assert_called_once_with(
name='TestImage123',
disk_format='raw',
container_format='bare',
visibility='private',
architecture='x86_64',
image_location=fakes.LOCATION_IMAGE_2)
# Assert that Glance Image Import was called
self.glance.images.image_import.assert_called_once_with(
os_image_id,
method='web-download',
uri=fakes.LOCATION_IMAGE_2)
# Assert that the image was created
expected_image = {'is_public': False,
'os_id': mock.ANY,
'description': None}
self.db_api.add_item.assert_called_once_with(
mock.ANY, 'ami', expected_image)
# Reset all test settings/state
self.db_api.reset_mock()
self.glance.reset_mock()
@mock.patch('ec2api.api.instance._is_ebs_instance')
def test_create_image_invalid_parameters(self, is_ebs_instance):
self.set_mock_db_items(fakes.DB_INSTANCE_1)
is_ebs_instance.return_value = False
self.assert_execution_error('InvalidParameterValue', 'CreateImage',
{'InstanceId': fakes.ID_EC2_INSTANCE_1,
'Name': 'fake_name'})
@mock.patch('ec2api.api.image._s3_create')
def test_register_image_by_s3(self, s3_create):
s3_create.return_value = fakes.OSImage(fakes.OS_IMAGE_1)
self.db_api.add_item.side_effect = (
tools.get_db_api_add_item(fakes.ID_EC2_IMAGE_1))
resp = self.execute(
'RegisterImage',
{'ImageLocation': fakes.LOCATION_IMAGE_1})
self.assertThat(resp, matchers.DictMatches(
{'imageId': fakes.ID_EC2_IMAGE_1}))
s3_create.assert_called_once_with(
mock.ANY,
{'name': fakes.LOCATION_IMAGE_1,
'image_location': fakes.LOCATION_IMAGE_1})
s3_create.reset_mock()
resp = self.execute(
'RegisterImage',
{'ImageLocation': fakes.LOCATION_IMAGE_1,
'Name': 'an image name'})
self.assertThat(resp, matchers.DictMatches(
{'imageId': fakes.ID_EC2_IMAGE_1}))
s3_create.assert_called_once_with(
mock.ANY,
{'name': 'an image name',
'image_location': fakes.LOCATION_IMAGE_1})
@mock.patch('ec2api.api.ec2utils.get_os_image')
def test_register_image_by_bdm(self, get_os_image):
self.glance.images.create.return_value = (
fakes.OSImage(fakes.OS_IMAGE_2))
self.glance.images.upload.return_value = (
fakes.OSImage(fakes.OS_IMAGE_2))
self.cinder.volume_snapshots.get.side_effect = (
tools.get_by_1st_arg_getter(
{fakes.ID_OS_SNAPSHOT_1: (
fakes.OSSnapshot(fakes.OS_SNAPSHOT_1))},
notfound_exception=cinder_exception.NotFound(404)))
self.db_api.add_item.side_effect = (
tools.get_db_api_add_item(fakes.ID_EC2_IMAGE_2))
self.set_mock_db_items(fakes.DB_SNAPSHOT_1, fakes.DB_SNAPSHOT_2,
fakes.DB_IMAGE_AKI_1, fakes.DB_IMAGE_ARI_1)
get_os_image.side_effect = [fakes.OSImage(fakes.OS_IMAGE_AKI_1),
fakes.OSImage(fakes.OS_IMAGE_ARI_1)]
resp = self.execute(
'RegisterImage',
{'RootDeviceName': fakes.ROOT_DEVICE_NAME_IMAGE_2,
'Name': 'fake_name',
'KernelId': fakes.ID_EC2_IMAGE_AKI_1,
'RamdiskId': fakes.ID_EC2_IMAGE_ARI_1,
'BlockDeviceMapping.1.DeviceName': fakes.ROOT_DEVICE_NAME_IMAGE_2,
'BlockDeviceMapping.1.Ebs.SnapshotId': fakes.ID_EC2_SNAPSHOT_1,
'BlockDeviceMapping.2.DeviceName': '/dev/vdf',
'BlockDeviceMapping.2.Ebs.VolumeSize': '100',
'BlockDeviceMapping.2.Ebs.DeleteOnTermination': 'False',
'BlockDeviceMapping.3.DeviceName': '/dev/vdg',
'BlockDeviceMapping.3.Ebs.SnapshotId': fakes.ID_EC2_SNAPSHOT_1,
'BlockDeviceMapping.3.Ebs.VolumeSize': '55',
'BlockDeviceMapping.3.Ebs.DeleteOnTermination': 'True',
'BlockDeviceMapping.4.DeviceName': '/dev/vdh',
'BlockDeviceMapping.4.Ebs.SnapshotId': fakes.ID_EC2_SNAPSHOT_2})
self.assertThat(resp, matchers.DictMatches(
{'imageId': fakes.ID_EC2_IMAGE_2}))
self.db_api.add_item.assert_called_once_with(
mock.ANY, 'ami', {'os_id': fakes.ID_OS_IMAGE_2,
'is_public': False,
'description': None})
self.assertEqual(1, self.glance.images.create.call_count)
self.assertEqual((), self.glance.images.create.call_args[0])
self.assertIsInstance(
self.glance.images.create.call_args[1], dict)
bdm = self.glance.images.create.call_args[1].pop(
'block_device_mapping', 'null')
self.assertEqual(
{'visibility': 'private',
'name': 'fake_name',
'kernel_id': fakes.ID_OS_IMAGE_AKI_1,
'ramdisk_id': fakes.ID_OS_IMAGE_ARI_1,
'root_device_name': fakes.ROOT_DEVICE_NAME_IMAGE_2,
'container_format': 'bare',
'disk_format': 'raw',
'bdm_v2': 'True'},
self.glance.images.create.call_args[1])
self.assertEqual([{'boot_index': 0,
'delete_on_termination': True,
'destination_type': 'volume',
'device_name': fakes.ROOT_DEVICE_NAME_IMAGE_2,
'source_type': 'snapshot',
'snapshot_id': fakes.ID_OS_SNAPSHOT_1,
'volume_size': 1},
{'boot_index': -1,
'delete_on_termination': False,
'destination_type': 'volume',
'device_name': '/dev/vdf',
'source_type': 'blank',
'volume_size': 100},
{'boot_index': -1,
'delete_on_termination': True,
'destination_type': 'volume',
'device_name': '/dev/vdg',
'source_type': 'snapshot',
'snapshot_id': fakes.ID_OS_SNAPSHOT_1,
'volume_size': 55},
{'boot_index': -1,
'delete_on_termination': True,
'destination_type': 'volume',
'device_name': '/dev/vdh',
'source_type': 'snapshot',
'snapshot_id': fakes.ID_OS_SNAPSHOT_2}],
jsonutils.loads(bdm))
get_os_image.assert_has_calls(
[mock.call(mock.ANY, fakes.ID_EC2_IMAGE_AKI_1),
mock.call(mock.ANY, fakes.ID_EC2_IMAGE_ARI_1)])
self.cinder.volume_snapshots.get.assert_any_call(
fakes.ID_OS_SNAPSHOT_1)
def test_register_image_invalid_parameters(self):
self.assert_execution_error(
'InvalidParameterCombination', 'RegisterImage', {})
def test_deregister_image(self):
self._setup_model()
# normal flow
resp = self.execute('DeregisterImage',
{'ImageId': fakes.ID_EC2_IMAGE_1})
self.assertThat(resp, matchers.DictMatches({'return': True}))
self.db_api.delete_item.assert_called_once_with(
mock.ANY, fakes.ID_EC2_IMAGE_1)
self.glance.images.delete.assert_called_once_with(
fakes.ID_OS_IMAGE_1)
        # deregister an image whose asynchronous creation failed
self.glance.reset_mock()
image_id = fakes.random_ec2_id('ami')
self.add_mock_db_items({'id': image_id,
'os_id': None,
'state': 'failed'})
resp = self.execute('DeregisterImage',
{'ImageId': image_id})
self.assertThat(resp, matchers.DictMatches({'return': True}))
self.db_api.delete_item.assert_called_with(mock.ANY, image_id)
self.assertFalse(self.glance.images.delete.called)
def test_deregister_image_invalid_parameters(self):
self._setup_model()
self.assert_execution_error('InvalidAMIID.NotFound', 'DeregisterImage',
{'ImageId': fakes.random_ec2_id('ami')})
        # deregister an image that is still being created asynchronously
image_id = fakes.random_ec2_id('ami')
self.add_mock_db_items({'id': image_id,
'os_id': None})
self.assert_execution_error('IncorrectState',
'DeregisterImage',
{'ImageId': image_id})
def test_describe_images(self):
self._setup_model()
resp = self.execute('DescribeImages', {})
self.assertThat(
resp,
matchers.DictMatches(
{'imagesSet': [fakes.EC2_IMAGE_1, fakes.EC2_IMAGE_2]},
orderless_lists=True),
verbose=True)
self.db_api.get_items.assert_any_call(mock.ANY, 'ami')
self.db_api.get_items.assert_any_call(mock.ANY, 'aki')
self.db_api.get_items.assert_any_call(mock.ANY, 'ari')
self.db_api.get_items_by_ids = tools.CopyingMock(
side_effect=self.db_api.get_items_by_ids.side_effect)
resp = self.execute('DescribeImages',
{'ImageId.1': fakes.ID_EC2_IMAGE_1})
self.assertThat(resp,
matchers.DictMatches(
{'imagesSet': [fakes.EC2_IMAGE_1]},
orderless_lists=True))
self.db_api.get_items_by_ids.assert_any_call(
mock.ANY, set([fakes.ID_EC2_IMAGE_1]))
self.check_filtering(
'DescribeImages', 'imagesSet',
[('architecture', 'x86_64'),
('block-device-mapping.device-name', '/dev/sdb2'),
('block-device-mapping.snapshot-id', fakes.ID_EC2_SNAPSHOT_1),
('block-device-mapping.volume-size', 22),
('description', 'fake desc'),
('image-id', fakes.ID_EC2_IMAGE_1),
('image-type', 'machine'),
('is-public', True),
('kernel_id', fakes.ID_EC2_IMAGE_AKI_1,),
('name', 'fake_name'),
('owner-id', fakes.ID_OS_PROJECT),
('ramdisk-id', fakes.ID_EC2_IMAGE_ARI_1),
('root-device-name', fakes.ROOT_DEVICE_NAME_IMAGE_1),
('root-device-type', 'instance-store'),
('state', 'available')])
self.check_tag_support(
'DescribeImages', 'imagesSet',
fakes.ID_EC2_IMAGE_1, 'imageId',
('ami', 'ari', 'aki'))
def test_describe_images_invalid_parameters(self):
self._setup_model()
self.assert_execution_error('InvalidAMIID.NotFound', 'DescribeImages',
{'ImageId.1': fakes.random_ec2_id('ami')})
self.glance.images.list.side_effect = lambda: []
self.assert_execution_error('InvalidAMIID.NotFound', 'DescribeImages',
{'ImageId.1': fakes.ID_EC2_IMAGE_1})
def test_describe_image_attributes(self):
self._setup_model()
def do_check(attr, ec2_image_id, response):
resp = self.execute('DescribeImageAttribute',
{'ImageId': ec2_image_id,
'Attribute': attr})
response['imageId'] = ec2_image_id
self.assertThat(resp,
matchers.DictMatches(response,
orderless_lists=True),
verbose=True)
do_check('launchPermission',
fakes.ID_EC2_IMAGE_2,
{'launchPermission': [{'group': 'all'}]})
do_check('kernel',
fakes.ID_EC2_IMAGE_1,
{'kernel': {'value': fakes.ID_EC2_IMAGE_AKI_1}})
do_check('ramdisk',
fakes.ID_EC2_IMAGE_1,
{'ramdisk': {'value': fakes.ID_EC2_IMAGE_ARI_1}})
do_check('rootDeviceName',
fakes.ID_EC2_IMAGE_1,
{'rootDeviceName': fakes.ROOT_DEVICE_NAME_IMAGE_1})
do_check('rootDeviceName',
fakes.ID_EC2_IMAGE_2,
{'rootDeviceName': fakes.ROOT_DEVICE_NAME_IMAGE_2})
do_check('blockDeviceMapping',
fakes.ID_EC2_IMAGE_1,
{'blockDeviceMapping': (
fakes.EC2_IMAGE_1['blockDeviceMapping'])})
do_check('blockDeviceMapping',
fakes.ID_EC2_IMAGE_2,
{'blockDeviceMapping': (
fakes.EC2_IMAGE_2['blockDeviceMapping'])})
def test_describe_image_attributes_invalid_parameters(self):
image_id = fakes.random_ec2_id('ami')
self.set_mock_db_items({'id': image_id,
'os_id': None})
self.assert_execution_error('IncorrectState',
'DescribeImageAttribute',
{'ImageId': image_id,
'Attribute': 'kernel'})
def test_modify_image_attributes(self):
self._setup_model()
resp = self.execute('ModifyImageAttribute',
{'imageId': fakes.ID_EC2_IMAGE_1,
'attribute': 'launchPermission',
'operationType': 'add',
'userGroup.1': 'all'})
self.assertThat(resp, matchers.DictMatches({'return': True}))
self.glance.images.update.assert_called_once_with(
fakes.ID_OS_IMAGE_1, visibility='public')
def test_modify_image_attributes_invalid_parameters(self):
image_id = fakes.random_ec2_id('ami')
self.set_mock_db_items({'id': image_id,
'os_id': None})
self.assert_execution_error('IncorrectState',
'ModifyImageAttribute',
{'ImageId': image_id,
'Attribute': 'kernel'})
def _setup_model(self):
self.set_mock_db_items(fakes.DB_IMAGE_1, fakes.DB_IMAGE_2,
fakes.DB_SNAPSHOT_1, fakes.DB_SNAPSHOT_2,
fakes.DB_IMAGE_AKI_1, fakes.DB_IMAGE_ARI_1,
                               fakes.DB_VOLUME_1, fakes.DB_VOLUME_2)
self.db_api.get_public_items.return_value = []
        # NOTE(ft): glance.images.list returns an iterator, not just a list
self.glance.images.list.side_effect = (
lambda: (fakes.OSImage(i)
for i in (fakes.OS_IMAGE_1, fakes.OS_IMAGE_2)))
self.glance.images.get.side_effect = (
lambda os_id: (fakes.OSImage(fakes.OS_IMAGE_1, from_get=True)
if os_id == fakes.ID_OS_IMAGE_1 else
fakes.OSImage(fakes.OS_IMAGE_2, from_get=True)
if os_id == fakes.ID_OS_IMAGE_2 else
None))
class ImagePrivateTestCase(base.BaseTestCase):
def test_format_image(self):
image_ids = {fakes.ID_OS_IMAGE_1: fakes.ID_EC2_IMAGE_1,
fakes.ID_OS_IMAGE_AKI_1: fakes.ID_EC2_IMAGE_AKI_1,
fakes.ID_OS_IMAGE_ARI_1: fakes.ID_EC2_IMAGE_ARI_1}
os_image = {'id': fakes.ID_OS_IMAGE_1,
'owner': fakes.ID_OS_PROJECT,
'created_at': fakes.TIME_CREATE_IMAGE,
'visibility': 'private',
'status': 'active',
'container_format': 'ami',
'name': 'fake_name'}
# check name and location attributes for an unnamed image
os_image['image_location'] = 'location'
os_image['name'] = None
image = image_api._format_image(
'fake_context', fakes.DB_IMAGE_1, fakes.OSImage(os_image),
None, image_ids)
self.assertEqual('location', image['imageLocation'])
self.assertEqual('location', image['name'])
# check name and location attributes for complete image
os_image['image_location'] = None
os_image['name'] = 'fake_name'
image = image_api._format_image(
'fake_context', fakes.DB_IMAGE_1, fakes.OSImage(os_image),
None, image_ids)
self.assertEqual('None (fake_name)', image['imageLocation'])
self.assertEqual('fake_name', image['name'])
# check ebs image type for bdm_v2 mapping type
os_image['bdm_v2'] = True
os_image['root_device_name'] = '/dev/vda'
os_image['block_device_mapping'] = [
{'boot_index': 0,
'snapshot_id': fakes.ID_OS_SNAPSHOT_2,
'source_type': 'snapshot',
'destination_type': 'volume'}]
image = image_api._format_image(
'fake_context', fakes.DB_IMAGE_1, fakes.OSImage(os_image),
None, image_ids,
snapshot_ids={fakes.ID_OS_SNAPSHOT_2: fakes.ID_EC2_SNAPSHOT_2})
self.assertEqual('ebs', image['rootDeviceType'])
# check instance-store image attributes with no any device mappings
os_image['bdm_v2'] = False
os_image['root_device_name'] = '/dev/vda'
os_image['block_device_mapping'] = []
image = image_api._format_image(
'fake_context', fakes.DB_IMAGE_1, fakes.OSImage(os_image),
None, None)
self.assertEqual('instance-store', image['rootDeviceType'])
self.assertNotIn('blockDeviceMapping', image)
# check Glance status translation
os_image = fakes.OSImage({'id': fakes.ID_OS_IMAGE_1})
def check_status_translation(status, expected):
os_image.status = status
image = image_api._format_image(
'fake_context', fakes.DB_IMAGE_1, os_image, None, None)
self.assertEqual(expected, image['imageState'],
"Wrong '%s' Glance status translation" % status)
check_status_translation('queued', 'pending')
check_status_translation('saving', 'pending')
check_status_translation('active', 'available')
check_status_translation('killed', 'deregistered')
check_status_translation('pending_delete', 'deregistered')
check_status_translation('deleted', 'deregistered')
check_status_translation('deactivated', 'invalid')
check_status_translation('unknown-status', 'error')
# check internal state translation
os_image.status = 'queued'
def check_state_translation(state, expected):
os_image.image_state = state
image = image_api._format_image(
'fake_context', fakes.DB_IMAGE_1, os_image, None, None)
self.assertEqual(expected, image['imageState'],
"Wrong '%s' internal state translation" % state)
for state in ('downloading', 'decrypting', 'untarring', 'uploading'):
check_state_translation(state, 'pending')
for state in ('failed_download', 'failed_decrypt', 'failed_untar',
'failed_upload'):
check_state_translation(state, 'failed')
os_image.status = 'active'
check_state_translation('available', 'available')
check_state_translation('unknown-state', 'available')
def test_format_mappings(self):
db_api = self.mock_db()
# check virtual mapping formatting
properties = {
'mappings': [
{'virtual': 'ami', 'device': '/dev/sda'},
{'virtual': 'root', 'device': 'sda'},
{'virtual': 'ephemeral0', 'device': 'sdb'},
{'virtual': 'swap', 'device': 'sdc'},
{'virtual': 'ephemeral1', 'device': 'sdd'},
{'virtual': 'ephemeral2', 'device': 'sde'},
{'virtual': 'ephemeral', 'device': 'sdf'},
{'virtual': '/dev/sdf1', 'device': 'root'}],
}
expected = [
{'virtualName': 'ephemeral0', 'deviceName': '/dev/sdb'},
{'virtualName': 'swap', 'deviceName': '/dev/sdc'},
{'virtualName': 'ephemeral1', 'deviceName': '/dev/sdd'},
{'virtualName': 'ephemeral2', 'deviceName': '/dev/sde'},
]
result = image_api._format_mappings('fake_context', properties)
self.assertEqual(expected, result)
# check bdm v2 formatting
db_api.set_mock_items(fakes.DB_IMAGE_2, fakes.DB_VOLUME_3)
properties = {
'bdm_v2': True,
'block_device_mapping': [
{'boot_index': 0,
'snapshot_id': fakes.ID_OS_SNAPSHOT_1,
'source_type': 'snapshot',
'destination_type': 'volume'},
{'boot_index': None,
'snapshot_id': fakes.ID_OS_SNAPSHOT_2,
'source_type': 'snapshot',
'destination_type': 'volume'},
{'device_name': 'vdi',
'boot_index': -1,
'image_id': fakes.ID_OS_IMAGE_2,
'source_type': 'image',
'destination_type': 'volume',
'volume_size': 20},
{'device_name': 'vdv',
'boot_index': -1,
'volume_id': fakes.ID_OS_VOLUME_3,
'source_type': 'volume',
'destination_type': 'volume'},
{'device_name': 'vdb',
'boot_index': -1,
'source_type': 'blank',
'destination_type': 'volume',
'volume_size': 100,
'delete_on_termination': True},
],
}
expected = [
{'deviceName': 'vdx',
'ebs': {'snapshotId': fakes.ID_EC2_SNAPSHOT_1,
'deleteOnTermination': False}},
{'ebs': {'snapshotId': fakes.ID_EC2_SNAPSHOT_2,
'deleteOnTermination': False}},
{'deviceName': 'vdi',
'ebs': {'snapshotId': fakes.ID_EC2_IMAGE_2,
'volumeSize': 20,
'deleteOnTermination': False}},
{'deviceName': 'vdv',
'ebs': {'snapshotId': fakes.ID_EC2_VOLUME_3,
'deleteOnTermination': False}},
{'deviceName': 'vdb',
'ebs': {'volumeSize': 100,
'deleteOnTermination': True}},
]
result = image_api._format_mappings(
'fake_context', properties, root_device_name='vdx',
snapshot_ids={fakes.ID_OS_SNAPSHOT_1: fakes.ID_EC2_SNAPSHOT_1,
fakes.ID_OS_SNAPSHOT_2: fakes.ID_EC2_SNAPSHOT_2})
self.assertEqual(expected, result)
# check inheritance and generation of virtual name
properties = {
'mappings': [
{'device': 'vdd', 'virtual': 'ephemeral1'},
],
'bdm_v2': True,
'block_device_mapping': [
{'device_name': '/dev/vdb',
'source_type': 'blank',
'destination_type': 'local',
'guest_format': 'swap'},
{'device_name': 'vdc',
'source_type': 'blank',
'destination_type': 'local',
'volume_size': 5},
{'device_name': 'vde',
'source_type': 'blank',
'destination_type': 'local'},
],
}
expected = [
{'deviceName': '/dev/vdd', 'virtualName': 'ephemeral1'},
{'deviceName': '/dev/vdb', 'virtualName': 'swap'},
{'deviceName': 'vdc', 'virtualName': 'ephemeral0'},
{'deviceName': 'vde', 'virtualName': 'ephemeral2'},
]
result = image_api._format_mappings('fake_context', properties)
self.assertEqual(expected, result)
def test_get_db_items(self):
describer = image_api.ImageDescriber()
describer.context = base.create_context()
        # NOTE(ft): the first requested image is user-owned and public,
# the second is absent
db_api = self.mock_db()
db_api.set_mock_items(fakes.DB_IMAGE_1)
describer.ids = set([fakes.ID_EC2_IMAGE_1, fakes.ID_EC2_IMAGE_2])
self.assertRaises(exception.InvalidAMIIDNotFound,
describer.get_db_items)
def test_describe_images_being_created(self):
db_api = self.mock_db()
glance = self.mock_glance()
context = base.create_context()
image_id = fakes.random_ec2_id('ami')
image = {'id': image_id,
'os_id': None,
'is_public': False,
'description': 'fake desc'}
db_api.set_mock_items(image)
db_api.get_public_items.return_value = []
# describe cases when no glance image exists
glance.images.list.return_value = []
expected = {'imagesSet': [{'imageId': image_id,
'description': 'fake desc',
'imageOwnerId': fakes.ID_OS_PROJECT,
'imageState': 'pending',
'imageType': 'machine',
'isPublic': False}]}
# describe all images
result = image_api.describe_images(context)
self.assertEqual(expected, result)
# describe the image
result = image_api.describe_images(context, image_id=[image_id])
self.assertEqual(expected, result)
# describe with filter
result = image_api.describe_images(
context, filter=[{'name': 'name', 'value': 'noname'}])
self.assertEqual({'imagesSet': []}, result)
# describe failed image
image['state'] = 'failed'
expected['imagesSet'][0]['imageState'] = 'failed'
result = image_api.describe_images(base.create_context())
self.assertEqual(expected, result)
        # describe cases when glance image exists, db item is not yet updated
del image['state']
os_image_id = fakes.random_os_id()
os_image = {'id': os_image_id,
'owner': fakes.ID_OS_PROJECT,
'status': 'active',
'visibility': 'private',
'ec2_id': image_id}
glance.images.list.return_value = [fakes.OSImage(os_image)]
expected['imagesSet'] = [{
'architecture': None,
'creationDate': None,
'description': 'fake desc',
'imageId': image_id,
'imageLocation': 'None (None)',
'imageOwnerId': fakes.ID_OS_PROJECT,
'imageState': 'available',
'imageType': 'machine',
'isPublic': False,
'name': None,
'rootDeviceType': 'instance-store'}]
# describe all images
result = image_api.describe_images(context)
self.assertEqual(expected, result)
db_api.update_item.assert_called_once_with(
context, tools.update_dict(image, {'os_id': os_image_id}))
# describe the image
db_api.reset_mock()
result = image_api.describe_images(context, image_id=[image_id])
self.assertEqual(expected, result)
db_api.update_item.assert_called_once_with(
context, tools.update_dict(image, {'os_id': os_image_id}))
class S3TestCase(base.BaseTestCase):
def test_s3_parse_manifest(self):
db_api = self.mock_db()
glance = self.mock_glance()
db_api.set_mock_items(fakes.DB_IMAGE_AKI_1, fakes.DB_IMAGE_ARI_1)
glance.images.get.side_effect = (
tools.get_by_1st_arg_getter({
fakes.ID_OS_IMAGE_AKI_1: fakes.OSImage(fakes.OS_IMAGE_AKI_1),
fakes.ID_OS_IMAGE_ARI_1: fakes.OSImage(fakes.OS_IMAGE_ARI_1)}))
metadata, image_parts, key, iv = image_api._s3_parse_manifest(
base.create_context(), AMI_MANIFEST_XML)
expected_metadata = {
'disk_format': 'ami',
'container_format': 'ami',
'architecture': 'x86_64',
'kernel_id': fakes.ID_OS_IMAGE_AKI_1,
'ramdisk_id': fakes.ID_OS_IMAGE_ARI_1,
'mappings': [
{"device": "sda1", "virtual": "ami"},
{"device": "/dev/sda1", "virtual": "root"},
{"device": "sda2", "virtual": "ephemeral0"},
{"device": "sda3", "virtual": "swap"}]}
self.assertThat(metadata,
matchers.DictMatches(expected_metadata,
orderless_lists=True))
self.assertThat(image_parts,
matchers.ListMatches(['foo']))
self.assertEqual('foo', key)
self.assertEqual('foo', iv)
db_api.get_items_ids.assert_any_call(
mock.ANY, 'aki', item_ids=(fakes.ID_EC2_IMAGE_AKI_1,),
item_os_ids=None)
db_api.get_items_ids.assert_any_call(
mock.ANY, 'ari', item_ids=(fakes.ID_EC2_IMAGE_ARI_1,),
item_os_ids=None)
def test_s3_create_image_locations(self):
self.configure(image_decryption_dir=None)
glance = self.mock_glance()
_handle, tempf = tempfile.mkstemp()
fake_context = base.create_context()
@mock.patch('ec2api.api.image._s3_untarzip_image')
@mock.patch('ec2api.api.image._s3_decrypt_image')
@mock.patch('ec2api.api.image._s3_download_file')
@mock.patch('ec2api.api.image._s3_conn')
def do_test(s3_conn, s3_download_file, s3_decrypt_image,
s3_untarzip_image):
(s3_conn.return_value.
get_object.return_value) = {'Body': FILE_MANIFEST_XML}
s3_download_file.return_value = tempf
s3_untarzip_image.return_value = tempf
os_image_id = fakes.random_os_id()
(glance.images.create.return_value) = (
fakes.OSImage({'id': os_image_id,
'status': 'queued'}))
data = [
({'image_location': 'testbucket_1/test.img.manifest.xml'},
'testbucket_1', 'test.img.manifest.xml'),
({'image_location': '/testbucket_2/test.img.manifest.xml'},
'testbucket_2', 'test.img.manifest.xml')]
for mdata, bucket, manifest in data:
image = image_api._s3_create(fake_context, mdata)
eventlet.sleep()
self.glance.images.update.assert_called_with(
os_image_id, image_state='available')
self.glance.images.upload.assert_any_call(
os_image_id, mock.ANY)
s3_conn.return_value.get_object.assert_called_with(
Bucket=bucket, Key=manifest)
s3_download_file.assert_called_with(
mock.ANY, bucket, 'foo', mock.ANY)
s3_decrypt_image.assert_called_with(
fake_context, mock.ANY, 'foo', 'foo', mock.ANY)
s3_untarzip_image.assert_called_with(mock.ANY, mock.ANY)
do_test()
@mock.patch('ec2api.api.image.eventlet.spawn_n')
def test_s3_create_bdm(self, spawn_n):
glance = self.mock_glance()
metadata = {'image_location': 'fake_bucket/fake_manifest',
'root_device_name': '/dev/sda1',
'block_device_mapping': [
{'device_name': '/dev/sda1',
'snapshot_id': fakes.ID_OS_SNAPSHOT_1,
'delete_on_termination': True},
{'device_name': '/dev/sda2',
'virtual_name': 'ephemeral0'},
{'device_name': '/dev/sdb0',
'no_device': True}]}
fake_context = base.create_context()
with mock.patch('ec2api.api.image._s3_conn') as s3_conn:
(s3_conn.return_value.
get_object.return_value) = {'Body': FILE_MANIFEST_XML}
image_api._s3_create(fake_context, metadata)
glance.images.create.assert_called_once_with(
disk_format='ami', container_format='ami',
visibility='private', architecture='x86_64',
image_state='pending', root_device_name='/dev/sda1',
block_device_mapping=[{'device_name': '/dev/sda1',
'snapshot_id': fakes.ID_OS_SNAPSHOT_1,
'delete_on_termination': True},
{'device_name': '/dev/sda2',
'virtual_name': 'ephemeral0'},
{'device_name': '/dev/sdb0',
'no_device': True}],
image_location='fake_bucket/fake_manifest')
def test_s3_malicious_tarballs(self):
self.assertRaises(
exception.EC2InvalidException,
image_api._s3_test_for_malicious_tarball,
"/unused", os.path.join(os.path.dirname(__file__), 'abs.tar.gz'))
self.assertRaises(
exception.EC2InvalidException,
image_api._s3_test_for_malicious_tarball,
"/unused", os.path.join(os.path.dirname(__file__), 'rel.tar.gz'))
def test_decrypt_text(self):
public_key = os.path.join(os.path.dirname(__file__), 'test_cert.pem')
private_key = os.path.join(os.path.dirname(__file__),
'test_private_key.pem')
subject = "/C=RU/ST=Moscow/L=Moscow/O=Progmatic/CN=RootCA"
certificate_file = processutils.execute('openssl',
'req', '-x509', '-new',
'-key', private_key,
'-days', '365',
'-out', public_key,
'-subj', subject)
text = "some @#!%^* test text"
process_input = text.encode("ascii")
enc, _err = processutils.execute('openssl',
'rsautl',
'-certin',
'-encrypt',
'-inkey', public_key,
process_input=process_input,
binary=True)
self.assertRaises(exception.EC2Exception, image_api._decrypt_text, enc)
self.configure(x509_root_private_key=private_key)
dec = image_api._decrypt_text(enc)
self.assertIsInstance(dec, bytes)
dec = dec.decode('ascii')
self.assertEqual(text, dec)
| {
"content_hash": "53d801738f609d6dcb99e79af0029750",
"timestamp": "",
"source": "github",
"line_count": 971,
"max_line_length": 79,
"avg_line_length": 43.42739443872296,
"alnum_prop": 0.5139916524378676,
"repo_name": "openstack/ec2-api",
"id": "5885ca45746e9bb0fb12de9fd25a9e6b8faeab8d",
"size": "42757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ec2api/tests/unit/test_image.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1350398"
},
{
"name": "Shell",
"bytes": "44627"
}
],
"symlink_target": ""
} |
import logging
from fabric import api
from oslo_config import cfg
from cloudferrylib.copy_engines import base
from cloudferrylib.utils import cmd_cfg
from cloudferrylib.utils import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class SSHFileToFile(base.BaseCopier):
def transfer(self, data):
if CONF.migrate.direct_compute_transfer:
return self.transfer_direct(data)
LOG.debug("| | copy file")
ssh_ip_src = self.src_cloud.cloud_config.cloud.ssh_host
ssh_ip_dst = self.dst_cloud.cloud_config.cloud.ssh_host
with utils.forward_agent(CONF.migrate.key_filename), \
utils.up_ssh_tunnel(data['host_dst'], ssh_ip_dst,
ssh_ip_src) as port:
if CONF.migrate.file_compression == "dd":
dd_dst = cmd_cfg.dd_cmd_of
ssh_cmd_dst = cmd_cfg.ssh_cmd_port
ssh_dst = ssh_cmd_dst(port, 'localhost', dd_dst)
dd_src = cmd_cfg.dd_cmd_if
ssh_cmd_src = cmd_cfg.ssh_cmd
ssh_src = ssh_cmd_src(data['host_src'], dd_src)
process = ssh_src >> ssh_dst
process = process('1M',
data['path_src'],
'1M',
data['path_dst'])
self.src_cloud.ssh_util.execute(process)
elif CONF.migrate.file_compression == "gzip":
dd = cmd_cfg.dd_cmd_of
gunzip_dd = cmd_cfg.gunzip_cmd >> dd
ssh_cmd_dst = cmd_cfg.ssh_cmd_port
ssh_dst = ssh_cmd_dst(port, 'localhost', gunzip_dd)
gzip_cmd = cmd_cfg.gzip_cmd
ssh_cmd_src = cmd_cfg.ssh_cmd
ssh_src = ssh_cmd_src(data['host_src'], gzip_cmd)
process = ssh_src >> ssh_dst
process = process(CONF.migrate.level_compression,
data['path_src'], '1M', data['path_dst'])
self.src_cloud.ssh_util.execute(process)
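    # Rough sketch of what the "dd" branch above assembles (the exact command
    # templates live in cmd_cfg and may differ; this is only an approximation
    # assuming the obvious dd/ssh templates):
    #
    #   ssh <host_src> "dd if=<path_src> bs=1M" | \
    #       ssh -p <tunnel_port> localhost "dd of=<path_dst> bs=1M"
    #
    # i.e. the volume file is streamed from the source host through the SSH
    # tunnel opened by utils.up_ssh_tunnel() with no intermediate file; the
    # "gzip" branch wraps the same stream in gzip/gunzip.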
def transfer_direct(self, data):
ssh_attempts = CONF.migrate.ssh_connection_attempts
LOG.debug("| | copy file")
if CONF.src.ssh_user != 'root' or CONF.dst.ssh_user != 'root':
LOG.critical("This operation needs 'sudo' access rights, that is "
"currently not implemented in this driver. "
"Please use the default driver from "
"cloudferrylib/copy_engines/.")
with api.settings(host_string=data['host_src'],
connection_attempts=ssh_attempts), \
utils.forward_agent(CONF.migrate.key_filename):
if CONF.migrate.file_compression == "dd":
dd_dst = cmd_cfg.dd_cmd_of
ssh_cmd_dst = cmd_cfg.ssh_cmd
ssh_dst = ssh_cmd_dst(data['host_dst'], dd_dst)
dd_src = cmd_cfg.dd_cmd_if
process = dd_src >> ssh_dst
process = process('1M',
data['path_src'],
'1M',
data['path_dst'])
self.src_cloud.ssh_util.execute(process,
host_exec=data['host_src'])
elif CONF.migrate.file_compression == "gzip":
dd = cmd_cfg.dd_cmd_of
gunzip_dd = cmd_cfg.gunzip_cmd >> dd
ssh_cmd_dst = cmd_cfg.ssh_cmd
ssh_dst = ssh_cmd_dst(data['host_dst'], gunzip_dd)
gzip_cmd = cmd_cfg.gzip_cmd
process = gzip_cmd >> ssh_dst
process = process(CONF.migrate.level_compression,
data['path_src'], '1M', data['path_dst'])
self.src_cloud.ssh_util.execute(process,
host_exec=data['host_src'])
| {
"content_hash": "4eb46607e00e89e26711510fde10d51e",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 78,
"avg_line_length": 40.673469387755105,
"alnum_prop": 0.49573507275464124,
"repo_name": "mgrygoriev/CloudFerry",
"id": "b4ca598e1b1bc149e2a44a188262acb5229e83f1",
"size": "4561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudferrylib/copy_engines/ssh_file_to_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2615"
},
{
"name": "Python",
"bytes": "775433"
},
{
"name": "Ruby",
"bytes": "5181"
},
{
"name": "Shell",
"bytes": "34787"
}
],
"symlink_target": ""
} |