Unnamed: 0 (int64, 0–10k) | function (string, 79–138k chars) | label (string, 20 classes) | info (string, 42–261 chars)
---|---|---|---|
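Each row pairs a Python 2 function whose `except` clause contains a `__HOLE__` placeholder with the exception class (`label`) that fills the hole; `info` records the source file and function in the ETHPy150Open corpus. A minimal sketch of consuming a row, assuming the dump is exported as a CSV named `dataset.csv` with the four columns above (the file name and column order are assumptions, and since the functions are Python 2 code, any syntax check on the restored source would need a Python 2 parser):

```python
import csv

COLUMNS = ['index', 'function', 'label', 'info']  # assumed column order

with open('dataset.csv', newline='') as f:  # hypothetical export of this dump
    for row in csv.DictReader(f, fieldnames=COLUMNS):
        # `label` names the exception class that belongs in the __HOLE__ slot.
        restored = row['function'].replace('__HOLE__', row['label'])
        print('{0}: except {1}'.format(row['info'], row['label']))
```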
3,500 |
def first(self):
    """Return a (value, source) pair for the first object found for
    this view. This amounts to the first element returned by
    `resolve`. If no values are available, a NotFoundError is
    raised.
    """
    pairs = self.resolve()
    try:
        return iter_first(pairs)
    except __HOLE__:
        raise NotFoundError(u"{0} not found".format(self.name))
|
ValueError
|
dataset/ETHPy150Open beetbox/beets/beets/util/confit.py/ConfigView.first
|
3,501 |
def keys(self):
    """Returns a list containing all the keys available as subviews
    of the current views. This enumerates all the keys in *all*
    dictionaries matching the current view, in contrast to
    ``view.get(dict).keys()``, which gets all the keys for the
    *first* dict matching the view. If the object for this view in
    any source is not a dict, then a ConfigTypeError is raised. The
    keys are ordered according to how they appear in each source.
    """
    keys = []
    for dic, _ in self.resolve():
        try:
            cur_keys = dic.keys()
        except __HOLE__:
            raise ConfigTypeError(
                u'{0} must be a dict, not {1}'.format(
                    self.name, type(dic).__name__
                )
            )
        for key in cur_keys:
            if key not in keys:
                keys.append(key)
    return keys
|
AttributeError
|
dataset/ETHPy150Open beetbox/beets/beets/util/confit.py/ConfigView.keys
|
3,502 |
def all_contents(self):
    """Iterates over all subviews from collections at this view from
    *all* sources. If the object for this view in any source is not
    iterable, then a ConfigTypeError is raised. This method is
    intended to be used when the view indicates a list; this method
    will concatenate the contents of the list from all sources.
    """
    for collection, _ in self.resolve():
        try:
            it = iter(collection)
        except __HOLE__:
            raise ConfigTypeError(
                u'{0} must be an iterable, not {1}'.format(
                    self.name, type(collection).__name__
                )
            )
        for value in it:
            yield value

# Validation and conversion.
|
TypeError
|
dataset/ETHPy150Open beetbox/beets/beets/util/confit.py/ConfigView.all_contents
|
3,503 |
def resolve(self):
    for collection, source in self.parent.resolve():
        try:
            value = collection[self.key]
        except IndexError:
            # List index out of bounds.
            continue
        except KeyError:
            # Dict key does not exist.
            continue
        except __HOLE__:
            # Not subscriptable.
            raise ConfigTypeError(
                u"{0} must be a collection, not {1}".format(
                    self.parent.name, type(collection).__name__
                )
            )
        yield value, source
|
TypeError
|
dataset/ETHPy150Open beetbox/beets/beets/util/confit.py/Subview.resolve
|
3,504 |
def construct_mapping(self, node, deep=False):
    if isinstance(node, yaml.MappingNode):
        self.flatten_mapping(node)
    else:
        raise yaml.constructor.ConstructorError(
            None, None,
            u'expected a mapping node, but found %s' % node.id,
            node.start_mark
        )

    mapping = OrderedDict()
    for key_node, value_node in node.value:
        key = self.construct_object(key_node, deep=deep)
        try:
            hash(key)
        except __HOLE__ as exc:
            raise yaml.constructor.ConstructorError(
                u'while constructing a mapping',
                node.start_mark, 'found unacceptable key (%s)' % exc,
                key_node.start_mark
            )
        value = self.construct_object(value_node, deep=deep)
        mapping[key] = value
    return mapping

# Allow bare strings to begin with %. Directives are still detected.
|
TypeError
|
dataset/ETHPy150Open beetbox/beets/beets/util/confit.py/Loader.construct_mapping
|
3,505 |
def load_yaml(filename):
    """Read a YAML document from a file. If the file cannot be read or
    parsed, a ConfigReadError is raised.
    """
    try:
        with open(filename, 'r') as f:
            return yaml.load(f, Loader=Loader)
    except (__HOLE__, yaml.error.YAMLError) as exc:
        raise ConfigReadError(filename, exc)

# YAML dumping.
|
IOError
|
dataset/ETHPy150Open beetbox/beets/beets/util/confit.py/load_yaml
|
3,506 |
def convert(self, value, view):
    """Ensure that the value follows at least one template.
    """
    is_mapping = isinstance(self.template, MappingTemplate)

    for candidate in self.allowed:
        try:
            if is_mapping:
                if isinstance(candidate, Filename) and \
                        candidate.relative_to:
                    next_template = candidate.template_with_relatives(
                        view,
                        self.template
                    )
                    next_template.subtemplates[view.key] = as_template(
                        candidate
                    )
                else:
                    next_template = MappingTemplate({view.key: candidate})
                return view.parent.get(next_template)[view.key]
            else:
                return view.get(candidate)
        except ConfigTemplateError:
            raise
        except ConfigError:
            pass
        except __HOLE__ as exc:
            raise ConfigTemplateError(exc)

    self.fail(
        u'must be one of {0}, not {1}'.format(
            repr(self.allowed), repr(value)
        ),
        view
    )
|
ValueError
|
dataset/ETHPy150Open beetbox/beets/beets/util/confit.py/OneOf.convert
|
3,507 |
def convert(self, value, view):
    if isinstance(value, bytes):
        value = value.decode('utf8', 'ignore')

    if isinstance(value, STRING):
        if self.split:
            return value.split()
        else:
            return [value]

    try:
        value = list(value)
    except __HOLE__:
        self.fail(u'must be a whitespace-separated string or a list',
                  view, True)

    def convert(x):
        if isinstance(x, STRING):
            return x
        elif isinstance(x, bytes):
            return x.decode('utf8', 'ignore')
        else:
            self.fail(u'must be a list of strings', view, True)
    return list(map(convert, value))
|
TypeError
|
dataset/ETHPy150Open beetbox/beets/beets/util/confit.py/StrSeq.convert
|
3,508 |
def resolve_relative_to(self, view, template):
    if not isinstance(template, (collections.Mapping, MappingTemplate)):
        # disallow config.get(Filename(relative_to='foo'))
        raise ConfigTemplateError(
            u'relative_to may only be used when getting multiple values.'
        )
    elif self.relative_to == view.key:
        raise ConfigTemplateError(
            u'{0} is relative to itself'.format(view.name)
        )
    elif self.relative_to not in view.parent.keys():
        # self.relative_to is not in the config
        self.fail(
            (
                u'needs sibling value "{0}" to expand relative path'
            ).format(self.relative_to),
            view
        )

    old_template = {}
    old_template.update(template.subtemplates)

    # save time by skipping MappingTemplate's init loop
    next_template = MappingTemplate({})
    next_relative = self.relative_to

    # gather all the needed templates and nothing else
    while next_relative is not None:
        try:
            # pop to avoid infinite loop because of recursive
            # relative paths
            rel_to_template = old_template.pop(next_relative)
        except __HOLE__:
            if next_relative in template.subtemplates:
                # we encountered this config key previously
                raise ConfigTemplateError((
                    u'{0} and {1} are recursively relative'
                ).format(view.name, self.relative_to))
            else:
                raise ConfigTemplateError((
                    u'missing template for {0}, needed to expand {1}\'s' +
                    u'relative path'
                ).format(self.relative_to, view.name))

        next_template.subtemplates[next_relative] = rel_to_template
        next_relative = rel_to_template.relative_to

    return view.parent.get(next_template)[self.relative_to]
|
KeyError
|
dataset/ETHPy150Open beetbox/beets/beets/util/confit.py/Filename.resolve_relative_to
|
3,509 |
def _listdir(self, path):
    try:
        return os.listdir(path)
    except __HOLE__ as e:
        self.logger.error(_('ERROR: Failed to get paths to drive '
                            'partitions: %s') % e)
        return []
|
OSError
|
dataset/ETHPy150Open openstack/swift/swift/container/updater.py/ContainerUpdater._listdir
|
3,510 |
@classmethod
def setUpClass(cls):
    '''
    Preparing for all tests.
    '''
    dbfile_handle, cls.dbfilename = tempfile.mkstemp(
        suffix='.db', prefix='test')
    os.close(dbfile_handle)
    # The annotation files are large so they are not included in the repo;
    # download them manually with
    # gffutils/test/data/download-large-annotation-files.sh
    try:
        gffutils.create_db(
            cls.gff_file, cls.dbfilename, **cls.create_db_kwargs)
    except __HOLE__:
        raise EnvironmentError(
            "Annotation files not found. Download them manually by "
            "running "
            "gffutils/test/data/download-large-annotation-files.sh")
    cls.db = gffutils.FeatureDB(cls.dbfilename)
    # Chromosome sizes for testing fetching features from regions
    cls.chromosome_sizes = {}
    with open(cls.chromsizes_file) as chromosome_sizes:
        for chromosome_size in chromosome_sizes:
            chromosome, size = chromosome_size.split()
            size = int(size)
            cls.chromosome_sizes[chromosome] = size
    random.seed(1842346386)
    print('Preparation finished')
|
ValueError
|
dataset/ETHPy150Open daler/gffutils/gffutils/test/performance_test.py/PerformanceTestFeatureDB.setUpClass
|
3,511 |
def make_sure_path_exists(path):
    """
    Ensures that a directory exists.

    :param path: A directory path.
    """
    logging.debug('Making sure path exists: {0}'.format(path))
    try:
        os.makedirs(path)
    except __HOLE__ as exception:
        if exception.errno != errno.EEXIST:
            return False
    return True
|
OSError
|
dataset/ETHPy150Open audreyr/cookiecutter/cookiecutter/utils.py/make_sure_path_exists
|
3,512 |
def __init__(self):
    """
    Try and initialize with Django settings.
    """
    self.option_list = parser.option_groups[0].option_list
    # Some constants about the software.
    self["GNOTTY_VERSION"] = __version__
    self["GNOTTY_VERSION_STRING"] = __version_string__
    self["GNOTTY_PROJECT_URL"] = __url__
    try:
        from django.conf import settings
        for k, v in parser.defaults.items():
            self[k] = getattr(settings, "GNOTTY_%s" % k, v)
        self.set_max_message_length()
    except __HOLE__:
        pass
|
ImportError
|
dataset/ETHPy150Open stephenmcd/gnotty/gnotty/conf.py/Settings.__init__
|
3,513 |
@classmethod
def _process_points(cls, points):
    """
    Format `points` parameter.

    Input:
        a value or (timestamp, value) pair or a list of value or (timestamp, value) pairs

    Returns:
        list of (timestamp, float value) pairs
    """
    now = time.time()
    points_lst = points if isinstance(points, list) else [points]

    def rec_parse(points_lst):
        """
        Recursively parse a list of values or a list of (timestamp, value) pairs to a list of
        (timestamp, `float` value) pairs.
        """
        try:
            if not points_lst:
                return []

            point = points_lst.pop()
            timestamp = now if isinstance(point, Number) else point[0]
            value = float(point) if isinstance(point, Number) else float(point[1])
            point = [(timestamp, value)]

            return point + rec_parse(points_lst)
        except __HOLE__ as e:
            raise TypeError(
                u"{0}: "
                "`points` parameter must use real numerical values.".format(e)
            )
        except IndexError as e:
            raise IndexError(
                u"{0}: "
                u"`points` must be a list of values or "
                u"a list of (timestamp, value) pairs".format(e)
            )

    return rec_parse(points_lst)
|
TypeError
|
dataset/ETHPy150Open DataDog/datadogpy/datadog/api/metrics.py/Metric._process_points
|
3,514 |
@classmethod
def send(cls, metrics=None, **single_metric):
    """
    Submit a metric or a list of metrics to the metric API

    :param metric: the name of the time series
    :type metric: string

    :param points: a (timestamp, value) pair or list of (timestamp, value) pairs
    :type points: list

    :param host: host name that produced the metric
    :type host: string

    :param tags: list of tags associated with the metric.
    :type tags: string list

    :param type: type of the metric
    :type type: 'gauge' or 'counter' string

    :returns: JSON response from HTTP request
    """
    def rename_metric_type(metric):
        """
        FIXME DROPME in 1.0:

        API documentation was illegitimately promoting usage of `metric_type` parameter
        instead of `type`.
        To be consistent and avoid 'backward incompatibilities', properly rename this parameter.
        """
        if 'metric_type' in metric:
            metric['type'] = metric.pop('metric_type')

    # Set the right endpoint
    cls._class_url = cls._METRIC_SUBMIT_ENDPOINT

    # Format the payload
    try:
        if metrics:
            for metric in metrics:
                if isinstance(metric, dict):
                    rename_metric_type(metric)
                    metric['points'] = cls._process_points(metric['points'])
            metrics_dict = {"series": metrics}
        else:
            rename_metric_type(single_metric)
            single_metric['points'] = cls._process_points(single_metric['points'])
            metrics = [single_metric]
            metrics_dict = {"series": metrics}
    except __HOLE__:
        raise KeyError("'points' parameter is required")

    return super(Metric, cls).send(attach_host_name=True, **metrics_dict)
|
KeyError
|
dataset/ETHPy150Open DataDog/datadogpy/datadog/api/metrics.py/Metric.send
|
3,515 |
@classmethod
def query(cls, **params):
    """
    Query metrics from Datadog

    :param start: query start timestamp
    :type start: POSIX timestamp

    :param end: query end timestamp
    :type end: POSIX timestamp

    :param query: metric query
    :type query: string query

    :return: JSON response from HTTP request

    *start* and *end* should be less than 24 hours apart.
    It is *not* meant to retrieve metric data in bulk.

    >>> api.Metric.query(start=int(time.time()) - 3600, end=int(time.time()),
                         query='avg:system.cpu.idle{*}')
    """
    # Set the right endpoint
    cls._class_url = cls._METRIC_QUERY_ENDPOINT

    # `from` is a reserved keyword in Python, therefore
    # `api.Metric.query(from=...)` is not permitted
    # -> map `start` to `from` and `end` to `to`
    try:
        params['from'] = params.pop('start')
        params['to'] = params.pop('end')
    except __HOLE__ as e:
        raise ApiError("The parameter '{0}' is required".format(e.args[0]))

    return super(Metric, cls)._search(**params)
|
KeyError
|
dataset/ETHPy150Open DataDog/datadogpy/datadog/api/metrics.py/Metric.query
|
3,516 |
@classmethod
def configure(cls, settings):
    kwargs = super(RedisCache, cls).configure(settings)
    try:
        from redis import StrictRedis
    except __HOLE__:  # pragma: no cover
        raise ImportError("You must 'pip install redis' before using "
                          "redis as the database")
    db_url = settings.get('db.url')
    kwargs['db'] = StrictRedis.from_url(db_url)
    return kwargs
|
ImportError
|
dataset/ETHPy150Open mathcamp/pypicloud/pypicloud/cache/redis_cache.py/RedisCache.configure
|
3,517 |
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_unicode, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    if strings_only and isinstance(s, (types.NoneType, int, long, datetime.datetime, datetime.date, datetime.time, float)):
        return s
    try:
        if not isinstance(s, basestring,):
            if hasattr(s, '__unicode__'):
                s = unicode(s)
            else:
                try:
                    s = unicode(str(s), encoding, errors)
                except UnicodeEncodeError:
                    if not isinstance(s, Exception):
                        raise
                    # If we get to here, the caller has passed in an Exception
                    # subclass populated with non-ASCII data without special
                    # handling to display as a string. We need to handle this
                    # without raising a further exception. We do an
                    # approximation to what the Exception's standard str()
                    # output should be.
                    s = ' '.join([force_unicode(arg, encoding, strings_only,
                                                errors) for arg in s])
        elif not isinstance(s, unicode):
            # Note: We use .decode() here, instead of unicode(s, encoding,
            # errors), so that if s is a SafeString, it ends up being a
            # SafeUnicode at the end.
            s = s.decode(encoding, errors)
    except __HOLE__, e:
        raise DjangoUnicodeDecodeError(s, *e.args)
    return s
|
UnicodeDecodeError
|
dataset/ETHPy150Open klipstein/dojango/dojango/util/__init__.py/force_unicode
|
3,518 |
def is_number(s):
    """Is this the right way to check a number?
    Maybe there is a better pythonic way :-)"""
    try:
        int(s)
        return True
    except TypeError:
        pass
    except __HOLE__:
        pass
    return False
|
ValueError
|
dataset/ETHPy150Open klipstein/dojango/dojango/util/__init__.py/is_number
|
3,519 |
@tests.test
def inc_dec():
    """Tests inc() and dec() methods."""
    try:
        mc = os.environ['VOLATILETESTS_MEMCACHED']
    except __HOLE__:
        print 'VOLATILETESTS_MEMCACHED is not set; skipped'
        return
    mc = MemcachedCache([mc], key_prefix='volatiletests_%s' % time.time())

    def tpl(method, r1, r2):
        unit, store = make_unit(mc)
        op = getattr(unit, method)
        key = method + 'num'
        assert unit.get(key) is None
        assert store.get(unit.subkey(key)) is None
        for ref in unit.subkeys_transaction:
            assert ref() is None or key not in ref()
        unit.set(key, 5)
        op(key)
        assert unit.get(key) == r1
        assert store.get(unit.subkey(key)) == r1
        for ref in unit.subkeys_transaction:
            assert key in ref()
        op(key, 2)
        assert unit.get(key) == r2
        assert store.get(unit.subkey(key)) == r2
        for ref in unit.subkeys_transaction:
            assert key in ref()

    tpl('inc', 6, 8)
    tpl('dec', 4, 2)
|
KeyError
|
dataset/ETHPy150Open StyleShare/flask-volatile/volatiletests/unit.py/inc_dec
|
3,520 |
def encode_blob_data(data):
    try:
        return 'utf-8', data.decode('utf-8')
    except __HOLE__:
        return 'base64', b64encode(data)
|
UnicodeDecodeError
|
dataset/ETHPy150Open hulu/restfulgit/restfulgit/plumbing/converters.py/encode_blob_data
|
3,521 |
@data_factory(label="Pandas Table", identifier=has_extension('csv csv txt tsv tbl dat'))
def pandas_read_table(path, **kwargs):
    """ A factory for reading tabular data using pandas

    :param path: path/to/file
    :param kwargs: All kwargs are passed to pandas.read_csv
    :returns: :class:`glue.core.data.Data` object
    """
    import pandas as pd
    try:
        from pandas.parser import CParserError
    except __HOLE__:  # pragma: no cover
        from pandas._parser import CParserError

    # iterate over common delimiters to search for best option
    delimiters = kwargs.pop('delimiter', [None] + list(',|\t '))

    fallback = None

    for d in delimiters:
        try:
            indf = pd.read_csv(path, delimiter=d, **kwargs)

            # ignore files parsed to empty dataframes
            if len(indf) == 0:
                continue

            # only use files parsed to single-column dataframes
            # if we don't find a better strategy
            if len(indf.columns) < 2:
                fallback = indf
                continue

            return panda_process(indf)
        except CParserError:
            continue

    if fallback is not None:
        return panda_process(fallback)
    raise IOError("Could not parse %s using pandas" % path)
|
ImportError
|
dataset/ETHPy150Open glue-viz/glue/glue/core/data_factories/pandas.py/pandas_read_table
|
3,522 |
def _parse_driver_info(node):
    """Gets the information needed for accessing the node.

    :param node: the Node object.
    :returns: dictionary of information.
    :raises: InvalidParameterValue if any required parameters are incorrect.
    :raises: MissingParameterValue if any required parameters are missing.
    """
    info = node.driver_info
    d_info = {}
    error_msgs = []

    d_info['username'] = info.get('fuel_username', 'root')
    d_info['key_filename'] = info.get('fuel_key_filename',
                                      '/etc/ironic/fuel_key')
    if not os.path.isfile(d_info['key_filename']):
        error_msgs.append(_("SSH key file %s not found.") %
                          d_info['key_filename'])
    try:
        d_info['port'] = int(info.get('fuel_ssh_port', 22))
    except __HOLE__:
        error_msgs.append(_("'fuel_ssh_port' must be an integer."))

    if error_msgs:
        msg = (_('The following errors were encountered while parsing '
                 'driver_info:\n%s') % '\n'.join(error_msgs))
        raise exception.InvalidParameterValue(msg)

    d_info['script'] = info.get('fuel_deploy_script', 'bareon-provision')
    return d_info
|
ValueError
|
dataset/ETHPy150Open openstack/bareon/contrib/ironic/ironic-fa-deploy/ironic_fa_deploy/modules/fuel_agent.py/_parse_driver_info
|
3,523 |
def _get_deploy_data(context, image_source):
    glance = image_service.GlanceImageService(version=2, context=context)
    image_props = glance.show(image_source).get('properties', {})
    LOG.debug('Image %s properties are: %s', image_source, image_props)
    try:
        disk_data = json.loads(image_props['mos_disk_info'])
    except KeyError:
        raise exception.MissingParameterValue(_('Image %s does not contain '
                                                'disk layout data.') %
                                              image_source)
    except __HOLE__:
        raise exception.InvalidParameterValue(_('Invalid disk layout data for '
                                                'image %s') % image_source)
    data = FUEL_AGENT_PROVISION_TEMPLATE.copy()
    data['ks_meta']['pm_data']['ks_spaces'] = disk_data
    return data
|
ValueError
|
dataset/ETHPy150Open openstack/bareon/contrib/ironic/ironic-fa-deploy/ironic_fa_deploy/modules/fuel_agent.py/_get_deploy_data
|
3,524 |
def compare(reference, new, ignored_keys=None):
    """Compare two dictionaries and return annotated difference.

    Positional arguments:
    reference -- the reference dictionary
    new -- the new dictionary

    Keyword arguments:
    ignored_keys -- keys to ignore in reference and new
    """
    missing_from_new = {}
    different = {}
    modified = {}
    ret = {}
    if ignored_keys is None:
        ignored_keys = set()
    for key1, value1 in reference.viewitems():
        if key1 in ignored_keys:
            try:
                del new[key1]
            except __HOLE__:
                pass
            continue
        else:
            try:
                value2 = new[key1]
            except KeyError:
                missing_from_new[key1] = value1
            else:
                try:
                    rec_comp = compare(value1, value2,
                                       ignored_keys=ignored_keys)
                    if rec_comp:
                        modified[key1] = rec_comp
                except AttributeError:
                    if value1 != value2:
                        different[key1] = {'reference': value1, 'new': value2}
                del new[key1]
    missing_from_reference = new
    for k, v in {'different': different,
                 'missing_from_reference': missing_from_reference,
                 'missing_from_new': missing_from_new,
                 'modified': modified}.viewitems():
        if v:
            ret[k] = v
    return ret
|
KeyError
|
dataset/ETHPy150Open rcbops/rpc-openstack/maas/testing/compare-definitions.py/compare
|
3,525 |
def sync(self, master_issue_id, slave_issue_id):
    _master_issue_id = master_issue_id
    _slave_issue_id = slave_issue_id
    _result = None
    try:
        if not master_issue_id and slave_issue_id:
            _master_issue_id = self.issue_binder.slaveIssueIdToMasterIssueId(slave_issue_id)
            _result = _master_issue_id
        elif not slave_issue_id and master_issue_id:
            _slave_issue_id = self.issue_binder.masterIssueIdToSlaveIssueId(master_issue_id)
            _result = _slave_issue_id
        self._sync(_master_issue_id, _slave_issue_id, self.last_run, self.current_run)
    except __HOLE__, error:
        print error
    return _result
|
KeyError
|
dataset/ETHPy150Open JetBrains/youtrack-rest-python-library/python/sync/issues.py/AsymmetricIssueMerger.sync
|
3,526 |
def __init__(self, _factory=message.Message, **_3to2kwargs):
    if 'policy' in _3to2kwargs: policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
    else: policy = compat32
    """_factory is called with no arguments to create a new message obj

    The policy keyword specifies a policy object that controls a number of
    aspects of the parser's operation.  The default policy maintains
    backward compatibility.
    """
    self._factory = _factory
    self.policy = policy
    try:
        _factory(policy=self.policy)
        self._factory_kwds = lambda: {'policy': self.policy}
    except __HOLE__:
        # Assume this is an old-style factory
        self._factory_kwds = lambda: {}
    self._input = BufferedSubFile()
    self._msgstack = []
    if PY3:
        self._parse = self._parsegen().__next__
    else:
        self._parse = self._parsegen().next
    self._cur = None
    self._last = None
    self._headersonly = False

# Non-public interface for supporting Parser's headersonly flag
|
TypeError
|
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/backports/email/feedparser.py/FeedParser.__init__
|
3,527 |
def _call_parse(self):
    try:
        self._parse()
    except __HOLE__:
        pass
|
StopIteration
|
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/backports/email/feedparser.py/FeedParser._call_parse
|
3,528 |
def none_missing(df, columns=None):
    """
    Asserts that there are no missing values (NaNs) in the DataFrame.

    Parameters
    ----------
    df : DataFrame
    columns : list
        list of columns to restrict the check to

    Returns
    -------
    df : DataFrame
        same as the original
    """
    if columns is None:
        columns = df.columns
    try:
        assert not df[columns].isnull().any().any()
    except __HOLE__ as e:
        missing = df[columns].isnull()
        msg = generic.bad_locations(missing)
        e.args = msg
        raise
    return df
|
AssertionError
|
dataset/ETHPy150Open TomAugspurger/engarde/engarde/checks.py/none_missing
|
3,529 |
def is_shape(df, shape):
    """
    Asserts that the DataFrame is of a known shape.

    Parameters
    ==========
    df : DataFrame
    shape : tuple
        (n_rows, n_columns). Use None or -1 if you don't care
        about a dimension.

    Returns
    =======
    df : DataFrame
    """
    try:
        check = np.all(np.equal(df.shape, shape) | (np.equal(shape, [-1, -1]) |
                                                    np.equal(shape, [None, None])))
        assert check
    except __HOLE__ as e:
        msg = ("Expected shape: {}\n"
               "\t\tActual shape: {}".format(shape, df.shape))
        e.args = (msg,)
        raise
    return df
|
AssertionError
|
dataset/ETHPy150Open TomAugspurger/engarde/engarde/checks.py/is_shape
|
3,530 |
def unique_index(df):
    """
    Assert that the index is unique

    Parameters
    ==========
    df : DataFrame

    Returns
    =======
    df : DataFrame
    """
    try:
        assert df.index.is_unique
    except __HOLE__ as e:
        e.args = df.index.get_duplicates()
        raise
    return df
|
AssertionError
|
dataset/ETHPy150Open TomAugspurger/engarde/engarde/checks.py/unique_index
|
3,531 |
def get_feed(self, url=None):
    """
    Returns a feedgenerator.DefaultFeed object, fully populated, for
    this feed. Raises FeedDoesNotExist for invalid parameters.
    """
    if url:
        bits = url.split('/')
    else:
        bits = []
    try:
        obj = self.get_object(bits)
    except __HOLE__:
        raise FeedDoesNotExist
    return super(Feed, self).get_feed(obj, self.request)
|
ObjectDoesNotExist
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/contrib/syndication/feeds.py/Feed.get_feed
|
3,532 |
def keys_present(name, number, save_dir, region=None, key=None, keyid=None, profile=None):
    '''
    .. versionadded:: 2015.8.0

    Ensure the IAM access keys are present.

    name (string)
        The name of the new user.

    number (int)
        Number of keys that user should have.

    save_dir (string)
        The directory that the key/keys will be saved. Keys are saved to a file named according
        to the username provided.

    region (string)
        Region to connect to.

    key (string)
        Secret key to be used.

    keyid (string)
        Access key to be used.

    profile (dict)
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if not __salt__['boto_iam.get_user'](name, region, key, keyid, profile):
        ret['result'] = False
        ret['comment'] = 'IAM User {0} does not exist.'.format(name)
        return ret
    if not isinstance(number, int):
        ret['comment'] = 'The number of keys must be an integer.'
        ret['result'] = False
        return ret
    if not os.path.isdir(save_dir):
        ret['comment'] = 'The directory {0} does not exist.'.format(save_dir)
        ret['result'] = False
        return ret
    keys = __salt__['boto_iam.get_all_access_keys'](user_name=name, region=region, key=key,
                                                    keyid=keyid, profile=profile)
    if isinstance(keys, str):
        log.debug('keys are : false {0}'.format(keys))
        error, message = _get_error(keys)
        ret['comment'] = 'Could not get keys.\n{0}\n{1}'.format(error, message)
        ret['result'] = False
        return ret
    keys = keys['list_access_keys_response']['list_access_keys_result']['access_key_metadata']
    log.debug('Keys are : {0}.'.format(keys))
    if len(keys) >= number:
        ret['comment'] = 'The number of keys exist for user {0}'.format(name)
        ret['result'] = True
        return ret
    if __opts__['test']:
        ret['comment'] = 'Access key is set to be created for {0}.'.format(name)
        ret['result'] = None
        return ret
    new_keys = {}
    for i in range(number-len(keys)):
        created = __salt__['boto_iam.create_access_key'](name, region, key, keyid, profile)
        if isinstance(created, str):
            error, message = _get_error(created)
            ret['comment'] = 'Could not create keys.\n{0}\n{1}'.format(error, message)
            ret['result'] = False
            return ret
        log.debug('Created is : {0}'.format(created))
        response = 'create_access_key_response'
        result = 'create_access_key_result'
        new_keys['key-{0}'.format(i)] = created[response][result]['access_key']['access_key_id']
        new_keys['key_id-{0}'.format(i)] = created[response][result]['access_key']['secret_access_key']
    try:
        with salt.utils.fopen('{0}/{1}'.format(save_dir, name), 'a') as _wrf:
            for key_id, access_key in new_keys.items():
                _wrf.write('{0}\n{1}\n'.format(key_id, access_key))
        ret['comment'] = 'Keys have been written to file {0}/{1}.'.format(save_dir, name)
        ret['result'] = True
        ret['changes'] = new_keys
        return ret
    except __HOLE__:
        ret['comment'] = 'Could not write to file {0}/{1}.'.format(save_dir, name)
        ret['result'] = False
        return ret
|
IOError
|
dataset/ETHPy150Open saltstack/salt/salt/states/boto_iam.py/keys_present
|
3,533 |
def server_cert_present(name, public_key, private_key, cert_chain=None, path=None,
                        region=None, key=None, keyid=None, profile=None):
    '''
    Create server certificate.

    .. versionadded:: 2015.8.0

    name (string)
        The name for the server certificate. Do not include the path in this value.

    public_key (string)
        The contents of the public key certificate in PEM-encoded format.

    private_key (string)
        The contents of the private key in PEM-encoded format.

    cert_chain (string)
        The contents of the certificate chain. This is typically a
        concatenation of the PEM-encoded public key certificates of the chain.

    path (string)
        The path for the server certificate.

    region (string)
        The name of the region to connect to.

    key (string)
        The key to be used in order to connect

    keyid (string)
        The keyid to be used in order to connect

    profile (string)
        The profile that contains a dict of region, key, keyid
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    exists = __salt__['boto_iam.get_server_certificate'](name, region, key, keyid, profile)
    log.debug('Variables are : {0}.'.format(locals()))
    if exists:
        ret['comment'] = 'Certificate {0} exists.'.format(name)
        return ret
    if 'salt://' in public_key:
        try:
            public_key = __salt__['cp.get_file_str'](public_key)
        except __HOLE__ as e:
            log.debug(e)
            ret['comment'] = 'File {0} not found.'.format(public_key)
            ret['result'] = False
            return ret
    if 'salt://' in private_key:
        try:
            private_key = __salt__['cp.get_file_str'](private_key)
        except IOError as e:
            log.debug(e)
            ret['comment'] = 'File {0} not found.'.format(private_key)
            ret['result'] = False
            return ret
    if cert_chain is not None and 'salt://' in cert_chain:
        try:
            cert_chain = __salt__['cp.get_file_str'](cert_chain)
        except IOError as e:
            log.debug(e)
            ret['comment'] = 'File {0} not found.'.format(cert_chain)
            ret['result'] = False
            return ret
    if __opts__['test']:
        ret['comment'] = 'Server certificate {0} is set to be created.'.format(name)
        ret['result'] = None
        return ret
    created = __salt__['boto_iam.upload_server_cert'](name, public_key, private_key, cert_chain,
                                                      path, region, key, keyid, profile)
    if created is not False:
        ret['comment'] = 'Certificate {0} was created.'.format(name)
        ret['changes'] = created
        return ret
    ret['result'] = False
    ret['comment'] = 'Certificate {0} failed to be created.'.format(name)
    return ret
|
IOError
|
dataset/ETHPy150Open saltstack/salt/salt/states/boto_iam.py/server_cert_present
|
3,534 |
def __enter__(self):
    try:
        self.thread_local.count += 1
    except __HOLE__:
        self.thread_local.count = 1
|
AttributeError
|
dataset/ETHPy150Open enthought/comtypes/comtypes/safearray.py/_SafeArrayAsNdArrayContextManager.__enter__
|
3,535 |
def _midlSAFEARRAY(itemtype):
    """This function mimics the 'SAFEARRAY(aType)' IDL idiom.  It
    returns a subtype of SAFEARRAY, instances will be built with a
    typecode VT_...  corresponding to the aType, which must be one of
    the supported ctypes.
    """
    try:
        return POINTER(_safearray_type_cache[itemtype])
    except __HOLE__:
        sa_type = _make_safearray_type(itemtype)
        _safearray_type_cache[itemtype] = sa_type
        return POINTER(sa_type)
|
KeyError
|
dataset/ETHPy150Open enthought/comtypes/comtypes/safearray.py/_midlSAFEARRAY
|
3,536 |
def _make_safearray_type(itemtype):
    # Create and return a subclass of tagSAFEARRAY
    from comtypes.automation import _ctype_to_vartype, VT_RECORD, \
        VT_UNKNOWN, IDispatch, VT_DISPATCH

    meta = type(_safearray.tagSAFEARRAY)
    sa_type = meta.__new__(meta,
                           "SAFEARRAY_%s" % itemtype.__name__,
                           (_safearray.tagSAFEARRAY,), {})

    try:
        vartype = _ctype_to_vartype[itemtype]
        extra = None
    except __HOLE__:
        if issubclass(itemtype, Structure):
            try:
                guids = itemtype._recordinfo_
            except AttributeError:
                extra = None
            else:
                from comtypes.typeinfo import GetRecordInfoFromGuids
                extra = GetRecordInfoFromGuids(*guids)
            vartype = VT_RECORD
        elif issubclass(itemtype, POINTER(IDispatch)):
            vartype = VT_DISPATCH
            extra = pointer(itemtype._iid_)
        elif issubclass(itemtype, POINTER(IUnknown)):
            vartype = VT_UNKNOWN
            extra = pointer(itemtype._iid_)
        else:
            raise TypeError(itemtype)

    @Patch(POINTER(sa_type))
    class _(object):
        # Should explain the ideas how SAFEARRAY is used in comtypes
        _itemtype_ = itemtype  # a ctypes type
        _vartype_ = vartype  # a VARTYPE value: VT_...
        _needsfree = False

        @classmethod
        def create(cls, value, extra=None):
            """Create a POINTER(SAFEARRAY_...) instance of the correct
            type; value is an object containing the items to store.

            Python lists, tuples, and array.array instances containing
            compatible item types can be passed to create
            one-dimensional arrays.  To create multidimensional arrays,
            numpy arrays must be passed.
            """
            if npsupport.isndarray(value):
                return cls.create_from_ndarray(value, extra)

            # For VT_UNKNOWN or VT_DISPATCH, extra must be a pointer to
            # the GUID of the interface.
            #
            # For VT_RECORD, extra must be a pointer to an IRecordInfo
            # describing the record.

            # XXX How to specify the lbound (3. parameter to CreateVectorEx)?
            # XXX How to write tests for lbound != 0?
            pa = _safearray.SafeArrayCreateVectorEx(cls._vartype_,
                                                    0,
                                                    len(value),
                                                    extra)
            if not pa:
                if cls._vartype_ == VT_RECORD and extra is None:
                    raise TypeError("Cannot create SAFEARRAY type VT_RECORD without IRecordInfo.")
                # Hm, there may be other reasons why the creation fails...
                raise MemoryError()
            # We now have a POINTER(tagSAFEARRAY) instance which we must cast
            # to the correct type:
            pa = cast(pa, cls)
            # Now, fill the data in:
            ptr = POINTER(cls._itemtype_)()  # container for the values
            _safearray.SafeArrayAccessData(pa, byref(ptr))
            try:
                if isinstance(value, array.array):
                    addr, n = value.buffer_info()
                    nbytes = len(value) * sizeof(cls._itemtype_)
                    memmove(ptr, addr, nbytes)
                else:
                    for index, item in enumerate(value):
                        ptr[index] = item
            finally:
                _safearray.SafeArrayUnaccessData(pa)
            return pa

        @classmethod
        def create_from_ndarray(cls, value, extra, lBound=0):
            from comtypes.automation import VARIANT
            # If processing VARIANT, makes sure the array type is correct.
            if cls._itemtype_ is VARIANT:
                if value.dtype != npsupport.VARIANT_dtype:
                    value = _ndarray_to_variant_array(value)
            else:
                ai = value.__array_interface__
                if ai["version"] != 3:
                    raise TypeError("only __array_interface__ version 3 supported")
                if cls._itemtype_ != numpy.ctypeslib._typecodes[ai["typestr"]]:
                    raise TypeError("Wrong array item type")

            # SAFEARRAYs have Fortran order; convert the numpy array if needed
            if not value.flags.f_contiguous:
                value = numpy.array(value, order="F")

            # For VT_UNKNOWN or VT_DISPATCH, extra must be a pointer to
            # the GUID of the interface.
            #
            # For VT_RECORD, extra must be a pointer to an IRecordInfo
            # describing the record.
            rgsa = (_safearray.SAFEARRAYBOUND * value.ndim)()
            nitems = 1
            for i, d in enumerate(value.shape):
                nitems *= d
                rgsa[i].cElements = d
                rgsa[i].lBound = lBound
            pa = _safearray.SafeArrayCreateEx(cls._vartype_,
                                              value.ndim,  # cDims
                                              rgsa,  # rgsaBound
                                              extra)  # pvExtra
            if not pa:
                if cls._vartype_ == VT_RECORD and extra is None:
                    raise TypeError("Cannot create SAFEARRAY type VT_RECORD without IRecordInfo.")
                # Hm, there may be other reasons why the creation fails...
                raise MemoryError()
            # We now have a POINTER(tagSAFEARRAY) instance which we must cast
            # to the correct type:
            pa = cast(pa, cls)
            # Now, fill the data in:
            ptr = POINTER(cls._itemtype_)()  # pointer to the item values
            _safearray.SafeArrayAccessData(pa, byref(ptr))
            try:
                nbytes = nitems * sizeof(cls._itemtype_)
                memmove(ptr, value.ctypes.data, nbytes)
            finally:
                _safearray.SafeArrayUnaccessData(pa)
            return pa

        @classmethod
        def from_param(cls, value):
            if not isinstance(value, cls):
                value = cls.create(value, extra)
                value._needsfree = True
            return value

        def __getitem__(self, index):
            # pparray[0] returns the whole array contents.
            if index != 0:
                raise IndexError("Only index 0 allowed")
            return self.unpack()

        def __setitem__(self, index, value):
            # XXX Need this to implement [in, out] safearrays in COM servers!
            ## print "__setitem__", index, value
            raise TypeError("Setting items not allowed")

        def __ctypes_from_outparam__(self):
            self._needsfree = True
            return self[0]

        def __del__(self, _SafeArrayDestroy=_safearray.SafeArrayDestroy):
            if self._needsfree:
                _SafeArrayDestroy(self)

        def _get_size(self, dim):
            "Return the number of elements for dimension 'dim'"
            ub = _safearray.SafeArrayGetUBound(self, dim) + 1
            lb = _safearray.SafeArrayGetLBound(self, dim)
            return ub - lb

        def unpack(self):
            """Unpack a POINTER(SAFEARRAY_...) into a Python tuple or ndarray."""
            dim = _safearray.SafeArrayGetDim(self)

            if dim == 1:
                num_elements = self._get_size(1)
                result = self._get_elements_raw(num_elements)
                if safearray_as_ndarray:
                    import numpy
                    return numpy.asarray(result)
                return tuple(result)
            elif dim == 2:
                # get the number of elements in each dimension
                rows, cols = self._get_size(1), self._get_size(2)
                # get all elements
                result = self._get_elements_raw(rows * cols)
                # this must be reshaped and transposed because it is
                # flat, and in VB order
                if safearray_as_ndarray:
                    import numpy
                    return numpy.asarray(result).reshape((cols, rows)).T
                result = [tuple(result[r::rows]) for r in range(rows)]
                return tuple(result)
            else:
                lowerbounds = [_safearray.SafeArrayGetLBound(self, d)
                               for d in range(1, dim+1)]
                indexes = (c_long * dim)(*lowerbounds)
                upperbounds = [_safearray.SafeArrayGetUBound(self, d)
                               for d in range(1, dim+1)]
                row = self._get_row(0, indexes, lowerbounds, upperbounds)
                if safearray_as_ndarray:
                    import numpy
                    return numpy.asarray(row)
                return row

        def _get_elements_raw(self, num_elements):
            """Returns a flat list or ndarray containing ALL elements in
            the safearray."""
            from comtypes.automation import VARIANT
            # XXX Not sure this is true:
            # For VT_UNKNOWN and VT_DISPATCH, we should retrieve the
            # interface iid by SafeArrayGetIID().
            ptr = POINTER(self._itemtype_)()  # container for the values
            _safearray.SafeArrayAccessData(self, byref(ptr))
            try:
                if self._itemtype_ == VARIANT:
                    # We have to loop over each item, so we get no
                    # speedup by creating an ndarray here.
                    return [i.value for i in ptr[:num_elements]]
                elif issubclass(self._itemtype_, POINTER(IUnknown)):
                    iid = _safearray.SafeArrayGetIID(self)
                    itf = com_interface_registry[str(iid)]
                    # COM interface pointers retrieved from array
                    # must be AddRef()'d if non-NULL.
                    elems = ptr[:num_elements]
                    result = []
                    # We have to loop over each item, so we get no
                    # speedup by creating an ndarray here.
                    for p in elems:
                        if bool(p):
                            p.AddRef()
                            result.append(p.QueryInterface(itf))
                        else:
                            # return a NULL-interface pointer.
                            result.append(POINTER(itf)())
                    return result
                else:
                    # If the safearray element are NOT native python
                    # objects, the containing safearray must be kept
                    # alive until all the elements are destroyed.
                    if not issubclass(self._itemtype_, Structure):
                        # Create an ndarray if requested. This is where
                        # we can get the most speed-up.
                        # XXX Only try to convert types known to
                        #     numpy.ctypeslib.
                        if (safearray_as_ndarray and self._itemtype_ in
                                numpy.ctypeslib._typecodes.values()):
                            arr = numpy.ctypeslib.as_array(ptr,
                                                           (num_elements,))
                            return arr.copy()
                        return ptr[:num_elements]

                    def keep_safearray(v):
                        v.__keepref = self
                        return v
                    return [keep_safearray(x) for x in ptr[:num_elements]]
            finally:
                _safearray.SafeArrayUnaccessData(self)

        def _get_row(self, dim, indices, lowerbounds, upperbounds):
            # loop over the index of dimension 'dim'
            # we have to restore the index of the dimension we're looping over
            restore = indices[dim]

            result = []
            obj = self._itemtype_()
            pobj = byref(obj)
            if dim+1 == len(indices):
                # It should be faster to lock the array and get a whole row at once?
                # How to calculate the pointer offset?
                for i in range(indices[dim], upperbounds[dim]+1):
                    indices[dim] = i
                    _safearray.SafeArrayGetElement(self, indices, pobj)
                    result.append(obj.value)
            else:
                for i in range(indices[dim], upperbounds[dim]+1):
                    indices[dim] = i
                    result.append(self._get_row(dim+1, indices, lowerbounds, upperbounds))
            indices[dim] = restore
            return tuple(result)  # for compatibility with pywin32.

    @Patch(POINTER(POINTER(sa_type)))
    class __(object):

        @classmethod
        def from_param(cls, value):
            if isinstance(value, cls._type_):
                return byref(value)
            return byref(cls._type_.create(value, extra))

        def __setitem__(self, index, value):
            # create an LP_SAFEARRAY_... instance
            pa = self._type_.create(value, extra)
            # XXX Must we destroy the currently contained data?
            # fill it into self
            super(POINTER(POINTER(sa_type)), self).__setitem__(index, pa)

    return sa_type
|
KeyError
|
dataset/ETHPy150Open enthought/comtypes/comtypes/safearray.py/_make_safearray_type
|
3,537 |
def handle_read(self):
    self.rbuf += self.recv(self.BUFFER_SIZE)
    while self.__hasEnoughBytes():
        magic, cmd, keylen, extralen, datatype, vb, remaining, opaque, cas=\
            struct.unpack(REQ_PKT_FMT, self.rbuf[:MIN_RECV_PACKET])
        assert magic == REQ_MAGIC_BYTE
        assert keylen <= remaining, "Keylen is too big: %d > %d" \
            % (keylen, remaining)
        assert extralen == memcacheConstants.EXTRA_HDR_SIZES.get(cmd, 0), \
            "Extralen is too large for cmd 0x%x: %d" % (cmd, extralen)
        # Grab the data section of this request
        data=self.rbuf[MIN_RECV_PACKET:MIN_RECV_PACKET+remaining]
        assert len(data) == remaining
        # Remove this request from the read buffer
        self.rbuf=self.rbuf[MIN_RECV_PACKET+remaining:]
        # Process the command
        cmdVal = self.processCommand(cmd, keylen, vb, extralen, cas, data)
        # Queue the response to the client if applicable.
        if cmdVal:
            try:
                status, cas, response = cmdVal
            except __HOLE__:
                print "Got", cmdVal
                raise
            dtype=0
            extralen=memcacheConstants.EXTRA_HDR_SIZES.get(cmd, 0)
            self.wbuf += struct.pack(RES_PKT_FMT,
                RES_MAGIC_BYTE, cmd, keylen,
                extralen, dtype, status,
                len(response), opaque, cas) + response
|
ValueError
|
dataset/ETHPy150Open dustin/memcached-test/testServer.py/MemcachedBinaryChannel.handle_read
|
3,538 |
def prepare_for_inactivity(self):
    """Can be called before times when this iterator instance will not be used for some period of
    time.

    This is necessary to do on some platforms, such as Windows, where open file handles should be closed
    while no real work is going on.

    No calls are necessary to bring it out of this mode.  The next invocation of any method on this
    instance will result in the instance no longer being considered inactive.
    """
    # This is a pain, but Windows does not allow for anyone to delete a file or its parent directory while
    # someone has a file handle open to it.  So, to be nice, we should close ours.  However, it does limit
    # our ability to detect and more easily handle log rotates.  (Ok, it is not completely true that Windows does
    # not allow for file deletes while someone has a file handle open, but you have to use the native win32 api
    # to be able to open files that work in such a way.. and it still does not allow for the parent dirs to be
    # deleted.)
    close_file = (sys.platform == 'win32')

    # also close any files that haven't been modified for a certain amount of time.
    # This can help prevent errors from having too many open files if we are scanning
    # a directory with many files in it.
    if self.__max_modification_duration:
        try:
            current_datetime = datetime.datetime.now()
            delta = current_datetime - self.__modification_time
            total_micros = delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6
            if total_micros > self.__max_modification_duration * 10**6:
                close_file = True
        except __HOLE__:
            pass

    if close_file:
        for pending in self.__pending_files:
            self.__close_file(pending)
|
OSError
|
dataset/ETHPy150Open scalyr/scalyr-agent-2/scalyr_agent/log_processing.py/LogFileIterator.prepare_for_inactivity
|
3,539 |
def __refresh_pending_files(self, current_time):
    """Check to see if __pending_files needs to be adjusted due to log rotation or the
    current log growing.

    This should be called periodically to sync the state of the file system with the metadata
    tracked by this abstraction.

    @param current_time: If not None, the value to use for the current_time.  Used for testing purposes.
    @type current_time: float or None
    """
    has_no_position = len(self.__pending_files) == 0
    # Check to see if the last file in our pending files is still at the path of the real log file.
    if len(self.__pending_files) > 0 and self.__pending_files[-1].is_log_file:
        current_log_file = self.__pending_files[-1]
    else:
        current_log_file = None

    # First, try to see if the file at the log file path still exists, and if so, what's size and inode is.
    try:
        # Get the latest size and inode of the file at __path.
        stat_result = self.__file_system.stat(self.__path)
        latest_inode = stat_result.st_ino
        latest_size = stat_result.st_size
        self.__modification_time = datetime.datetime.fromtimestamp(stat_result.st_mtime)

        # See if it is rotated by checking out the file handle we last opened to this file path.
        if current_log_file is not None:
            if (current_log_file.last_known_size > latest_size or
                    self.__file_system.trust_inodes and current_log_file.inode != latest_inode):
                # Ok, the log file has rotated.  We need to add in a new entry to represent this.
                # But, we also take this opportunity to see if the current entry we had for the log file has
                # grown in length since the last time we checked it, which is possible.  This is the last time
                # we have to check it since theoretically, the file would have been fully rotated before a new
                # log file was created to take its place.
                if current_log_file.file_handle is not None:
                    current_log_file.last_known_size = max(
                        current_log_file.last_known_size,
                        self.__file_system.get_file_size(current_log_file.file_handle))
                elif not self.__file_system.trust_inodes:
                    # If we do not have the file handle open (probably because we are on a win32 system) and
                    # we do not trust inodes, then there is no way to get back to the original contents, so we
                    # just mark this file portion as now invalid.
                    current_log_file.valid = False
                current_log_file.is_log_file = False
                current_log_file.position_end = current_log_file.position_start + current_log_file.last_known_size

                # Note, we do not yet detect if current_log_file is actually pointing to the same inode as the
                # log_path.  This could be true if the log file was copied to another location and then truncated
                # in place (a common mode of operation used by logrotate).  If this is the case, then the
                # file_handle in current_log_file will eventually fail since it will seek to a location no longer
                # in the file.  We handle that fairly cleanly in __fill_buffer so no need to do it here.  However,
                # if we want to look for the file where the previous log file was copied, this is where we would
                # do it.  That is a future feature.

                # Add in an entry for the file content at log_path.
                self.__add_entry_for_log_path(latest_inode)
            else:
                # It has not been rotated.  So we just update the size of the current entry.
                current_log_file.last_known_size = latest_size
                current_log_file.position_end = current_log_file.position_start + latest_size
        else:
            # There is no entry representing the file at log_path, but it does exist, so we need to add it in.
            self.__add_entry_for_log_path(latest_inode)
    except __HOLE__, e:
        # The file could have disappeared or the file permissions could have changed such that we can no longer
        # read it.  We have to handle these cases gracefully.  We treat both like it has disappeared from our point
        # of view.
        if e.errno == errno.ENOENT or e.errno == errno.EACCES:
            # The file doesn't exist.  See if we think we have a file handle that is for the log path, and if
            # so, update it to reflect it no longer is.
            if current_log_file is not None:
                current_log_file.is_log_file = False
                if current_log_file.file_handle is not None:
                    current_log_file.last_known_size = max(current_log_file.last_known_size,
                                                           self.__file_system.get_file_size(
                                                               current_log_file.file_handle))
                current_log_file.position_end = current_log_file.position_start + current_log_file.last_known_size
            if self.__log_deletion_time is None:
                self.__log_deletion_time = current_time
            self.at_end = current_time - self.__log_deletion_time > self.__log_deletion_delay
        else:
            raise

    if has_no_position and len(self.__pending_files) > 0:
        self.__position = self.__pending_files[-1].position_end
|
OSError
|
dataset/ETHPy150Open scalyr/scalyr-agent-2/scalyr_agent/log_processing.py/LogFileIterator.__refresh_pending_files
|
3,540 |
def __open_file_by_path(self, file_path, starting_inode=None):
    """Open the file at the specified path and return a file handle and the inode for the file

    Some work is done to ensure that returned inode actually is for the file represented by the file handle.

    @param file_path: The path of the file to open
    @param starting_inode: If not None, then the expected inode of the file.  This is used as a hint, but does
        not guarantee the returned inode will match this.

    @type file_path: str
    @type starting_inode: int

    @return: A tuple of the file handle, the size, and the current inode of the file at that path.
    @rtype: (FileIO, int, int)
    """
    pending_file = None
    try:
        attempts_left = 3

        try:
            # Because there is no atomic way to open a file and get its inode, we have to do a little bit of
            # extra work here.  We look at the inode, open the file, and look at the inode again.. if the
            # inode hasn't changed, then we return it and the file handle.. otherwise, we try again.  We
            # only try three times at most.
            while attempts_left > 0:
                if starting_inode is None and self.__file_system.trust_inodes:
                    starting_inode = self.__file_system.stat(file_path).st_ino

                pending_file = self.__file_system.open(file_path)
                second_stat = self.__file_system.stat(file_path)

                if not self.__file_system.trust_inodes or starting_inode == second_stat.st_ino:
                    new_file = pending_file
                    pending_file = None
                    return new_file, second_stat.st_size, second_stat.st_ino

                pending_file.close()
                pending_file = None
                attempts_left -= 1
                starting_inode = None
        except __HOLE__, error:
            if error.errno == 13:
                log.warn('Permission denied while attempting to read file \'%s\'', file_path,
                         limit_once_per_x_secs=60, limit_key=('invalid-perm' + file_path))
            else:
                log.warn('Error seen while attempting to read file \'%s\' with errno=%d', file_path, error.errno,
                         limit_once_per_x_secs=60, limit_key=('unknown-io' + file_path))
            return None, None, None
        except OSError, e:
            if e.errno == errno.ENOENT:
                log.warn('File unexpectedly missing when trying to open it')
            else:
                log.warn('OSError seen while attempting to read file \'%s\' with errno=%d', file_path, e.errno,
                         limit_once_per_x_secs=60, limit_key=('unknown-os' + file_path))
            return None, None, None
    finally:
        if pending_file is not None:
            pending_file.close()

    return None, None, None
|
IOError
|
dataset/ETHPy150Open scalyr/scalyr-agent-2/scalyr_agent/log_processing.py/LogFileIterator.__open_file_by_path
|
3,541 |
def __apply_redaction_rule(self, line, redaction_rule):
    """Applies the specified redaction rule on line and returns the result.

    @param line: The input line
    @param redaction_rule: The redaction rule.

    @return: A sequence of two elements, the line with the redaction applied (if any) and True or False
        indicating if a redaction was applied.
    """
    try:
        (result, matches) = redaction_rule.redaction_expression.subn(
            redaction_rule.replacement_text, line)
    except __HOLE__:
        # if our line contained non-ascii characters and our redaction_rules
        # are unicode, then the previous replace will fail.
        # Try again, but this time convert the line to utf-8
        (result, matches) = redaction_rule.redaction_expression.subn(
            redaction_rule.replacement_text, line.decode('utf-8'))
    if matches > 0:
        # if our result is a unicode string, lets convert it back to utf-8
        # to avoid any conflicts
        if type(result) == unicode:
            result = result.encode('utf-8')
        self.total_redactions += 1
        redaction_rule.total_lines += 1
        redaction_rule.total_redactions += matches
    return result, matches > 0
|
UnicodeDecodeError
|
dataset/ETHPy150Open scalyr/scalyr-agent-2/scalyr_agent/log_processing.py/LogLineRedacter.__apply_redaction_rule
|
3,542 |
def __can_read_file(self, file_path):
    """Determines if this process can read the file at the path.

    @param file_path: The file path
    @type file_path: str

    @return: True if it can be read.
    @rtype: bool
    """
    try:
        fp = open(file_path, 'r')
        fp.close()
    except __HOLE__, error:
        if error.errno == 13:
            return False
    return True
|
IOError
|
dataset/ETHPy150Open scalyr/scalyr-agent-2/scalyr_agent/log_processing.py/LogMatcher.__can_read_file
|
3,543 |
def configure_values_keyvalues(self, msg, values, target, keys):
    ''' Helper that can be used for configure_values for parsing in
        'key=value' strings from a values field. The key name must be
        in the keys list, and target.key=value is set.
    '''
    if values is None:
        return None
    kvs = values.split('|')
    for kv in kvs:
        try:
            # key=value
            (key, value) = kv.split('=', 1)
            if value is not None and not value.strip():
                value = None
        except __HOLE__:
            # value only
            key = keys[kvs.index(kv)]
            value = kv
        if key not in keys:
            raise ValueError, "invalid key: %s" % key
        if value is not None:
            setattr(target, key, value)
    return None
|
ValueError
|
dataset/ETHPy150Open coreemu/core/daemon/core/conf.py/ConfigurableManager.configure_values_keyvalues
|
3,544 |
@classmethod
def configure(cls, mgr, msg):
    ''' Handle configuration messages for this object.
    '''
    reply = None
    nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
    objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
    conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)

    ifacenum = msg.gettlv(coreapi.CORE_TLV_CONF_IFNUM)
    if ifacenum is not None:
        nodenum = nodenum*1000 + ifacenum

    if mgr.verbose:
        mgr.info("received configure message for %s nodenum:%s" % (cls._name, str(nodenum)))
    if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST:
        if mgr.verbose:
            mgr.info("replying to configure request for %s model" %
                     cls._name)
        # when object name is "all", the reply to this request may be None
        # if this node has not been configured for this model; otherwise we
        # reply with the defaults for this model
        if objname == "all":
            defaults = None
            typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE
        else:
            defaults = cls.getdefaultvalues()
            typeflags = coreapi.CONF_TYPE_FLAGS_NONE
        values = mgr.getconfig(nodenum, cls._name, defaults)[1]
        if values is None:
            # node has no active config for this model (don't send defaults)
            return None
        # reply with config options
        reply = cls.toconfmsg(0, nodenum, typeflags, values)
    elif conftype == coreapi.CONF_TYPE_FLAGS_RESET:
        if objname == "all":
            mgr.clearconfig(nodenum)
    #elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE:
    else:
        # store the configuration values for later use, when the node
        # object has been created
        if objname is None:
            mgr.info("no configuration object for node %s" % nodenum)
            return None
        values_str = msg.gettlv(coreapi.CORE_TLV_CONF_VALUES)
        defaults = cls.getdefaultvalues()
        if values_str is None:
            # use default or preconfigured values
            values = mgr.getconfig(nodenum, cls._name, defaults)[1]
        else:
            # use new values supplied from the conf message
            values = values_str.split('|')
            # determine new or old style config
            new = cls.haskeyvalues(values)
            if new:
                new_values = list(defaults)
                keys = cls.getnames()
                for v in values:
                    key, value = v.split('=', 1)
                    try:
                        new_values[keys.index(key)] = value
                    except __HOLE__:
                        mgr.info("warning: ignoring invalid key '%s'" % key)
                values = new_values
        mgr.setconfig(nodenum, objname, values)
    return reply
|
ValueError
|
dataset/ETHPy150Open coreemu/core/daemon/core/conf.py/Configurable.configure
|
3,545 |
def load_sample(name=None):
    if len(argv) > 1:
        img0 = cv.LoadImage(argv[1], cv.CV_LOAD_IMAGE_COLOR)
    elif name is not None:
        try:
            img0 = cv.LoadImage(name, cv.CV_LOAD_IMAGE_COLOR)
        except __HOLE__:
            urlbase = 'https://raw.github.com/Itseez/opencv/master/samples/c/'
            file = name.split('/')[-1]
            filedata = urllib2.urlopen(urlbase+file).read()
            imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
            cv.SetData(imagefiledata, filedata, len(filedata))
            img0 = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    return img0
|
IOError
|
dataset/ETHPy150Open fatcloud/PyCV-time/opencv-official-samples/earlier/cvutils.py/load_sample
|
3,546 |
def get_rrset(self, section, name, rdclass, rdtype,
              covers=dns.rdatatype.NONE, deleting=None, create=False,
              force_unique=False):
    """Get the RRset with the given attributes in the specified section.

    If the RRset is not found, None is returned.

    @param section: the section of the message to look in, e.g.
    self.answer.
    @type section: list of dns.rrset.RRset objects
    @param name: the name of the RRset
    @type name: dns.name.Name object
    @param rdclass: the class of the RRset
    @type rdclass: int
    @param rdtype: the type of the RRset
    @type rdtype: int
    @param covers: the covers value of the RRset
    @type covers: int
    @param deleting: the deleting value of the RRset
    @type deleting: int
    @param create: If True, create the RRset if it is not found.
    The created RRset is appended to I{section}.
    @type create: bool
    @param force_unique: If True and create is also True, create a
    new RRset regardless of whether a matching RRset exists already.
    @type force_unique: bool
    @rtype: dns.rrset.RRset object or None"""

    try:
        rrset = self.find_rrset(section, name, rdclass, rdtype, covers,
                                deleting, create, force_unique)
    except __HOLE__:
        rrset = None
    return rrset
|
KeyError
|
dataset/ETHPy150Open catap/namebench/nb_third_party/dns/message.py/Message.get_rrset
|
3,547 |
@answer_proc('choice-multiple', 'choice-multiple-freeform')
def process_multiple(question, answer):
    multiple = []
    multiple_freeform = []

    requiredcount = 0
    required = question.getcheckdict().get('required', 0)
    if required:
        try:
            requiredcount = int(required)
        except __HOLE__:
            requiredcount = 1
    if requiredcount and requiredcount > question.choices().count():
        requiredcount = question.choices().count()

    for k, v in answer.items():
        if k.startswith('multiple'):
            multiple.append(v)
        if k.startswith('more') and len(v.strip()) > 0:
            multiple_freeform.append(v)

    if len(multiple) + len(multiple_freeform) < requiredcount:
        raise AnswerException(ungettext(u"You must select at least %d option",
                                        u"You must select at least %d options",
                                        requiredcount) % requiredcount)
    multiple.sort()
    if multiple_freeform:
        multiple.append(multiple_freeform)
    return dumps(multiple)
|
ValueError
|
dataset/ETHPy150Open seantis/seantis-questionnaire/questionnaire/qprocessors/choice.py/process_multiple
|
3,548 |
def connectionLost(self, reason):
"""
Release the inotify file descriptor and do the necessary cleanup
"""
FileDescriptor.connectionLost(self, reason)
if self._fd >= 0:
try:
os.close(self._fd)
except __HOLE__, e:
log.err(e, "Couldn't close INotify file descriptor.")
|
OSError
|
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/internet/inotify.py/INotify.connectionLost
|
3,549 |
def _doRead(self, in_):
"""
Work on the data just read from the file descriptor.
"""
self._buffer += in_
while len(self._buffer) >= 16:
wd, mask, cookie, size = struct.unpack("=LLLL", self._buffer[0:16])
if size:
name = self._buffer[16:16 + size].rstrip('\0')
else:
name = None
self._buffer = self._buffer[16 + size:]
try:
iwp = self._watchpoints[wd]
except __HOLE__:
continue
path = iwp.path
if name:
path = path.child(name)
iwp._notify(path, mask)
if (iwp.autoAdd and mask & IN_ISDIR and mask & IN_CREATE):
# mask & IN_ISDIR already guarantees that the path is a
# directory. There's no way you can get here without a
# directory anyway, so no point in checking for that again.
new_wd = self.watch(
path, mask=iwp.mask, autoAdd=True,
callbacks=iwp.callbacks
)
# This is very very very hacky and I'd rather not do this but
# we have no other alternative that is less hacky other than
# surrender. We use callLater because we don't want to have
# too many events waiting while we process these subdirs, we
# must always answer events as fast as possible or the overflow
# might come.
self.reactor.callLater(0,
self._addChildren, self._watchpoints[new_wd])
if mask & IN_DELETE_SELF:
self._rmWatch(wd)
|
KeyError
|
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/internet/inotify.py/INotify._doRead
|
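Note on the entry above: besides the KeyError hole (an unknown watch descriptor), the loop parses a fixed 16-byte inotify header. A hedged sketch with one synthetic event (values fabricated for illustration):

import struct

buf = struct.pack('=LLLL', 1, 0x100, 0, 8) + b'newfile\0'  # (wd, mask, cookie, size) + name
wd, mask, cookie, size = struct.unpack('=LLLL', buf[0:16])
name = buf[16:16 + size].rstrip(b'\0')
print(wd, hex(mask), name)  # 1 0x100 b'newfile'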
3,550 |
def _addChildren(self, iwp):
"""
This is a very private method, please don't even think about using it.
Note that this is a fricking hack... it's because we cannot be fast
enough in adding a watch to a directory and so we basically end up
getting here too late if some operations have already been going on in
the subdir, we basically need to catchup. This eventually ends up
meaning that we generate double events, your app must be resistant.
"""
try:
listdir = iwp.path.children()
except __HOLE__:
# Somebody or something (like a test) removed this directory while
# we were in the callLater(0...) waiting. It doesn't make sense to
# process it anymore
return
# note that it's true that listdir will only see the subdirs inside
# path at the moment of the call but path is monitored already so if
# something is created we will receive an event.
for f in listdir:
# It's a directory, watch it and then add its children
if f.isdir():
wd = self.watch(
f, mask=iwp.mask, autoAdd=True,
callbacks=iwp.callbacks
)
iwp._notify(f, IN_ISDIR|IN_CREATE)
# now f is watched, we can add its children the callLater is to
# avoid recursion
self.reactor.callLater(0,
self._addChildren, self._watchpoints[wd])
# It's a file and we notify it.
if f.isfile():
iwp._notify(f, IN_CREATE|IN_CLOSE_WRITE)
|
OSError
|
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/internet/inotify.py/INotify._addChildren
|
3,551 |
def process_url(self, urldata, output_dir, call_completion_func=False,
completion_extra_prms=None, start_time=0):
error_occurred = False
try:
output_fn = os.path.join(output_dir, self._filename_from_urldata(urldata))
self._download_image(urldata['url'], output_fn)
clean_fn, thumb_fn = self.process_image(output_fn)
except urllib2.URLError, e:
log.info('URL Error for %s (%s)', urldata['url'], str(e))
error_occurred = True
except urllib2.HTTPError, e:
if e.code != 201:
log.info('HTTP Error for %s (%s)', urldata['url'], str(e))
error_occurred = True
except BadStatusLine, e:
log.info('Bad status line for %s (%s)', urldata['url'], str(e))
error_occurred = True
except __HOLE__, e:
log.info('IO Error for: %s (%s)', urldata['url'], str(e))
error_occurred = True
except FilterException, e:
log.info('Filtered out: %s (%s)', urldata['url'], str(e))
error_occurred = True
if not error_occurred:
out_dict = urldata
out_dict['orig_fn'] = output_fn
out_dict['clean_fn'] = clean_fn
out_dict['thumb_fn'] = thumb_fn
if start_time > 0:
out_dict['download_time'] = time.time() - start_time
if call_completion_func:
# use callback handler to run completion func configured in process_urls
if completion_extra_prms:
self._callback_handler.run_callback(out_dict, completion_extra_prms, blocking=True)
else:
self._callback_handler.run_callback(out_dict, blocking=True)
log.info('done with callback')
return out_dict
else:
if call_completion_func:
# indicate to callback handler that a URL to be processed failed
self._callback_handler.skip()
return None
|
IOError
|
dataset/ETHPy150Open kencoken/imsearch-tools/imsearchtools/process/image_getter.py/ImageGetter.process_url
|
3,552 |
def match_check(self, m):
try:
return self.formats[m.group(1)](self)
except __HOLE__:
return "(nil)"
|
KeyError
|
dataset/ETHPy150Open qtile/qtile/libqtile/widget/mpdwidget.py/Mpd.match_check
|
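Note on the entry above: the hole is KeyError because an unknown format specifier misses the self.formats dict. A minimal sketch of the same lookup-with-placeholder idiom (names illustrative):

formats = {'artist': lambda: 'Some Artist'}

def render(spec):
    try:
        return formats[spec]()
    except KeyError:  # unknown specifier -> placeholder
        return '(nil)'

print(render('artist'), render('year'))  # Some Artist (nil)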
3,553 |
def __init__(self):
self.user = None
self.info = None
self.full_name = None
self.user_type = None
if frappe.local.form_dict.get('cmd')=='login' or frappe.local.request.path=="/api/method/login":
self.login()
self.resume = False
else:
try:
self.resume = True
self.make_session(resume=True)
self.set_user_info(resume=True)
except __HOLE__:
self.user = "Guest"
self.make_session()
self.set_user_info()
|
AttributeError
|
dataset/ETHPy150Open frappe/frappe/frappe/auth.py/LoginManager.__init__
|
3,554 |
def range_usage_text(request):
start = request.GET.get('start', None)
end = request.GET.get('end', None)
format = request.GET.get('format', 'human_readable')
if not (start and end):
return HttpResponse(json.dumps({
'success': False,
'error_messages': 'Provide a start and end'}))
get_objects = request.GET.get('get_objects', False)
if start.find(':') > -1:
ip_type = '6'
else:
ip_type = '4'
try:
usage_data = range_usage(start, end, ip_type, get_objects)
except (__HOLE__, ipaddr.AddressValueError), e:
return HttpResponse(
json.dumps({
'error_messages': str(e),
'success': False
}))
if format == 'human_readable':
usage_data['free_ranges'] = map(lambda x: (int_to_ip(x[0], ip_type),
int_to_ip(x[1], ip_type)),
usage_data['free_ranges'])
usage_data['success'] = True
return HttpResponse(json.dumps(usage_data))
|
ValidationError
|
dataset/ETHPy150Open rtucker-mozilla/mozilla_inventory/core/range/views.py/range_usage_text
|
3,555 |
def redirect_to_range_from_ip(request):
ip_str = request.GET.get('ip_str')
ip_type = request.GET.get('ip_type')
if not (ip_str and ip_type):
return HttpResponse(json.dumps({'failure': "Slob"}))
if ip_type == '4':
try:
ip_upper, ip_lower = 0, int(ipaddr.IPv4Address(ip_str))
except ipaddr.AddressValueError:
return HttpResponse(
json.dumps({'success': False, 'message': "Failure to "
"recognize{0} as an IPv4 "
"Address.".format(ip_str)}))
else:
try:
ip_upper, ip_lower = ipv6_to_longs(ip_str)
except __HOLE__:
return HttpResponse(json.dumps({'success': False,
'message': 'Invalid IP'}))
range_ = Range.objects.filter(start_upper__lte=ip_upper,
start_lower__lte=ip_lower,
end_upper__gte=ip_upper,
end_lower__gte=ip_lower)
if not len(range_) == 1:
        return HttpResponse(json.dumps({'failure': "Failure to find range"}))
else:
return HttpResponse(json.dumps(
{'success': True,
'redirect_url': range_[0].get_absolute_url()}))
|
ValidationError
|
dataset/ETHPy150Open rtucker-mozilla/mozilla_inventory/core/range/views.py/redirect_to_range_from_ip
|
3,556 |
def get_all_ranges_ajax(request):
system_pk = request.GET.get('system_pk', '-1')
location = None
system = None
ret_list = []
from systems.models import System
try:
system = System.objects.get(pk=system_pk)
except ObjectDoesNotExist:
pass
if system:
try:
location = system.system_rack.location.name.title()
except __HOLE__:
pass
for r in Range.objects.all().order_by('network__site'):
relevant = False
if r.network.site:
site_name = r.network.site.get_site_path()
if location and location == r.network.site.name.title():
relevant = True
else:
site_name = ''
if r.network.vlan:
vlan_name = r.network.vlan.name
else:
vlan_name = ''
ret_list.append({'id': r.pk,
'display': r.choice_display(),
'vlan': vlan_name,
'site': site_name,
'relevant': relevant
})
return HttpResponse(json.dumps(ret_list))
|
AttributeError
|
dataset/ETHPy150Open rtucker-mozilla/mozilla_inventory/core/range/views.py/get_all_ranges_ajax
|
3,557 |
def configure(app, admin=None):
if app.config.get('DEBUG_TOOLBAR_ENABLED'):
try:
from flask_debugtoolbar import DebugToolbarExtension
DebugToolbarExtension(app)
except ImportError:
app.logger.info('flask_debugtoolbar is not installed')
if app.config.get('OPBEAT'):
try:
from opbeat.contrib.flask import Opbeat
Opbeat(
app,
logging=app.config.get('OPBEAT', {}).get('LOGGING', False)
)
app.logger.info('opbeat configured!!!')
except __HOLE__:
app.logger.info('opbeat is not installed')
if app.config.get('SENTRY_ENABLED', False):
try:
from raven.contrib.flask import Sentry
app.sentry = Sentry(app)
except ImportError:
app.logger.info('sentry, raven is not installed')
|
ImportError
|
dataset/ETHPy150Open LeightonStreet/LingoBarter/lingobarter/ext/development.py/configure
|
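Note on the entry above: every branch uses the optional-dependency idiom, so the hole is ImportError. A minimal sketch with the same flask_debugtoolbar module:

try:
    import flask_debugtoolbar  # optional extra; may not be installed
except ImportError:
    flask_debugtoolbar = None

if flask_debugtoolbar is None:
    print('flask_debugtoolbar is not installed; toolbar disabled')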
3,558 |
def test_import_pycurl(self):
try:
import pycurl
except __HOLE__:
pass
else:
import tornado.curl_httpclient
|
ImportError
|
dataset/ETHPy150Open cloudaice/simple-data/misc/virtenv/lib/python2.7/site-packages/tornado/test/import_test.py/ImportTest.test_import_pycurl
|
3,559 |
def drop_privileges(self):
if getuid() > 0:
# Running as non-root. Ignore.
return
try:
# Get the uid/gid from the name
uid = getpwnam(self.user).pw_uid
gid = getgrnam(self.group).gr_gid
except __HOLE__ as error:
print("ERROR: Could not drop privileges {0:s}".format(error))
print(format_exc())
raise SystemExit(-1)
try:
# Remove group privileges
setgroups([])
# Try setting the new uid/gid
setgid(gid)
setuid(uid)
if self.umask is not None:
umask(self.umask)
except Exception as error:
print("ERROR: Could not drop privileges {0:s}".format(error))
print(format_exc())
raise SystemExit(-1)
|
KeyError
|
dataset/ETHPy150Open circuits/circuits/circuits/app/dropprivileges.py/DropPrivileges.drop_privileges
|
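Note on the entry above: the hole is KeyError because pwd.getpwnam and grp.getgrnam raise KeyError for names that do not exist. Minimal POSIX-only check:

import pwd

try:
    pwd.getpwnam('no-such-user-xyz')  # unknown user -> KeyError
except KeyError as exc:
    print('could not resolve user:', exc)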
3,560 |
def guard_iter(val):
try:
iter(val)
except __HOLE__:
raise LookupyError('Value not an iterable')
else:
return val
|
TypeError
|
dataset/ETHPy150Open naiquevin/lookupy/lookupy/lookupy.py/guard_iter
|
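Note on the entry above: the hole is TypeError because iter() on a non-iterable raises TypeError (not ValueError), which guard_iter then re-raises as a domain error:

try:
    iter(42)  # ints are not iterable
except TypeError as exc:
    print(exc)  # "'int' object is not iterable"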
3,561 |
def _unlock_keychain_prompt(self):
while self.keychain.locked:
try:
self.keychain.unlock(self.getpass("Master password: "))
except __HOLE__:
self.stdout.write("\n")
sys.exit(0)
|
KeyboardInterrupt
|
dataset/ETHPy150Open georgebrock/1pass/onepassword/cli.py/CLI._unlock_keychain_prompt
|
3,562 |
def default(self, obj):
if is_lazy_string(obj):
try:
return unicode(obj) # python 2
except __HOLE__:
return str(obj) # python 3
return super(CustomJSONEncoder, self).default(obj)
|
NameError
|
dataset/ETHPy150Open weblabdeusto/weblabdeusto/server/src/weblab/admin/web/app.py/CustomJSONEncoder.default
|
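Note on the entry above: the hole is NameError because the builtin name unicode does not exist on Python 3, so the first branch fails there and the encoder falls back to str. Minimal reproduction:

try:
    text = unicode('lazy text')  # NameError on Python 3
except NameError:
    text = str('lazy text')
print(text)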
3,563 |
def encode(self, obj, *args, **kwargs):
if isinstance(obj, dict):
new_obj = type(obj)()
for key, value in six.iteritems(obj):
if is_lazy_string(key):
try:
key = unicode(key)
except __HOLE__:
key = str(key)
new_obj[key] = value
obj = new_obj
return super(JSONEncoder, self).encode(obj, *args, **kwargs)
|
NameError
|
dataset/ETHPy150Open weblabdeusto/weblabdeusto/server/src/weblab/admin/web/app.py/CustomJSONEncoder.encode
|
3,564 |
def __init__(self, app, cfg_manager, core_server, bypass_authz = False):
super(AdministrationApplication, self).__init__()
app.json_encoder = CustomJSONEncoder
self.cfg_manager = cfg_manager
pub_directory = os.path.join(os.path.abspath(self.cfg_manager.get('deployment_dir', '')), 'pub')
self.config = cfg_manager
db.initialize(cfg_manager)
self.core_server = core_server
db_session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=db.engine))
files_directory = cfg_manager.get_doc_value(configuration_doc.CORE_STORE_STUDENTS_PROGRAMS_PATH)
core_server_url = cfg_manager.get_value( 'core_server_url', '' )
self.script_name = urlparse.urlparse(core_server_url).path.split('/weblab')[0] or ''
self.app = app
static_folder = os.path.abspath(os.path.join(os.path.dirname(web.__file__), 'static'))
# Not allowed
@app.route('/weblab/not_allowed')
def not_allowed():
return "You are logged in, but not allowed to see this content. Please log in with a proper account"
# Back
@app.route('/weblab/back')
def back_to_client():
return redirect(url_for('core_webclient.labs'))
################################################
#
# Administration panel for administrators
#
#
admin_url = '/weblab/admin'
category_system = lazy_gettext("System")
category_users = lazy_gettext("Users")
category_logs = lazy_gettext("Logs")
category_experiments = lazy_gettext("Experiments")
category_permissions = lazy_gettext("Permissions")
self.admin = Admin(index_view = admin_views.HomeView(db_session, url = admin_url),name = lazy_gettext('WebLab-Deusto Admin'), url = admin_url, endpoint = admin_url, base_template = 'weblab-master.html', template_mode = 'bootstrap3')
self.admin.weblab_admin_app = self
self.admin.add_view(admin_views.SystemProperties(db_session, category = category_system, name = lazy_gettext('Settings'), endpoint = 'system/settings', url='settings'))
self.admin.add_view(admin_views.AuthsPanel(db_session, category = category_system, name = lazy_gettext('Authentication'), endpoint = 'system/auth', url='auth'))
if not os.path.exists(pub_directory):
try:
os.mkdir(pub_directory)
except (IOError, __HOLE__) as e:
print("WARNING: %s not found. Create it to upload files to it." % pub_directory)
if os.path.exists(pub_directory):
self.admin.add_view(admin_views.AdministratorFileAdmin(pub_directory, category = category_system, name = lazy_gettext('Public directory'), endpoint = 'system/pub', url='pub'))
self.admin.add_view(admin_views.UsersAddingView(db_session, category = category_users, name = lazy_gettext('Add multiple users'), endpoint = 'users/multiple'))
self.admin.add_view(admin_views.UsersPanel(db_session, category = category_users, name = lazy_gettext('Users'), endpoint = 'users/users', url='users'))
self.admin.add_view(admin_views.GroupsPanel(db_session, category = category_users, name = lazy_gettext('Groups'), endpoint = 'users/groups', url='groups'))
self.admin.add_view(admin_views.UserUsedExperimentPanel(files_directory, db_session, category = category_logs, name = lazy_gettext('User logs'), endpoint = 'logs/users', url='logs'))
self.admin.add_view(admin_views.ExperimentCategoryPanel(db_session, category = category_experiments, name = lazy_gettext('Categories'), endpoint = 'experiments/categories', url='experiments/categories'))
self.admin.add_view(admin_views.ExperimentPanel(db_session, category = category_experiments, name = lazy_gettext('Experiments'), endpoint = 'experiments/experiments', url='experiments'))
# TODO: Until finished, do not display
# self.admin.add_view(admin_views.SchedulerPanel(db_session, category = category_experiments, name = lazy_gettext('Schedulers'), endpoint = 'experiments/schedulers'))
self.admin.add_view(admin_views.PermissionsAddingView(db_session, category = category_permissions, name = lazy_gettext('Create'), endpoint = 'permissions/create', url='permissions'))
self.admin.add_view(admin_views.UserPermissionPanel(db_session, category = category_permissions, name = lazy_gettext('User'), endpoint = 'permissions/user'))
self.admin.add_view(admin_views.GroupPermissionPanel(db_session, category = category_permissions, name = lazy_gettext('Group'), endpoint = 'permissions/group'))
self.admin.add_view(admin_views.RolePermissionPanel(db_session, category = category_permissions, name = lazy_gettext('Roles'), endpoint = 'permissions/role'))
self.admin.add_link(MenuLink(endpoint='instructor.index', name = lazy_gettext('Instructor panel'), icon_type='glyph', icon_value='glyphicon-stats'))
self.admin.add_link(MenuLink(endpoint='profile.index', name = lazy_gettext('My profile'), icon_type='glyph', icon_value='glyphicon-user'))
self.admin.add_link(MenuLink(endpoint = 'back_to_client', name = lazy_gettext('Back'), icon_type='glyph', icon_value='glyphicon-log-out'))
self.admin.init_app(self.app)
self.full_admin_url = self.script_name + admin_url
################################################
#
# Profile panel
#
profile_url = '/weblab/profile'
self.profile = Admin(index_view = profile_views.ProfileHomeView(db_session, url = profile_url, endpoint = 'profile'),name = lazy_gettext('WebLab-Deusto profile'), url = profile_url, endpoint = profile_url, base_template = 'weblab-master.html', template_mode='bootstrap3')
self.profile.weblab_admin_app = self
self.profile.add_view(profile_views.ProfileEditView(db_session, name = lazy_gettext('Edit'), endpoint = 'edit'))
self.profile.add_view(profile_views.MyAccessesPanel(files_directory, db_session, name = lazy_gettext('My accesses'), endpoint = 'accesses'))
self.profile.add_link(MenuLink(endpoint = 'back_to_client', name = lazy_gettext('Back'), icon_type='glyph', icon_value='glyphicon-log-out'))
self.profile.init_app(self.app)
################################################
#
# Instructors panel
#
# TODO. There should be able a new M2M relation between instructors and groups.
#
# Instructor should be able to:
#
# a) Create new groups (of which they are in charge)
# b) Make other instructors in charge of these groups
# c) Add students (and only students) to the system; forcing a group
# d) Edit users (only students; of those groups that the administrator is in charge of)
# e) Assign permissions on these courses
# f) Manage the permissions on these courses
# g) See the logs of their own students
# h) See a panel with analytics of each of these groups (this panel is common to the administrator, and has not been implemented)
instructor_url = '/weblab/instructor'
instructor_home = instructor_views.InstructorHomeView(db_session, url = instructor_url, endpoint = 'instructor')
instructor_home.static_folder = static_folder
self.instructor = Admin(index_view = instructor_home, name = lazy_gettext("Weblab-Deusto instructor"), url = instructor_url, endpoint = instructor_url, base_template = 'weblab-master.html', template_mode='bootstrap3')
self.instructor.weblab_admin_app = self
category_general = lazy_gettext("General")
category_stats = lazy_gettext("Stats")
self.instructor.add_view(instructor_views.UsersPanel(db_session, category = category_general, name = lazy_gettext('Users'), endpoint = 'users'))
self.instructor.add_view(instructor_views.GroupsPanel(db_session, category = category_general, name = lazy_gettext('Groups'), endpoint = 'groups'))
self.instructor.add_view(instructor_views.UserUsedExperimentPanel(db_session, category = category_general, name = lazy_gettext('Raw accesses'), endpoint = 'logs'))
self.instructor.add_view(instructor_views.GroupStats(db_session, category = category_stats, name = lazy_gettext('Group'), endpoint = 'stats/groups'))
self.instructor.add_link(MenuLink(endpoint='profile.index', name = lazy_gettext('My profile'), icon_type='glyph', icon_value='glyphicon-user'))
self.instructor.add_link(MenuLink(endpoint = 'back_to_client', name = lazy_gettext('Back'), icon_type='glyph', icon_value='glyphicon-log-out'))
self.instructor.init_app(self.app)
################################################
#
# Other
#
self.bypass_authz = bypass_authz
|
OSError
|
dataset/ETHPy150Open weblabdeusto/weblabdeusto/server/src/weblab/admin/web/app.py/AdministrationApplication.__init__
|
3,565 |
def get_move(self):
legal_moves = self.legal_moves
# Remove stop
try:
del legal_moves[datamodel.stop]
except __HOLE__:
pass
# now remove the move that would lead to the previous_position
# unless there is no where else to go.
if len(legal_moves) > 1:
for (k,v) in legal_moves.items():
if v == self.previous_pos:
break
del legal_moves[k]
# just in case, there is really no way to go to:
if not legal_moves:
return datamodel.stop
# and select a move at random
return self.rnd.choice(list(legal_moves.keys()))
|
KeyError
|
dataset/ETHPy150Open ASPP/pelita/players/RandomPlayers.py/NQRandomPlayer.get_move
|
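Note on the entry above: the hole is KeyError because `del mapping[key]` raises when the key is absent; dict.pop with a default is the exception-free equivalent of that guarded delete:

legal_moves = {'north': (0, -1)}
try:
    del legal_moves['stop']  # KeyError: 'stop' not present
except KeyError:
    pass
legal_moves.pop('stop', None)  # same effect, no try/except needed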
3,566 |
def build_nsis(srcname, dstname, data):
"""Build NSIS script"""
portable_dir = osp.join(osp.dirname(osp.abspath(__file__)), 'portable')
shutil.copy(osp.join(portable_dir, srcname), dstname)
data = [('!addincludedir', osp.join(portable_dir, 'include'))
] + list(data)
replace_in_nsis_file(dstname, data)
try:
retcode = subprocess.call('"%s" -V2 "%s"' % (NSIS_EXE, dstname),
shell=True, stdout=sys.stderr)
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
except __HOLE__ as e:
print("Execution failed:", e, file=sys.stderr)
os.remove(dstname)
|
OSError
|
dataset/ETHPy150Open winpython/winpython/make.py/build_nsis
|
3,567 |
def _check_packages(self):
"""Check packages for duplicates or unsupported packages"""
print("Checking packages")
packages = []
my_plist = []
my_plist += os.listdir(self.wheeldir)
for fname0 in my_plist:
fname = self.get_package_fname(fname0)
if fname == self.python_fname:
continue
try:
pack = wppm.Package(fname)
except __HOLE__:
print("WARNING: package %s is not supported"
% osp.basename(fname), file=sys.stderr)
continue
packages.append(pack)
all_duplicates = []
for pack in packages:
if pack.name in all_duplicates:
continue
all_duplicates.append(pack.name)
duplicates = [p for p in packages if p.name == pack.name]
if len(duplicates) > 1:
print("WARNING: duplicate packages %s (%s)" %
(pack.name, ", ".join([p.version for p in duplicates])),
file=sys.stderr)
|
NotImplementedError
|
dataset/ETHPy150Open winpython/winpython/make.py/WinPythonDistribution._check_packages
|
3,568 |
def _install_all_other_packages(self):
"""Try to install all other packages in wheeldir"""
print("Installing other packages")
my_list = []
my_list += os.listdir(self.wheeldir)
for fname in my_list:
if osp.basename(fname) != osp.basename(self.python_fname):
try:
self.install_package(fname)
except __HOLE__:
print("WARNING: unable to install package %s"
% osp.basename(fname), file=sys.stderr)
|
NotImplementedError
|
dataset/ETHPy150Open winpython/winpython/make.py/WinPythonDistribution._install_all_other_packages
|
3,569 |
def _run_complement_batch_scripts(self, this_batch="run_complement.bat"):
""" tools\..\run_complement.bat for final complements"""
print('now %s in tooldirs\..' % this_batch)
for post_complement in list(set([osp.dirname(s)
for s in self._toolsdirs])):
filepath = osp.join(post_complement, this_batch)
if osp.isfile(filepath):
print('launch "%s" for "%s"' % (filepath, self.winpydir))
try:
retcode = subprocess.call('"%s" "%s"' % (filepath, self.winpydir),
shell=True, stdout=sys.stderr)
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
except __HOLE__ as e:
print("Execution failed:", e, file=sys.stderr)
self._print_done()
|
OSError
|
dataset/ETHPy150Open winpython/winpython/make.py/WinPythonDistribution._run_complement_batch_scripts
|
3,570 |
def poll(interval):
# sleep some time
time.sleep(interval)
procs = []
procs_status = {}
for p in psutil.process_iter():
try:
p.dict = p.as_dict(['username', 'nice', 'memory_info',
'memory_percent', 'cpu_percent',
'cpu_times', 'name', 'status'])
try:
procs_status[p.dict['status']] += 1
except __HOLE__:
procs_status[p.dict['status']] = 1
except psutil.NoSuchProcess:
pass
else:
procs.append(p)
# return processes sorted by CPU percent usage
processes = sorted(procs, key=lambda p: p.dict['cpu_percent'],
reverse=True)
return (processes, procs_status)
|
KeyError
|
dataset/ETHPy150Open giampaolo/psutil/scripts/top.py/poll
|
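Note on the entry above: the hole is KeyError from the first increment of an unseen status; collections.Counter expresses the same tally without the try/except:

from collections import Counter

procs_status = Counter()
for status in ('running', 'sleeping', 'running'):
    procs_status[status] += 1  # missing keys default to 0
print(procs_status)  # Counter({'running': 2, 'sleeping': 1})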
3,571 |
def main():
try:
interval = 0
while True:
args = poll(interval)
refresh_window(*args)
interval = 1
except (__HOLE__, SystemExit):
pass
|
KeyboardInterrupt
|
dataset/ETHPy150Open giampaolo/psutil/scripts/top.py/main
|
3,572 |
def test_withLogsExceptionMessage(self):
"""
L{Action.__exit__} logs an action finish message on an exception
raised from the block.
"""
logger = MemoryLogger()
action = Action(logger, "uuid", TaskLevel(level=[1]), "sys:me")
exception = RuntimeError("because")
try:
with action:
raise exception
except __HOLE__:
pass
else:
self.fail("no exception")
self.assertEqual(len(logger.messages), 1)
assertContainsFields(self, logger.messages[0],
{"task_uuid": "uuid",
"task_level": [1, 1],
"action_type": "sys:me",
"action_status": "failed",
"reason": "because",
"exception": "%s.RuntimeError" % (
RuntimeError.__module__,)})
|
RuntimeError
|
dataset/ETHPy150Open ClusterHQ/eliot/eliot/tests/test_action.py/ActionTests.test_withLogsExceptionMessage
|
3,573 |
def GetResourcesFromFile(self, resource_file):
resources = set()
ConfigFile = FileOperations.open(resource_file, 'r').read().splitlines() # To remove stupid '\n' at the end
for line in ConfigFile:
if '#' == line[0]:
continue # Skip comment lines
try:
Type, Name, Resource = line.split('_____')
# Resource = Resource.strip()
resources.add((Type, Name, Resource))
except __HOLE__:
cprint("ERROR: The delimiter is incorrect in this line at Resource File: "+str(line.split('_____')))
return resources
|
ValueError
|
dataset/ETHPy150Open owtf/owtf/framework/db/resource_manager.py/ResourceDB.GetResourcesFromFile
|
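Note on the entry above: the hole is ValueError because tuple-unpacking a split with the wrong field count fails. Minimal reproduction with the same '_____' delimiter:

line = 'TYPE_____NAME'  # two fields where three are expected
try:
    Type, Name, Resource = line.split('_____')
except ValueError as exc:
    print('bad resource line:', exc)  # not enough values to unpack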
3,574 |
def setUp(self):
super(TestVisualiser, self).setUp()
x = 'I scream for ice cream'
task = UberTask(base_task=FailingMergeSort, x=x, copies=4)
luigi.build([task], workers=1, scheduler_port=self.get_http_port())
self.done = threading.Event()
def _do_ioloop():
# Enter ioloop for maximum TEST_TIMEOUT. Check every 2s whether the test has finished.
print('Entering event loop in separate thread')
for i in range(TEST_TIMEOUT):
try:
self.wait(timeout=1)
except __HOLE__:
pass
if self.done.is_set():
break
print('Exiting event loop thread')
self.iothread = threading.Thread(target=_do_ioloop)
self.iothread.start()
|
AssertionError
|
dataset/ETHPy150Open spotify/luigi/test/visualiser/visualiser_test.py/TestVisualiser.setUp
|
3,575 |
def get_url(self):
try:
return self.new_url_format.format(**self.url_data)
except __HOLE__:
return self.old_url_format.format(**self.url_data)
|
KeyError
|
dataset/ETHPy150Open pculture/vidscraper/vidscraper/suites/blip.py/PathMixin.get_url
|
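Note on the entry above: the hole is KeyError because str.format raises KeyError for a named placeholder with no matching argument, letting get_url fall back to the old format. Sketch with illustrative format strings:

new_fmt = '{user}/{slug}'  # illustrative, not blip's real format
data = {'slug': 'video-1'}
try:
    url = new_fmt.format(**data)  # KeyError: no 'user'
except KeyError:
    url = '/file/{slug}'.format(**data)
print(url)  # /file/video-1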
3,576 |
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
elif isinstance(other, UNIXAddress):
# First do the simple thing and check to see if the names are the
# same. If not, and the paths exist, check to see if they point to
# the same file.
if self.name == other.name:
return True
else:
try:
return os.path.samefile(self.name, other.name)
except __HOLE__:
pass
return False
|
OSError
|
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/internet/address.py/UNIXAddress.__eq__
|
3,577 |
def _cache(self):
try:
for cache, data in zip(self.cache,
next(self.child_epoch_iterator)):
cache.extend(data)
except __HOLE__:
if not self.cache[0]:
raise
|
StopIteration
|
dataset/ETHPy150Open mila-udem/fuel/fuel/transformers/__init__.py/Cache._cache
|
3,578 |
def get_data(self, request=None):
"""Get data from the dataset."""
if request is None:
raise ValueError
data = [[] for _ in self.sources]
for i in range(request):
try:
for source_data, example in zip(
data, next(self.child_epoch_iterator)):
source_data.append(example)
except __HOLE__:
# If some data has been extracted and `strict` is not set,
# we should spit out this data before stopping iteration.
if not self.strictness and data[0]:
break
elif self.strictness > 1 and data[0]:
raise ValueError
raise
return tuple(numpy.asarray(source_data) for source_data in data)
|
StopIteration
|
dataset/ETHPy150Open mila-udem/fuel/fuel/transformers/__init__.py/Batch.get_data
|
3,579 |
def get_data(self, request=None):
if request is not None:
raise ValueError
if not self.data:
data = next(self.child_epoch_iterator)
self.data = izip(*data)
try:
return next(self.data)
except __HOLE__:
self.data = None
return self.get_data()
|
StopIteration
|
dataset/ETHPy150Open mila-udem/fuel/fuel/transformers/__init__.py/Unpack.get_data
|
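Note on the three fuel transformer entries above: they share the StopIteration label because next() on an exhausted child iterator raises StopIteration, which each method intercepts to flush or recycle buffered data:

it = iter([])
try:
    next(it)
except StopIteration:
    print('child iterator exhausted')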
3,580 |
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = { "refnames": git_refnames, "full": git_full }
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split(os.sep))):
root = os.path.dirname(root)
except __HOLE__:
return default
return (git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
|
NameError
|
dataset/ETHPy150Open axltxl/m2bk/m2bk/_version.py/get_versions
|
3,581 |
def convert_number(input):
try:
return int(input)
except __HOLE__:
return None
|
ValueError
|
dataset/ETHPy150Open pwittchen/learn-python-the-hard-way/exercises/exercise48/exercise48/lexicon.py/convert_number
|
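Note on the entry above: the hole is ValueError, the exception int() raises for non-numeric strings; filled in, the function is trivially testable:

def convert_number(s):
    try:
        return int(s)
    except ValueError:  # e.g. int('thirty')
        return None

assert convert_number('30') == 30
assert convert_number('thirty') is None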
3,582 |
def recv_process():
""" Receive a Shipment """
try:
recv_id = long(request.args[0])
except (__HOLE__, ValueError):
# recv_id missing from URL or invalid
redirect(URL(f="recv"))
rtable = s3db.inv_recv
if not auth.s3_has_permission("update", rtable, record_id=recv_id):
session.error = T("You do not have permission to receive this shipment.")
redirect(URL(c="inv", f="recv", args=[recv_id]))
recv_record = db(rtable.id == recv_id).select(rtable.date,
rtable.status,
rtable.site_id,
rtable.recv_ref,
limitby=(0, 1),
).first()
# Check status
status = recv_record.status
inv_ship_status = s3db.inv_ship_status
if status == inv_ship_status["RECEIVED"]:
session.error = T("This shipment has already been received.")
redirect(URL(c="inv", f="recv", args=[recv_id]))
elif status == inv_ship_status["CANCEL"]:
session.error = T("This shipment has already been received & subsequently canceled.")
redirect(URL(c="inv", f="recv", args=[recv_id]))
# Update Receive record & lock for editing
data = {"status": inv_ship_status["RECEIVED"],
"owned_by_user": None,
"owned_by_group": ADMIN,
}
if not recv_record.recv_ref:
# No recv_ref yet? => add one now
code = s3db.supply_get_shipping_code(settings.get_inv_recv_shortname(),
recv_record.site_id,
s3db.inv_recv.recv_ref,
)
data["recv_ref"] = code
if not recv_record.date:
# Date not set? => set to now
data["date"] = request.utcnow
db(rtable.id == recv_id).update(**data)
# Update the Send record & lock for editing
stable = db.inv_send
tracktable = db.inv_track_item
send_row = db(tracktable.recv_id == recv_id).select(tracktable.send_id,
limitby=(0, 1)).first()
if send_row:
send_id = send_row.send_id
db(stable.id == send_id).update(status = inv_ship_status["RECEIVED"],
owned_by_user = None,
owned_by_group = ADMIN,
)
# Change the status for all track items in this shipment to Unloading
# the onaccept will then move the values into the site, update any request
# record, create any adjustment if needed and change the status to Arrived
db(tracktable.recv_id == recv_id).update(status = 3)
# Move each item to the site
track_rows = db(tracktable.recv_id == recv_id).select()
for track_item in track_rows:
row = Storage(track_item)
s3db.inv_track_item_onaccept(Storage(vars = Storage(id=row.id),
record = row,
))
# Done => confirmation message, open the record
session.confirmation = T("Shipment Items Received")
redirect(URL(c="inv", f="recv", args=[recv_id]))
# -----------------------------------------------------------------------------
|
IndexError
|
dataset/ETHPy150Open sahana/eden/controllers/inv.py/recv_process
|
3,583 |
@classmethod
def get_info(cls, func):
""" :rtype: _MethodViewInfo|None """
try: return func._methodview
except __HOLE__: return None
|
AttributeError
|
dataset/ETHPy150Open kolypto/py-flask-jsontools/flask_jsontools/views.py/_MethodViewInfo.get_info
|
3,584 |
def coerce_put_post(request):
"""
Django doesn't particularly understand REST.
In case we send data over PUT, Django won't
actually look at the data and load it. We need
to twist its arm here.
    The try/except abomination here is due to a bug
in mod_python. This should fix it.
"""
if request.method == "PUT":
# Bug fix: if _load_post_and_files has already been called, for
# example by middleware accessing request.POST, the below code to
# pretend the request is a POST instead of a PUT will be too late
# to make a difference. Also calling _load_post_and_files will result
# in the following exception:
# AttributeError: You cannot set the upload handlers after
# the upload has been processed.
# The fix is to check for the presence of the _post field which is set
# the first time _load_post_and_files is called (both by wsgi.py and
# modpython.py). If it's set, the request has to be 'reset' to redo
# the query value parsing in POST mode.
if hasattr(request, '_post'):
del request._post
del request._files
try:
request.method = "POST"
request._load_post_and_files()
request.method = "PUT"
except __HOLE__:
request.META['REQUEST_METHOD'] = 'POST'
request._load_post_and_files()
request.META['REQUEST_METHOD'] = 'PUT'
request.PUT = request.POST
|
AttributeError
|
dataset/ETHPy150Open ionyse/ionyweb/ionyweb/utils.py/coerce_put_post
|
3,585 |
def coerce_delete_post(request):
"""
Django doesn't particularly understand REST.
In case we send data over DELETE, Django won't
actually look at the data and load it. We need
to twist its arm here.
    The try/except abomination here is due to a bug
in mod_python. This should fix it.
"""
if request.method == "DELETE":
# Bug fix: if _load_post_and_files has already been called, for
# example by middleware accessing request.POST, the below code to
# pretend the request is a POST instead of a PUT will be too late
# to make a difference. Also calling _load_post_and_files will result
# in the following exception:
# AttributeError: You cannot set the upload handlers after
# the upload has been processed.
# The fix is to check for the presence of the _post field which is set
# the first time _load_post_and_files is called (both by wsgi.py and
# modpython.py). If it's set, the request has to be 'reset' to redo
# the query value parsing in POST mode.
if hasattr(request, '_post'):
del request._post
del request._files
try:
request.method = "POST"
request._load_post_and_files()
request.method = "DELETE"
except __HOLE__:
request.META['REQUEST_METHOD'] = 'POST'
request._load_post_and_files()
request.META['REQUEST_METHOD'] = 'DELETE'
request.DELETE = request.POST
|
AttributeError
|
dataset/ETHPy150Open ionyse/ionyweb/ionyweb/utils.py/coerce_delete_post
|
3,586 |
def _install_req(py_executable, unzip=False, distribute=False):
if not distribute:
setup_fn = 'setuptools-0.6c11-py%s.egg' % sys.version[:3]
project_name = 'setuptools'
bootstrap_script = EZ_SETUP_PY
source = None
else:
setup_fn = None
source = 'distribute-0.6.8.tar.gz'
project_name = 'distribute'
bootstrap_script = DISTRIBUTE_SETUP_PY
try:
# check if the global Python has distribute installed or plain
# setuptools
import pkg_resources
if not hasattr(pkg_resources, '_distribute'):
location = os.path.dirname(pkg_resources.__file__)
logger.notify("A globally installed setuptools was found (in %s)" % location)
logger.notify("Use the --no-site-packages option to use distribute in "
"the virtualenv.")
except __HOLE__:
pass
search_dirs = file_search_dirs()
if setup_fn is not None:
setup_fn = _find_file(setup_fn, search_dirs)
if source is not None:
source = _find_file(source, search_dirs)
if is_jython and os._name == 'nt':
# Jython's .bat sys.executable can't handle a command line
# argument with newlines
import tempfile
fd, ez_setup = tempfile.mkstemp('.py')
os.write(fd, bootstrap_script)
os.close(fd)
cmd = [py_executable, ez_setup]
else:
cmd = [py_executable, '-c', bootstrap_script]
if unzip:
cmd.append('--always-unzip')
env = {}
if logger.stdout_level_matches(logger.DEBUG):
cmd.append('-v')
old_chdir = os.getcwd()
if setup_fn is not None and os.path.exists(setup_fn):
logger.info('Using existing %s egg: %s' % (project_name, setup_fn))
cmd.append(setup_fn)
if os.environ.get('PYTHONPATH'):
env['PYTHONPATH'] = setup_fn + os.path.pathsep + os.environ['PYTHONPATH']
else:
env['PYTHONPATH'] = setup_fn
else:
# the source is found, let's chdir
if source is not None and os.path.exists(source):
os.chdir(os.path.dirname(source))
else:
logger.info('No %s egg found; downloading' % project_name)
cmd.extend(['--always-copy', '-U', project_name])
logger.start_progress('Installing %s...' % project_name)
logger.indent += 2
cwd = None
if project_name == 'distribute':
env['DONT_PATCH_SETUPTOOLS'] = 'true'
def _filter_ez_setup(line):
return filter_ez_setup(line, project_name)
if not os.access(os.getcwd(), os.W_OK):
cwd = '/tmp'
if source is not None and os.path.exists(source):
# the current working dir is hostile, let's copy the
# tarball to /tmp
target = os.path.join(cwd, os.path.split(source)[-1])
shutil.copy(source, target)
try:
call_subprocess(cmd, show_stdout=False,
filter_stdout=_filter_ez_setup,
extra_env=env,
cwd=cwd)
finally:
logger.indent -= 2
logger.end_progress()
if os.getcwd() != old_chdir:
os.chdir(old_chdir)
if is_jython and os._name == 'nt':
os.remove(ez_setup)
|
ImportError
|
dataset/ETHPy150Open ralfonso/theory/go-pylons.py/_install_req
|
3,587 |
def file_search_dirs():
here = os.path.dirname(os.path.abspath(__file__))
dirs = ['.', here,
join(here, 'virtualenv_support')]
if os.path.splitext(os.path.dirname(__file__))[0] != 'virtualenv':
# Probably some boot script; just in case virtualenv is installed...
try:
import virtualenv
except __HOLE__:
pass
else:
dirs.append(os.path.join(os.path.dirname(virtualenv.__file__), 'virtualenv_support'))
return [d for d in dirs if os.path.isdir(d)]
|
ImportError
|
dataset/ETHPy150Open ralfonso/theory/go-pylons.py/file_search_dirs
|
3,588 |
def path_locations(home_dir):
"""Return the path locations for the environment (where libraries are,
where scripts go, etc)"""
# XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
# prefix arg is broken: http://bugs.python.org/issue3386
if sys.platform == 'win32':
# Windows has lots of problems with executables with spaces in
# the name; this function will remove them (using the ~1
# format):
mkdir(home_dir)
if ' ' in home_dir:
try:
import win32api
except __HOLE__:
print 'Error: the path "%s" has a space in it' % home_dir
print 'To handle these kinds of paths, the win32api module must be installed:'
print ' http://sourceforge.net/projects/pywin32/'
sys.exit(3)
home_dir = win32api.GetShortPathName(home_dir)
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'Scripts')
elif is_jython:
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'bin')
else:
lib_dir = join(home_dir, 'lib', py_version)
inc_dir = join(home_dir, 'include', py_version)
bin_dir = join(home_dir, 'bin')
return home_dir, lib_dir, inc_dir, bin_dir
|
ImportError
|
dataset/ETHPy150Open ralfonso/theory/go-pylons.py/path_locations
|
3,589 |
def check_subprocess(cmd, source, outname):
"""Run the command to resize the video and remove the output file if the
processing fails.
"""
logger = logging.getLogger(__name__)
try:
returncode, stdout, stderr = call_subprocess(cmd)
except __HOLE__:
logger.debug('Process terminated, removing file %s', outname)
if os.path.isfile(outname):
os.remove(outname)
raise
if returncode:
logger.debug('STDOUT:\n %s', stdout)
logger.debug('STDERR:\n %s', stderr)
if os.path.isfile(outname):
logger.debug('Removing file %s', outname)
os.remove(outname)
raise SubprocessException('Failed to process ' + source)
|
KeyboardInterrupt
|
dataset/ETHPy150Open saimn/sigal/sigal/video.py/check_subprocess
|
3,590 |
def _assert_only_test_databases_accessed(self):
original_init = Database.__init__
self_ = self
def asserting_init(self, uri, create=False, server=None, **params):
original_init(self, uri, create=create, server=server, **params)
try:
self_._assert_is_a_test_db(self.dbname)
except __HOLE__:
db = self
def request(self, *args, **kwargs):
self_._assert_is_a_test_db(db.dbname)
self.res.request = request
Database.__init__ = asserting_init
|
AssertionError
|
dataset/ETHPy150Open dimagi/commcare-hq/testrunner.py/HqTestSuiteRunner._assert_only_test_databases_accessed
|
3,591 |
@staticmethod
def get_test_class(method):
"""
return the TestCase class associated with method
method can either be a test_* method, or setUpClass
"""
try:
# setUpClass
return method.im_self
except __HOLE__:
# test_* method
return method.__class__
|
AttributeError
|
dataset/ETHPy150Open dimagi/commcare-hq/testrunner.py/TimingTestSuite.get_test_class
|
3,592 |
def run_benchmarks(control, experiment, benchmark_dir, benchmarks, trials,
vcs=None, record_dir=None, profile_dir=None,
continue_on_error=False, control_python=sys.executable,
experiment_python=sys.executable):
if benchmarks:
print("Running benchmarks: %s" % " ".join(benchmarks))
else:
print("Running all benchmarks")
if record_dir:
record_dir = os.path.abspath(record_dir)
if not os.path.isdir(record_dir):
raise ValueError('Recording directory "%s" does not exist' % record_dir)
print("Recording data to '%s'" % record_dir)
if profile_dir:
profile_dir = os.path.abspath(profile_dir)
if not os.path.isdir(profile_dir):
raise ValueError('Profile directory "%s" does not exist' % profile_dir)
print("Recording profile data to '%s'" % profile_dir)
control_label = get_django_version(control, vcs=vcs)
experiment_label = get_django_version(experiment, vcs=vcs)
branch_info = "%s branch " % vcs if vcs else ""
print("Control: Django %s (in %s%s)" % (control_label, branch_info, control))
print("Experiment: Django %s (in %s%s)" % (experiment_label, branch_info, experiment))
print('')
# Calculate the subshell envs that we'll use to execute the
# benchmarks in.
if vcs:
control_env = {
'PYTHONPATH': '%s:%s' % (os.path.abspath(os.getcwd()), benchmark_dir),
}
experiment_env = control_env.copy()
else:
control_env = {'PYTHONPATH': '%s:%s' % (os.path.abspath(control), benchmark_dir)}
experiment_env = {'PYTHONPATH': '%s:%s' % (os.path.abspath(experiment), benchmark_dir)}
for benchmark in discover_benchmarks(benchmark_dir):
if not benchmarks or benchmark in benchmarks:
print("Running '%s' benchmark ..." % benchmark)
settings_mod = '%s.settings' % benchmark
control_env['DJANGO_SETTINGS_MODULE'] = settings_mod
experiment_env['DJANGO_SETTINGS_MODULE'] = settings_mod
if profile_dir is not None:
control_env['DJANGOBENCH_PROFILE_FILE'] = os.path.join(profile_dir, "con-%s" % benchmark)
experiment_env['DJANGOBENCH_PROFILE_FILE'] = os.path.join(profile_dir, "exp-%s" % benchmark)
try:
if vcs:
switch_to_branch(vcs, control)
control_data = run_benchmark(benchmark, benchmark_dir, trials,
executable=control_python,
env=control_env)
if vcs:
switch_to_branch(vcs, experiment)
experiment_data = run_benchmark(benchmark, benchmark_dir, trials,
executable=experiment_python,
env=experiment_env)
except SkipBenchmark as reason:
print("Skipped: %s\n" % reason)
continue
except __HOLE__ as error:
if continue_on_error:
print("Failed: %s\n" % error)
continue
raise
options = argparse.Namespace(
track_memory=False,
diff_instrumentation=False,
benchmark_name=benchmark,
disable_timelines=True,
control_label=control_label,
experiment_label=experiment_label,
)
result = perf.CompareBenchmarkData(control_data, experiment_data, options)
if record_dir:
record_benchmark_results(
dest=os.path.join(record_dir, '%s.json' % benchmark),
name=benchmark,
result=result,
control=control_label,
experiment=experiment_label,
control_data=control_data,
experiment_data=experiment_data,
)
print(format_benchmark_result(result, len(control_data.runtimes)))
print('')
|
RuntimeError
|
dataset/ETHPy150Open django/djangobench/djangobench/main.py/run_benchmarks
|
3,593 |
def parse_instance_info_capabilities(node):
"""Parse the instance_info capabilities.
One way of having these capabilities set is via Nova, where the
capabilities are defined in the Flavor extra_spec and passed to
Ironic by the Nova Ironic driver.
NOTE: Although our API fully supports JSON fields, to maintain the
backward compatibility with Juno the Nova Ironic driver is sending
it as a string.
:param node: a single Node.
:raises: InvalidParameterValue if the capabilities string is not a
dictionary or is malformed.
:returns: A dictionary with the capabilities if found, otherwise an
empty dictionary.
"""
def parse_error():
error_msg = (_('Error parsing capabilities from Node %s instance_info '
'field. A dictionary or a "jsonified" dictionary is '
'expected.') % node.uuid)
raise exception.InvalidParameterValue(error_msg)
capabilities = node.instance_info.get('capabilities', {})
if isinstance(capabilities, six.string_types):
try:
capabilities = jsonutils.loads(capabilities)
except (ValueError, __HOLE__):
parse_error()
if not isinstance(capabilities, dict):
parse_error()
return capabilities
|
TypeError
|
dataset/ETHPy150Open openstack/ironic/ironic/drivers/modules/deploy_utils.py/parse_instance_info_capabilities
|
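Note on the entry above: the (ValueError, TypeError) pair fits because json.loads raises ValueError on malformed text and TypeError on non-string input. A plain-json sketch, assuming oslo's jsonutils.loads mirrors the standard json module here:

import json

for bad in ('{not json}', None):
    try:
        capabilities = json.loads(bad)
    except (ValueError, TypeError) as exc:
        print(type(exc).__name__, '->', exc)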
3,594 |
def agent_get_clean_steps(task, interface=None, override_priorities=None):
"""Get the list of cached clean steps from the agent.
#TODO(JoshNang) move to BootInterface
The clean steps cache is updated at the beginning of cleaning.
:param task: a TaskManager object containing the node
:param interface: The interface for which clean steps
are to be returned. If this is not provided, it returns the
clean steps for all interfaces.
:param override_priorities: a dictionary with keys being step names and
values being new priorities for them. If a step isn't in this
dictionary, the step's original priority is used.
:raises NodeCleaningFailure: if the clean steps are not yet cached,
for example, when a node has just been enrolled and has not been
cleaned yet.
:returns: A list of clean step dictionaries
"""
node = task.node
try:
all_steps = node.driver_internal_info['agent_cached_clean_steps']
except __HOLE__:
raise exception.NodeCleaningFailure(_('Cleaning steps are not yet '
'available for node %(node)s')
% {'node': node.uuid})
if interface:
steps = [step.copy() for step in all_steps.get(interface, [])]
else:
steps = [step.copy() for step_list in all_steps.values()
for step in step_list]
if not steps or not override_priorities:
return steps
for step in steps:
new_priority = override_priorities.get(step.get('step'))
if new_priority is not None:
step['priority'] = new_priority
return steps
|
KeyError
|
dataset/ETHPy150Open openstack/ironic/ironic/drivers/modules/deploy_utils.py/agent_get_clean_steps
|
3,595 |
def parse_root_device_hints(node):
"""Parse the root_device property of a node.
Parse the root_device property of a node and make it a flat string
to be passed via the PXE config.
:param node: a single Node.
:returns: A flat string with the following format
opt1=value1,opt2=value2. Or None if the
Node contains no hints.
:raises: InvalidParameterValue, if some information is invalid.
"""
root_device = node.properties.get('root_device')
if not root_device:
return
# Find invalid hints for logging
invalid_hints = set(root_device) - VALID_ROOT_DEVICE_HINTS
if invalid_hints:
raise exception.InvalidParameterValue(
_('The hints "%(invalid_hints)s" are invalid. '
'Valid hints are: "%(valid_hints)s"') %
{'invalid_hints': ', '.join(invalid_hints),
'valid_hints': ', '.join(VALID_ROOT_DEVICE_HINTS)})
if 'size' in root_device:
try:
int(root_device['size'])
except __HOLE__:
raise exception.InvalidParameterValue(
_('Root device hint "size" is not an integer value.'))
hints = []
for key, value in sorted(root_device.items()):
# NOTE(lucasagomes): We can't have spaces in the PXE config
# file, so we are going to url/percent encode the value here
# and decode on the other end.
if isinstance(value, six.string_types):
value = value.strip()
value = parse.quote(value)
hints.append("%s=%s" % (key, value))
return ','.join(hints)
|
ValueError
|
dataset/ETHPy150Open openstack/ironic/ironic/drivers/modules/deploy_utils.py/parse_root_device_hints
|
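Note on the entry above: a hedged sketch of the flattening the docstring describes (opt1=value1,opt2=value2), with illustrative hint values and the same percent-encoding of string values:

from urllib.parse import quote

root_device = {'size': 40, 'model': 'Fast SSD'}  # illustrative hints
hints = ','.join('%s=%s' % (k, quote(str(v).strip()))
                 for k, v in sorted(root_device.items()))
print(hints)  # model=Fast%20SSD,size=40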
3,596 |
def prepare_cleaning_ports(task):
"""Prepare the Ironic ports of the node for cleaning.
This method deletes the cleaning ports currently existing
for all the ports of the node and then creates a new one
for each one of them. It also adds 'vif_port_id' to port.extra
of each Ironic port, after creating the cleaning ports.
:param task: a TaskManager object containing the node
:raises NodeCleaningFailure: if the previous cleaning ports cannot
be removed or if new cleaning ports cannot be created
"""
provider = dhcp_factory.DHCPFactory()
# If we have left over ports from a previous cleaning, remove them
if getattr(provider.provider, 'delete_cleaning_ports', None):
# Allow to raise if it fails, is caught and handled in conductor
provider.provider.delete_cleaning_ports(task)
# Create cleaning ports if necessary
if getattr(provider.provider, 'create_cleaning_ports', None):
# Allow to raise if it fails, is caught and handled in conductor
ports = provider.provider.create_cleaning_ports(task)
# Add vif_port_id for each of the ports because some boot
# interfaces expects these to prepare for booting ramdisk.
for port in task.ports:
extra_dict = port.extra
try:
extra_dict['vif_port_id'] = ports[port.uuid]
except __HOLE__:
# This is an internal error in Ironic. All DHCP providers
# implementing create_cleaning_ports are supposed to
# return a VIF port ID for all Ironic ports. But
# that doesn't seem to be true here.
error = (_("When creating cleaning ports, DHCP provider "
"didn't return VIF port ID for %s") % port.uuid)
raise exception.NodeCleaningFailure(
node=task.node.uuid, reason=error)
else:
port.extra = extra_dict
port.save()
|
KeyError
|
dataset/ETHPy150Open openstack/ironic/ironic/drivers/modules/deploy_utils.py/prepare_cleaning_ports
|
3,597 |
def parse_instance_info(node):
"""Gets the instance specific Node deployment info.
This method validates whether the 'instance_info' property of the
supplied node contains the required information for this driver to
deploy images to the node.
:param node: a single Node.
:returns: A dict with the instance_info values.
:raises: MissingParameterValue, if any of the required parameters are
missing.
:raises: InvalidParameterValue, if any of the parameters have invalid
value.
"""
info = node.instance_info
i_info = {}
i_info['image_source'] = info.get('image_source')
iwdi = node.driver_internal_info.get('is_whole_disk_image')
if not iwdi:
if (i_info['image_source'] and
not service_utils.is_glance_image(
i_info['image_source'])):
i_info['kernel'] = info.get('kernel')
i_info['ramdisk'] = info.get('ramdisk')
i_info['root_gb'] = info.get('root_gb')
error_msg = _("Cannot validate driver deploy. Some parameters were missing"
" in node's instance_info")
check_for_missing_params(i_info, error_msg)
# Internal use only
i_info['deploy_key'] = info.get('deploy_key')
i_info['swap_mb'] = int(info.get('swap_mb', 0))
i_info['ephemeral_gb'] = info.get('ephemeral_gb', 0)
err_msg_invalid = _("Cannot validate parameter for driver deploy. "
"Invalid parameter %(param)s. Reason: %(reason)s")
for param in DISK_LAYOUT_PARAMS:
try:
int(i_info[param])
except ValueError:
reason = _("%s is not an integer value.") % i_info[param]
raise exception.InvalidParameterValue(err_msg_invalid %
{'param': param,
'reason': reason})
i_info['root_mb'] = 1024 * int(info.get('root_gb'))
if iwdi:
if int(i_info['swap_mb']) > 0 or int(i_info['ephemeral_gb']) > 0:
err_msg_invalid = _("Cannot deploy whole disk image with "
"swap or ephemeral size set")
raise exception.InvalidParameterValue(err_msg_invalid)
i_info['ephemeral_format'] = info.get('ephemeral_format')
i_info['configdrive'] = info.get('configdrive')
if i_info['ephemeral_gb'] and not i_info['ephemeral_format']:
i_info['ephemeral_format'] = CONF.pxe.default_ephemeral_format
preserve_ephemeral = info.get('preserve_ephemeral', False)
try:
i_info['preserve_ephemeral'] = (
strutils.bool_from_string(preserve_ephemeral, strict=True))
except __HOLE__ as e:
raise exception.InvalidParameterValue(
err_msg_invalid % {'param': 'preserve_ephemeral', 'reason': e})
# NOTE(Zhenguo): If rebuilding with preserve_ephemeral option, check
# that the disk layout is unchanged.
if i_info['preserve_ephemeral']:
_check_disk_layout_unchanged(node, i_info)
return i_info
|
ValueError
|
dataset/ETHPy150Open openstack/ironic/ironic/drivers/modules/deploy_utils.py/parse_instance_info
|
3,598 |
@staticmethod
def from_manifest(manifest, manifest_url):
r = SourceInstall()
r.manifest = manifest
r.manifest_url = manifest_url
rd_debug("Loading manifest:\n{{{%s\n}}}\n"%manifest)
r.install_command = manifest.get("install-script", '')
r.check_presence_command = manifest.get("check-presence-script", '')
r.exec_path = manifest.get("exec-path", ".")
try:
r.tarball = manifest["uri"]
except __HOLE__:
raise InvalidRdmanifest("uri required for source rosdeps")
r.alternate_tarball = manifest.get("alternate-uri")
r.tarball_md5sum = manifest.get("md5sum")
r.dependencies = manifest.get("depends", [])
return r
|
KeyError
|
dataset/ETHPy150Open ros-infrastructure/rosdep/src/rosdep2/platforms/source.py/SourceInstall.from_manifest
|
3,599 |
def resolve(self, rosdep_args):
"""
:raises: :exc:`InvalidData` If format invalid or unable
to retrieve rdmanifests.
:returns: [SourceInstall] instances.
"""
try:
url = rosdep_args["uri"]
except __HOLE__:
raise InvalidData("'uri' key required for source rosdeps")
alt_url = rosdep_args.get("alternate-uri", None)
md5sum = rosdep_args.get("md5sum", None)
# load manifest from cache or from web
manifest = None
if url in self._rdmanifest_cache:
return self._rdmanifest_cache[url]
elif alt_url in self._rdmanifest_cache:
return self._rdmanifest_cache[alt_url]
try:
rd_debug("Downloading manifest [%s], mirror [%s]"%(url, alt_url))
manifest, download_url = download_rdmanifest(url, md5sum, alt_url)
resolved = SourceInstall.from_manifest(manifest, download_url)
self._rdmanifest_cache[download_url] = [resolved]
return [resolved]
except DownloadFailed as ex:
# not sure this should be masked this way
raise InvalidData(str(ex))
except InvalidRdmanifest as ex:
raise InvalidData(str(ex))
|
KeyError
|
dataset/ETHPy150Open ros-infrastructure/rosdep/src/rosdep2/platforms/source.py/SourceInstaller.resolve
|