Unnamed: 0 (int64, 0-10k) | function (stringlengths 79-138k) | label (stringclasses, 20 values) | info (stringlengths 42-261) |
---|---|---|---|
4,300 |
def fopen(*args, **kwargs):
'''
Wrapper around open() built-in to set CLOEXEC on the fd.
This flag specifies that the file descriptor should be closed when an exec
function is invoked;
When a file descriptor is allocated (as with open or dup), this bit is
initially cleared on the new file descriptor, meaning that descriptor will
survive into the new program after exec.
NB! We still have small race condition between open and fcntl.
'''
# ensure 'binary' mode is always used on Windows in Python 2
if ((six.PY2 and is_windows() and 'binary' not in kwargs) or
kwargs.pop('binary', False)):
if len(args) > 1:
args = list(args)
if 'b' not in args[1]:
args[1] += 'b'
elif kwargs.get('mode', None):
if 'b' not in kwargs['mode']:
kwargs['mode'] += 'b'
else:
# the default is to read
kwargs['mode'] = 'rb'
elif six.PY3 and 'encoding' not in kwargs:
# In Python 3, if text mode is used and the encoding
# is not specified, set the encoding to 'utf-8'.
binary = False
if len(args) > 1:
args = list(args)
if 'b' in args[1]:
binary = True
if kwargs.get('mode', None):
if 'b' in kwargs['mode']:
binary = True
if not binary:
kwargs['encoding'] = 'utf-8'
fhandle = open(*args, **kwargs)
if is_fcntl_available():
# modify the file descriptor on systems with fcntl
# unix and unix-like systems only
try:
FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103
except __HOLE__:
FD_CLOEXEC = 1 # pylint: disable=C0103
old_flags = fcntl.fcntl(fhandle.fileno(), fcntl.F_GETFD)
fcntl.fcntl(fhandle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)
return fhandle
| AttributeError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/fopen |
4,301 |
def check_whitelist_blacklist(value, whitelist=None, blacklist=None):
'''
Check a whitelist and/or blacklist to see if the value matches it.
'''
if not any((whitelist, blacklist)):
return True
in_whitelist = False
in_blacklist = False
if whitelist:
if not isinstance(whitelist, list):
whitelist = [whitelist]
try:
for expr in whitelist:
if expr_match(value, expr):
in_whitelist = True
break
except __HOLE__:
log.error('Non-iterable whitelist {0}'.format(whitelist))
whitelist = None
else:
whitelist = None
if blacklist:
if not isinstance(blacklist, list):
blacklist = [blacklist]
try:
for expr in blacklist:
if expr_match(value, expr):
in_blacklist = True
break
except TypeError:
log.error('Non-iterable blacklist {0}'.format(whitelist))
blacklist = None
else:
blacklist = None
if whitelist and not blacklist:
ret = in_whitelist
elif blacklist and not whitelist:
ret = not in_blacklist
elif whitelist and blacklist:
ret = in_whitelist and not in_blacklist
else:
ret = True
return ret
| TypeError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/check_whitelist_blacklist |
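The whitelist/blacklist row above relies on salt's expr_match helper. As a rough, simplified illustration of the matching idea only, the sketch below substitutes fnmatch glob matching (an assumption; the real helper also understands regular expressions) and collapses the flag bookkeeping:

import fnmatch

def allowed(value, whitelist=None, blacklist=None):
    # deny first if any blacklist pattern matches
    if blacklist and any(fnmatch.fnmatch(value, pat) for pat in blacklist):
        return False
    # with a whitelist present, the value must match one of its patterns
    if whitelist:
        return any(fnmatch.fnmatch(value, pat) for pat in whitelist)
    return True

print(allowed('web01', whitelist=['web*']))                        # True
print(allowed('db01', whitelist=['web*']))                         # False
print(allowed('web02', whitelist=['web*'], blacklist=['web02']))   # False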
4,302 |
def traverse_dict(data, key, default, delimiter=DEFAULT_TARGET_DELIM):
'''
Traverse a dict using a colon-delimited (or otherwise delimited, using the
'delimiter' param) target string. The target 'foo:bar:baz' will return
data['foo']['bar']['baz'] if this value exists, and will otherwise return
the dict in the default argument.
'''
try:
for each in key.split(delimiter):
data = data[each]
except (__HOLE__, IndexError, TypeError):
# Encountered a non-indexable value in the middle of traversing
return default
return data
| KeyError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/traverse_dict |
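A minimal standalone rendition of the colon-delimited lookup from the traverse_dict row above; the function and sample names here are illustrative, not part of the record:

def traverse(data, key, default=None, delimiter=':'):
    try:
        for part in key.split(delimiter):
            data = data[part]
    except (KeyError, IndexError, TypeError):
        # a missing key or a non-indexable value ends the traversal
        return default
    return data

sample = {'foo': {'bar': {'baz': 42}}}
print(traverse(sample, 'foo:bar:baz'))          # 42
print(traverse(sample, 'foo:missing', 'n/a'))   # n/a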
4,303 |
def traverse_dict_and_list(data, key, default, delimiter=DEFAULT_TARGET_DELIM):
'''
Traverse a dict or list using a colon-delimited (or otherwise delimited,
using the 'delimiter' param) target string. The target 'foo:bar:0' will
return data['foo']['bar'][0] if this value exists, and will otherwise
return the dict in the default argument.
Function will automatically determine the target type.
The target 'foo:bar:0' will return data['foo']['bar'][0] if data like
{'foo':{'bar':['baz']}} , if data like {'foo':{'bar':{'0':'baz'}}}
then return data['foo']['bar']['0']
'''
for each in key.split(delimiter):
if isinstance(data, list):
try:
idx = int(each)
except ValueError:
embed_match = False
# Index was not numeric, lets look at any embedded dicts
for embedded in (x for x in data if isinstance(x, dict)):
try:
data = embedded[each]
embed_match = True
break
except KeyError:
pass
if not embed_match:
# No embedded dicts matched, return the default
return default
else:
try:
data = data[idx]
except __HOLE__:
return default
else:
try:
data = data[each]
except (KeyError, TypeError):
return default
return data
| IndexError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/traverse_dict_and_list |
4,304 |
@real_memoize
def is_proxy():
'''
Return True if this minion is a proxy minion.
Leverages the fact that is_linux() and is_windows
both return False for proxies.
TODO: Need to extend this for proxies that might run on
other Unices
'''
import __main__ as main
# This is a hack. If a proxy minion is started by other
# means, e.g. a custom script that creates the minion objects
# then this will fail.
is_proxy = False
try:
if 'salt-proxy' in main.__file__:
is_proxy = True
except __HOLE__:
pass
return is_proxy
| AttributeError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/is_proxy |
4,305 |
@real_memoize
def is_smartos_globalzone():
'''
Function to return if host is SmartOS (Illumos) global zone or not
'''
if not is_smartos():
return False
else:
cmd = ['zonename']
try:
zonename = subprocess.Popen(
cmd, shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except __HOLE__:
return False
if zonename.returncode:
return False
if zonename.stdout.read().strip() == 'global':
return True
return False
| OSError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/is_smartos_globalzone |
4,306 |
@real_memoize
def is_smartos_zone():
'''
Function to return if host is SmartOS (Illumos) and not the gz
'''
if not is_smartos():
return False
else:
cmd = ['zonename']
try:
zonename = subprocess.Popen(
cmd, shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except __HOLE__:
return False
if zonename.returncode:
return False
if zonename.stdout.read().strip() == 'global':
return False
return True
| OSError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/is_smartos_zone |
4,307 |
def test_mode(**kwargs):
'''
Examines the kwargs passed and returns True if any kwarg which matching
"Test" in any variation on capitalization (i.e. "TEST", "Test", "TeSt",
etc) contains a True value (as determined by salt.utils.is_true).
'''
for arg, value in six.iteritems(kwargs):
try:
if arg.lower() == 'test' and is_true(value):
return True
except __HOLE__:
continue
return False
| AttributeError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/test_mode |
4,308 |
def is_true(value=None):
'''
Returns a boolean value representing the "truth" of the value passed. The
rules for what is a "True" value are:
1. Integer/float values greater than 0
2. The string values "True" and "true"
3. Any object for which bool(obj) returns True
'''
# First, try int/float conversion
try:
value = int(value)
except (ValueError, __HOLE__):
pass
try:
value = float(value)
except (ValueError, TypeError):
pass
# Now check for truthiness
if isinstance(value, (int, float)):
return value > 0
elif isinstance(value, six.string_types):
return str(value).lower() == 'true'
else:
return bool(value)
| TypeError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/is_true |
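To make the truth rules in the is_true row above concrete, here is a simplified standalone version (it assumes plain str where the original checks six.string_types) together with a few spot checks:

def is_true(value=None):
    # try numeric coercion first, as in the row above
    try:
        value = int(value)
    except (ValueError, TypeError):
        pass
    try:
        value = float(value)
    except (ValueError, TypeError):
        pass
    if isinstance(value, (int, float)):
        return value > 0
    elif isinstance(value, str):
        return value.lower() == 'true'
    return bool(value)

assert is_true(1) and is_true('True') and is_true('2')
assert not is_true(0) and not is_true('false') and not is_true(None)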
4,309 |
def print_cli(msg):
'''
Wrapper around print() that suppresses tracebacks on broken pipes (i.e.
when salt output is piped to less and less is stopped prematurely).
'''
try:
try:
print(msg)
except UnicodeEncodeError:
print(msg.encode('utf-8'))
except __HOLE__ as exc:
if exc.errno != errno.EPIPE:
raise
| IOError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/print_cli |
4,310 |
def date_cast(date):
'''
Casts any object into a datetime.datetime object
date
any datetime, time string representation...
'''
if date is None:
return datetime.datetime.now()
elif isinstance(date, datetime.datetime):
return date
# fuzzy date
try:
if isinstance(date, six.string_types):
try:
if HAS_TIMELIB:
# py3: yes, timelib.strtodatetime wants bytes, not str :/
return timelib.strtodatetime(to_bytes(date))
except __HOLE__:
pass
# not parsed yet, obviously a timestamp?
if date.isdigit():
date = int(date)
else:
date = float(date)
return datetime.datetime.fromtimestamp(date)
except Exception:
if HAS_TIMELIB:
raise ValueError('Unable to parse {0}'.format(date))
raise RuntimeError('Unable to parse {0}.'
' Consider installing timelib'.format(date))
| ValueError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/date_cast |
4,311 |
def argspec_report(functions, module=''):
'''
Pass in a functions dict as it is returned from the loader and return the
argspec function signatures
'''
ret = {}
# TODO: cp.get_file will also match cp.get_file_str. this is the
# same logic as sys.doc, and it is not working as expected, see
# issue #3614
_use_fnmatch = False
if '*' in module:
target_mod = module
_use_fnmatch = True
elif module:
# allow both "sys" and "sys." to match sys, without also matching
# sysctl
target_module = module + '.' if not module.endswith('.') else module
else:
target_module = ''
if _use_fnmatch:
for fun in fnmatch.filter(functions, target_mod):
try:
aspec = salt.utils.args.get_function_argspec(functions[fun])
except __HOLE__:
# this happens if not callable
continue
args, varargs, kwargs, defaults = aspec
ret[fun] = {}
ret[fun]['args'] = args if args else None
ret[fun]['defaults'] = defaults if defaults else None
ret[fun]['varargs'] = True if varargs else None
ret[fun]['kwargs'] = True if kwargs else None
else:
for fun in functions:
if fun == module or fun.startswith(target_module):
try:
aspec = salt.utils.args.get_function_argspec(functions[fun])
except TypeError:
# this happens if not callable
continue
args, varargs, kwargs, defaults = aspec
ret[fun] = {}
ret[fun]['args'] = args if args else None
ret[fun]['defaults'] = defaults if defaults else None
ret[fun]['varargs'] = True if varargs else None
ret[fun]['kwargs'] = True if kwargs else None
return ret
| TypeError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/argspec_report |
4,312 |
def find_json(raw):
'''
Pass in a raw string and load the json when is starts. This allows for a
string to start with garbage and end with json but be cleanly loaded
'''
ret = {}
for ind in range(len(raw)):
working = '\n'.join(raw.splitlines()[ind:])
try:
ret = json.loads(working, object_hook=decode_dict)
except __HOLE__:
continue
if ret:
return ret
if not ret:
# Not json, raise an error
raise ValueError
| ValueError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/find_json |
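The find_json row above strips leading garbage one line at a time until json.loads succeeds. A hedged sketch of that idea, without the decode_dict object hook used in the original:

import json

def find_json(raw):
    lines = raw.splitlines()
    for ind in range(len(lines)):
        working = '\n'.join(lines[ind:])
        try:
            return json.loads(working)
        except ValueError:
            continue
    # nothing parsed as JSON
    raise ValueError('no JSON document found in input')

print(find_json('noise before the payload\n{"ok": true, "n": 3}'))  # {'ok': True, 'n': 3}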
4,313 |
def get_group_list(user=None, include_default=True):
'''
Returns a list of all of the system group names of which the user
is a member.
'''
if HAS_GRP is False or HAS_PWD is False:
# We don't work on platforms that don't have grp and pwd
# Just return an empty list
return []
group_names = None
ugroups = set()
if not isinstance(user, six.string_types):
raise Exception
if hasattr(os, 'getgrouplist'):
# Try os.getgrouplist, available in python >= 3.3
log.trace('Trying os.getgrouplist for \'{0}\''.format(user))
try:
group_names = [
grp.getgrgid(grpid).gr_name for grpid in
os.getgrouplist(user, pwd.getpwnam(user).pw_gid)
]
except Exception:
pass
else:
# Try pysss.getgrouplist
log.trace('Trying pysss.getgrouplist for \'{0}\''.format(user))
try:
import pysss # pylint: disable=import-error
group_names = list(pysss.getgrouplist(user))
except Exception:
pass
if group_names is None:
# Fall back to generic code
# Include the user's default group to behave like
# os.getgrouplist() and pysss.getgrouplist() do
log.trace('Trying generic group list for \'{0}\''.format(user))
group_names = [g.gr_name for g in grp.getgrall() if user in g.gr_mem]
try:
default_group = grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name
if default_group not in group_names:
group_names.append(default_group)
except __HOLE__:
# If for some reason the user does not have a default group
pass
ugroups.update(group_names)
if include_default is False:
# Historically, saltstack code for getting group lists did not
# include the default group. Some things may only want
# supplemental groups, so include_default=False omits the users
# default group.
try:
default_group = grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name
ugroups.remove(default_group)
except KeyError:
# If for some reason the user does not have a default group
pass
log.trace('Group list for user \'{0}\': \'{1}\''.format(user, sorted(ugroups)))
return sorted(ugroups)
| KeyError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/get_group_list |
4,314 |
def import_json():
'''
Import a json module, starting with the quick ones and going down the list)
'''
for fast_json in ('ujson', 'yajl', 'json'):
try:
mod = __import__(fast_json)
log.trace('loaded {0} json lib'.format(fast_json))
return mod
except __HOLE__:
continue
| ImportError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/import_json |
4,315 |
def chugid(runas):
'''
Change the current process to belong to
the imputed user (and the groups he belongs to)
'''
uinfo = pwd.getpwnam(runas)
supgroups = []
supgroups_seen = set()
# The line below used to exclude the current user's primary gid.
# However, when root belongs to more than one group
# this causes root's primary group of '0' to be dropped from
# his grouplist. On FreeBSD, at least, this makes some
# command executions fail with 'access denied'.
#
# The Python documentation says that os.setgroups sets only
# the supplemental groups for a running process. On FreeBSD
# this does not appear to be strictly true.
group_list = get_group_dict(runas, include_default=True)
if sys.platform == 'darwin':
group_list = dict((k, v) for k, v in six.iteritems(group_list)
if not k.startswith('_'))
for group_name in group_list:
gid = group_list[group_name]
if (gid not in supgroups_seen
and not supgroups_seen.add(gid)):
supgroups.append(gid)
if os.getgid() != uinfo.pw_gid:
try:
os.setgid(uinfo.pw_gid)
except __HOLE__ as err:
raise CommandExecutionError(
'Failed to change from gid {0} to {1}. Error: {2}'.format(
os.getgid(), uinfo.pw_gid, err
)
)
# Set supplemental groups
if sorted(os.getgroups()) != sorted(supgroups):
try:
os.setgroups(supgroups)
except OSError as err:
raise CommandExecutionError(
'Failed to set supplemental groups to {0}. Error: {1}'.format(
supgroups, err
)
)
if os.getuid() != uinfo.pw_uid:
try:
os.setuid(uinfo.pw_uid)
except OSError as err:
raise CommandExecutionError(
'Failed to change from uid {0} to {1}. Error: {2}'.format(
os.getuid(), uinfo.pw_uid, err
)
)
| OSError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/chugid |
4,316 |
def is_iter(y, ignore=six.string_types):
'''
Test if an object is iterable, but not a string type.
Test if an object is an iterator or is iterable itself. By default this
does not return True for string objects.
The `ignore` argument defaults to a list of string types that are not
considered iterable. This can be used to also exclude things like
dictionaries or named tuples.
Based on https://bitbucket.org/petershinners/yter
'''
if ignore and isinstance(y, ignore):
return False
try:
iter(y)
return True
except __HOLE__:
return False
| TypeError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/is_iter |
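A small demonstration of the iter()-based check from the is_iter row above, with str and bytes standing in for the original's six.string_types default:

def is_iter(y, ignore=(str, bytes)):
    # excluded types are never reported as iterable
    if ignore and isinstance(y, ignore):
        return False
    try:
        iter(y)
        return True
    except TypeError:
        return False

print(is_iter([1, 2]))   # True
print(is_iter('abc'))    # False
print(is_iter(42))       # False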
4,317 |
def split_input(val):
'''
Take an input value and split it into a list, returning the resulting list
'''
if isinstance(val, list):
return val
try:
return [x.strip() for x in val.split(',')]
except __HOLE__:
return [x.strip() for x in str(val).split(',')]
| AttributeError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/split_input |
4,318 |
def str_version_to_evr(verstring):
'''
Split the package version string into epoch, version and release.
Return this as tuple.
The epoch is always not empty. The version and the release can be an empty
string if such a component could not be found in the version string.
"2:1.0-1.2" => ('2', '1.0', '1.2)
"1.0" => ('0', '1.0', '')
"" => ('0', '', '')
'''
if verstring in [None, '']:
return '0', '', ''
idx_e = verstring.find(':')
if idx_e != -1:
try:
epoch = str(int(verstring[:idx_e]))
except __HOLE__:
# look, garbage in the epoch field, how fun, kill it
epoch = '0' # this is our fallback, deal
else:
epoch = '0'
idx_r = verstring.find('-')
if idx_r != -1:
version = verstring[idx_e + 1:idx_r]
release = verstring[idx_r + 1:]
else:
version = verstring[idx_e + 1:]
release = ''
return epoch, version, release
| ValueError | dataset/ETHPy150Open saltstack/salt/salt/utils/__init__.py/str_version_to_evr |
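The docstring in the row above already gives the expected epoch/version/release splits; the standalone sketch below (hypothetical name, same slicing logic) reproduces those examples:

def split_evr(verstring):
    if verstring in (None, ''):
        return '0', '', ''
    epoch = '0'
    idx_e = verstring.find(':')
    if idx_e != -1:
        try:
            epoch = str(int(verstring[:idx_e]))
        except ValueError:
            # garbage epoch falls back to '0'
            epoch = '0'
    idx_r = verstring.find('-')
    if idx_r != -1:
        return epoch, verstring[idx_e + 1:idx_r], verstring[idx_r + 1:]
    return epoch, verstring[idx_e + 1:], ''

assert split_evr('2:1.0-1.2') == ('2', '1.0', '1.2')
assert split_evr('1.0') == ('0', '1.0', '')
assert split_evr('') == ('0', '', '')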
4,319 |
def create_query_params(**kwargs):
params = {}
for potential_arg in 'login', 'experiment_name', 'category_name', 'ip', 'country':
if potential_arg in request.args:
params[potential_arg] = request.args[potential_arg]
for potential_arg in 'start_date', 'end_date':
if potential_arg in request.args:
try:
params[potential_arg] = datetime.datetime.strptime(request.args[potential_arg], "%Y-%m-%d").date()
except __HOLE__:
pass
for potential_arg in 'page',:
if potential_arg in request.args:
try:
params[potential_arg] = int(request.args[potential_arg])
except ValueError:
pass
if 'page' not in params or params['page'] <= 0:
params['page'] = 1
for potential_arg in 'date_precision',:
if potential_arg in request.args:
if request.args[potential_arg] in ('day', 'month', 'year', 'week'):
params[potential_arg] = request.args[potential_arg]
if 'date_precision' not in params:
params['date_precision'] = 'month'
params.update(kwargs)
query_params = UsesQueryParams(**params)
metadata = weblab_api.db.quickadmin_uses_metadata(query_params)
params['count'] = metadata['count']
if 'start_date' in params:
params['min_date'] = params['start_date']
else:
params['min_date'] = metadata['min_date']
if 'end_date' in params:
params['max_date'] = params['end_date']
else:
params['max_date'] = metadata['max_date']
return UsesQueryParams(**params)
| ValueError | dataset/ETHPy150Open weblabdeusto/weblabdeusto/server/src/weblab/core/web/quickadmin.py/create_query_params |
4,320 |
def hydrate_runcaseversions(self, bundle):
"""
Handle the runcaseversion creation during a POST of a new Run.
Tastypie handles the creation of the run itself. But we handle the
RunCaseVersions and Results because we have special handler methods for
setting the statuses which we want to keep DRY.
"""
try:
run = bundle.obj
run.save()
# walk results
for data in bundle.data["runcaseversions"]:
status = data.pop("status")
# find caseversion for case
cv = CaseVersion.objects.get(
productversion=run.productversion,
case=data.pop("case"),
)
# create runcaseversion for this run to caseversion
rcv, created = RunCaseVersion.objects.get_or_create(
run=run,
caseversion=cv,
)
data["user"] = bundle.request.user
data["environment"] = Environment.objects.get(
pk=data["environment"])
# create result via methods on runcaseversion
rcv.get_result_method(status)(**data)
bundle.data["runcaseversions"] = []
return bundle
except __HOLE__ as e:
raise ValidationError(
"bad result object data missing key: {0}".format(e))
except ObjectDoesNotExist as e:
raise ValidationError(e)
| KeyError | dataset/ETHPy150Open mozilla/moztrap/moztrap/model/execution/api.py/RunResource.hydrate_runcaseversions |
4,321 |
def obj_create(self, bundle, request=None, **kwargs):
"""
Manually create the proper results objects.
This is necessary because we have special handler methods in
RunCaseVersion for setting the statuses which we want to keep DRY.
"""
request = request or bundle.request
data = bundle.data.copy()
try:
status = data.pop("status")
case = data.pop("case")
env = Environment.objects.get(pk=data.get("environment"))
run = data.pop("run_id")
except __HOLE__ as e:
raise ValidationError(
"bad result object data missing key: {0}".format(e))
except Environment.DoesNotExist as e:
raise ValidationError(
"Specified environment does not exist: {0}".format(e))
data["environment"] = env
try:
rcv = RunCaseVersion.objects.get(
run__id=run,
caseversion__case__id=case,
environments=env,
)
except RunCaseVersion.DoesNotExist as e:
raise ValidationError(
"RunCaseVersion not found for run: {0}, case: {1}, environment: {2}:\nError {3}".format(
str(run), str(case), str(env), e))
self.authorized_create_detail([rcv], bundle)
data["user"] = request.user
method = rcv.get_result_method(status)
bundle.obj = method(**data)
return bundle
| KeyError | dataset/ETHPy150Open mozilla/moztrap/moztrap/model/execution/api.py/ResultResource.obj_create |
4,322 |
@property
def group(self):
"""FIXME: DEPRECATED"""
if self.is_superuser:
return "SuperAdmins"
try:
return self.groups.all()[0].name
except __HOLE__:
return "---"
| IndexError | dataset/ETHPy150Open tonioo/modoboa/modoboa/core/models.py/User.group |
4,323 |
@property
def role(self):
"""Return user role."""
if self.is_superuser:
return "SuperAdmins"
try:
return self.groups.all()[0].name
except __HOLE__:
return "---"
| IndexError | dataset/ETHPy150Open tonioo/modoboa/modoboa/core/models.py/User.role |
4,324 |
@property
def first(self):
"""
Return a Query that selects only the first element of this Query.
If no elements are available, returns a query with no results.
Example usage:
.. code:: python
>> q = Query(lambda: range(5))
>> q.first.results
[0]
Returns:
Query
"""
def _transform(xs): # pylint: disable=missing-docstring, invalid-name
try:
return [iter(xs).next()]
except __HOLE__:
return []
return self.transform(_transform, 'first')
| StopIteration | dataset/ETHPy150Open edx/bok-choy/bok_choy/query.py/Query.first |
4,325 |
def nth(self, index):
"""
Return a query that selects the element at `index` (starts from 0).
If no elements are available, returns a query with no results.
Example usage:
.. code:: python
>> q = Query(lambda: range(5))
>> q.nth(2).results
[2]
Args:
index (int): The index of the element to select (starts from 0)
Returns:
Query
"""
def _transform(xs): # pylint: disable=missing-docstring, invalid-name
try:
return [next(islice(iter(xs), index, None))]
# Gracefully handle (a) running out of elements, and (b) negative indices
except (__HOLE__, ValueError):
return []
return self.transform(_transform, 'nth')
| StopIteration | dataset/ETHPy150Open edx/bok-choy/bok_choy/query.py/Query.nth |
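Outside the Query class in the row above, the islice-based selection can be exercised on a plain iterable; the helper name below is made up for illustration:

from itertools import islice

def nth(xs, index):
    try:
        return [next(islice(iter(xs), index, None))]
    except (StopIteration, ValueError):
        # running out of elements, or a negative index rejected by islice
        return []

print(nth(range(5), 2))   # [2]
print(nth(range(5), 9))   # []
print(nth(range(5), -1))  # []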
4,326 |
def test_egg5(self):
"""Loading an app from an egg that has an import error in its models module raises that error"""
egg_name = '%s/brokenapp.egg' % self.egg_dir
sys.path.append(egg_name)
self.assertRaises(ImportError, load_app, 'broken_app')
try:
load_app('broken_app')
except __HOLE__, e:
# Make sure the message is indicating the actual
# problem in the broken app.
self.assertTrue("modelz" in e.args[0])
| ImportError | dataset/ETHPy150Open django-nonrel/django-nonrel/tests/regressiontests/app_loading/tests.py/EggLoadingTest.test_egg5 |
4,327 |
def repository(**kwargs):
if 'url' not in kwargs:
kwargs['url'] = 'https://github.com/example-{0}/example.git'.format(
random.randint(1, 100000))
try:
result = Repository.query.filter_by(url=kwargs['url'])[0]
except __HOLE__:
result = Repository(**kwargs)
db.session.add(result)
return result
| IndexError | dataset/ETHPy150Open dropbox/changes/changes/mock.py/repository |
4,328 |
def author(**kwargs):
if 'name' not in kwargs:
kwargs['name'] = ' '.join(get_sentences(1)[0].split(' ')[0:2])
if 'email' not in kwargs:
kwargs['email'] = '{0}@example.com'.format(slugify(kwargs['name']))
try:
result = Author.query.filter_by(email=kwargs['email'])[0]
except __HOLE__:
result = Author(**kwargs)
db.session.add(result)
return result
| IndexError | dataset/ETHPy150Open dropbox/changes/changes/mock.py/author |
4,329 |
@staticmethod
def get_file_interface(filename):
file = FileManager.locate_file(filename)
ext = os.path.splitext(filename)[1]
if file:
try:
return FileManager.file_interfaces[ext]
except __HOLE__:
return None
| KeyError | dataset/ETHPy150Open missionpinball/mpf/mpf/system/file_manager.py/FileManager.get_file_interface |
4,330 |
@staticmethod
def load(filename, verify_version=False, halt_on_error=False):
file = FileManager.locate_file(filename)
if file:
ext = os.path.splitext(file)[1]
try:
config = FileManager.file_interfaces[ext].load(file,
verify_version,
halt_on_error)
except __HOLE__:
# todo convert to exception
FileManager.log.error("No config file processor available for file type {}"
.format(ext))
sys.exit()
return config
elif halt_on_error:
raise IOError("Could not find file {}".format(filename))
else:
return dict()
| KeyError | dataset/ETHPy150Open missionpinball/mpf/mpf/system/file_manager.py/FileManager.load |
4,331 |
@staticmethod
def save(filename, data):
ext = os.path.splitext(filename)[1]
try:
FileManager.file_interfaces[ext].save(filename, data)
except __HOLE__:
# todo convert to exception
FileManager.log.error("No config file processor available for file type {}"
.format(ext))
sys.exit()
| KeyError | dataset/ETHPy150Open missionpinball/mpf/mpf/system/file_manager.py/FileManager.save |
4,332 |
def _fit(self, X):
X = iter(X)
try:
first = next(X)
except (TypeError, __HOLE__):
raise ValueError("Cannot fit with an empty dataset")
logger.debug("Starting flattener.fit")
# Build basic schema
self._fit_first(first)
if self.str_tuple_indexes or self.bag_indexes:
# Is there anything to one-hot encode or bag-of-words encode?
# See all datapoints looking for one-hot encodeable feature values
for datapoint in self._iter_valid(X, first=first):
self._fit_step(datapoint)
logger.debug("Finished flattener.fit")
logger.debug("Input tuple size %s, output vector size %s" %
(len(first), len(self.indexes)))
return self
| StopIteration | dataset/ETHPy150Open machinalis/featureforge/featureforge/flattener.py/FeatureMappingFlattener._fit |
4,333 |
def _fit_transform(self, X):
X = iter(X)
try:
first = next(X)
except (__HOLE__, StopIteration):
raise ValueError("Cannot fit with an empty dataset")
logger.debug("Starting flattener.fit_transform")
self._fit_first(first)
matrix = []
for datapoint in self._iter_valid(X, first=first):
self._fit_step(datapoint)
vector = self._transform_step(datapoint)
matrix.append(vector.reshape((1, -1)))
N = len(self.indexes)
for i, vector in enumerate(matrix):
if len(vector) == N:
break
# This works because one-hot encoded features go at the end
vector = numpy.array(vector)
vector.resize((1, N))
matrix[i] = vector
if not matrix:
result = numpy.zeros((0, N))
else:
result = numpy.concatenate(matrix)
logger.debug("Finished flattener.fit_transform")
logger.debug("Matrix has size %sx%s" % result.shape)
return result
| TypeError | dataset/ETHPy150Open machinalis/featureforge/featureforge/flattener.py/FeatureMappingFlattener._fit_transform |
4,334 |
def _sparse_fit_transform(self, X):
X = iter(X)
try:
first = next(X)
except (TypeError, __HOLE__):
raise ValueError("Cannot fit with an empty dataset")
logger.debug("Starting flattener.fit_transform")
self._fit_first(first)
data = array.array("d")
indices = array.array("i")
indptr = array.array("i", [0])
for datapoint in self._iter_valid(X, first=first):
self._fit_step(datapoint)
for i, value in self._sparse_transform_step(datapoint):
data.append(value)
indices.append(i)
indptr.append(len(data))
if len(indptr) == 0:
result = numpy.zeros((0, len(self.indexes)))
else:
result = csr_matrix((data, indices, indptr),
dtype=float,
shape=(len(indptr) - 1, len(self.indexes)))
logger.debug("Finished flattener.fit_transform")
logger.debug("Matrix has size %sx%s" % result.shape)
return result
| StopIteration | dataset/ETHPy150Open machinalis/featureforge/featureforge/flattener.py/FeatureMappingFlattener._sparse_fit_transform |
4,335 |
def _profile(prof_id, func):
"""
Wrap a call with a profiler
"""
# Note that some distro don't come with pstats
import pstats
try:
import cProfile as profile
except __HOLE__:
import profile
PROF_DAT = '/tmp/desktop-profile-%s.dat' % (prof_id,)
prof = profile.Profile()
try:
prof.runcall(func)
finally:
if os.path.exists(PROF_DAT):
os.remove(PROF_DAT)
prof.dump_stats(PROF_DAT)
# Sort the calls by time spent and show top 50
pstats.Stats(PROF_DAT).sort_stats('time').print_stats(50)
print >>sys.stderr, "Complete profile data in %s" % (PROF_DAT,)
| ImportError | dataset/ETHPy150Open cloudera/hue/desktop/core/src/desktop/manage_entry.py/_profile |
4,336 |
def managed(name, entries, connect_spec=None):
'''Ensure the existance (or not) of LDAP entries and their attributes
Example:
.. code-block:: yaml
ldapi:///:
ldap.managed:
- connect_spec:
bind:
method: sasl
- entries:
# make sure the entry doesn't exist
- cn=foo,ou=users,dc=example,dc=com:
- delete_others: True
# make sure the entry exists with only the specified
# attribute values
- cn=admin,dc=example,dc=com:
- delete_others: True
- replace:
cn:
- admin
description:
- LDAP administrator
objectClass:
- simpleSecurityObject
- organizationalRole
userPassword:
- {{pillar.ldap_admin_password}}
# make sure the entry exists, its olcRootDN attribute
# has only the specified value, the olcRootDN attribute
# doesn't exist, and all other attributes are ignored
- 'olcDatabase={1}hdb,cn=config':
- replace:
olcRootDN:
- cn=admin,dc=example,dc=com
# the admin entry has its own password attribute
olcRootPW: []
# note the use of 'default'. also note how you don't
# have to use list syntax if there is only one attribute
# value
- cn=foo,ou=users,dc=example,dc=com:
- delete_others: True
- default:
userPassword: changeme
shadowLastChange: 0
# keep sshPublicKey if present, but don't create
# the attribute if it is missing
sshPublicKey: []
- replace:
cn: foo
uid: foo
uidNumber: 1000
gidNumber: 1000
gecos: Foo Bar
givenName: Foo
sn: Bar
homeDirectory: /home/foo
loginShell: /bin/bash
objectClass:
- inetOrgPerson
- posixAccount
- top
- ldapPublicKey
- shadowAccount
:param name:
The URL of the LDAP server. This is ignored if
``connect_spec`` is either a connection object or a dict with
a ``'url'`` entry.
:param entries:
A description of the desired state of zero or more LDAP
entries.
``entries`` is an iterable of dicts. Each of these dict's
keys are the distinguished names (DNs) of LDAP entries to
manage. Each of these dicts is processed in order. A later
dict can reference an LDAP entry that was already mentioned in
an earlier dict, which makes it possible for later dicts to
enhance or alter the desired state of an LDAP entry.
The DNs are mapped to a description of the LDAP entry's
desired state. These LDAP entry descriptions are themselves
iterables of dicts. Each dict in the iterable is processed in
order. They contain directives controlling the entry's state.
The key names the directive type and the value is state
information for the directive. The specific structure of the
state information depends on the directive type.
The structure of ``entries`` looks like this::
[{dn1: [{directive1: directive1_state,
directive2: directive2_state},
{directive3: directive3_state}],
dn2: [{directive4: directive4_state,
directive5: directive5_state}]},
{dn3: [{directive6: directive6_state}]}]
These are the directives:
* ``'delete_others'``
Boolean indicating whether to delete attributes not
mentioned in this dict or any of the other directive
dicts for this DN. Defaults to ``False``.
If you don't want to delete an attribute if present, but
you also don't want to add it if it is missing or modify
it if it is present, you can use either the ``'default'``
directive or the ``'add'`` directive with an empty value
list.
* ``'default'``
A dict mapping an attribute name to an iterable of default
values for that attribute. If the attribute already
exists, it is left alone. If not, it is created using the
given list of values.
An empty value list is useful when you don't want to
create an attribute if it is missing but you do want to
preserve it if the ``'delete_others'`` key is ``True``.
* ``'add'``
Attribute values to add to the entry. This is a dict
mapping an attribute name to an iterable of values to add.
An empty value list is useful when you don't want to
create an attribute if it is missing but you do want to
preserve it if the ``'delete_others'`` key is ``True``.
* ``'delete'``
Attribute values to remove from the entry. This is a dict
mapping an attribute name to an iterable of values to
delete from the attribute. If the iterable is empty, all
of the attribute's values are deleted.
* ``'replace'``
Attributes to replace. This is a dict mapping an
attribute name to an iterable of values. Any existing
values for the attribute are deleted, then the given
values are added. The iterable may be empty.
In the above directives, the iterables of attribute values may
instead be ``None``, in which case an empty list is used, or a
scalar such as a string or number, in which case a new list
containing the scalar is used.
Note that if all attribute values are removed from an entry,
the entire entry is deleted.
:param connect_spec:
See the description of the ``connect_spec`` parameter of the
:py:func:`ldap3.connect <salt.modules.ldap3.connect>` function
in the :py:mod:`ldap3 <salt.modules.ldap3>` execution module.
If this is a dict and the ``'url'`` entry is not specified,
the ``'url'`` entry is set to the value of the ``name``
parameter.
:returns:
A dict with the following keys:
* ``'name'``
This is the same object passed to the ``name`` parameter.
* ``'changes'``
This is a dict describing the changes made (or, in test
mode, the changes that would have been attempted). If no
changes were made (or no changes would have been
attempted), then this dict is empty. Only successful
changes are included.
Each key is a DN of an entry that was changed (or would
have been changed). Entries that were not changed (or
would not have been changed) are not included. The value
is a dict with two keys:
* ``'old'``
The state of the entry before modification. If the
entry did not previously exist, this key maps to
``None``. Otherwise, the value is a dict mapping each
of the old entry's attributes to a list of its values
before any modifications were made. Unchanged
attributes are excluded from this dict.
* ``'new'``
The state of the entry after modification. If the
entry was deleted, this key maps to ``None``.
Otherwise, the value is a dict mapping each of the
entry's attributes to a list of its values after the
modifications were made. Unchanged attributes are
excluded from this dict.
Example ``'changes'`` dict where a new entry was created
with a single attribute containing two values::
{'dn1': {'old': None,
'new': {'attr1': ['val1', 'val2']}}}
Example ``'changes'`` dict where a new attribute was added
to an existing entry::
{'dn1': {'old': {},
'new': {'attr2': ['val3']}}}
* ``'result'``
One of the following values:
* ``True`` if no changes were necessary or if all changes
were applied successfully.
* ``False`` if at least one change was unable to be applied.
* ``None`` if changes would be applied but it is in test
mode.
'''
if connect_spec is None:
connect_spec = {}
try:
connect_spec.setdefault('url', name)
except __HOLE__:
# already a connection object
pass
connect = __salt__['ldap3.connect']
# hack to get at the ldap3 module to access the ldap3.LDAPError
# exception class. https://github.com/saltstack/salt/issues/27578
ldap3 = inspect.getmodule(connect)
with connect(connect_spec) as l:
old, new = _process_entries(l, entries)
# collect all of the affected entries (only the key is
# important in this dict; would have used an OrderedSet if
# there was one)
dn_set = OrderedDict()
dn_set.update(old)
dn_set.update(new)
# do some cleanup
dn_to_delete = set()
for dn in dn_set:
o = old.get(dn, {})
n = new.get(dn, {})
for x in o, n:
to_delete = set()
for attr, vals in six.iteritems(x):
if not len(vals):
# clean out empty attribute lists
to_delete.add(attr)
for attr in to_delete:
del x[attr]
if o == n:
# clean out unchanged entries
dn_to_delete.add(dn)
for dn in dn_to_delete:
for x in old, new:
x.pop(dn, None)
del dn_set[dn]
ret = {
'name': name,
'changes': {},
'result': None,
'comment': '',
}
if old == new:
ret['comment'] = 'LDAP entries already set'
ret['result'] = True
return ret
if __opts__['test']:
ret['comment'] = 'Would change LDAP entries'
changed_old = old
changed_new = new
success_dn_set = dn_set
else:
# execute the changes
changed_old = OrderedDict()
changed_new = OrderedDict()
# assume success; these will be changed on error
ret['result'] = True
ret['comment'] = 'Successfully updated LDAP entries'
errs = []
success_dn_set = OrderedDict()
for dn in dn_set:
o = old.get(dn, {})
n = new.get(dn, {})
try:
# perform the operation
if len(o):
if len(n):
op = 'modify'
assert o != n
__salt__['ldap3.change'](l, dn, o, n)
else:
op = 'delete'
__salt__['ldap3.delete'](l, dn)
else:
op = 'add'
assert len(n)
__salt__['ldap3.add'](l, dn, n)
# update these after the op in case an exception
# is raised
changed_old[dn] = o
changed_new[dn] = n
success_dn_set[dn] = True
except ldap3.LDAPError:
log.exception('failed to %s entry %s', op, dn)
errs.append((op, dn))
continue
if len(errs):
ret['result'] = False
ret['comment'] = 'failed to ' \
+ ', '.join((op + ' entry ' + dn
for op, dn in errs))
# set ret['changes']. filter out any unchanged attributes, and
# convert the value sets to lists before returning them to the
# user (sorted for easier comparisons)
for dn in success_dn_set:
o = changed_old.get(dn, {})
n = changed_new.get(dn, {})
changes = {}
ret['changes'][dn] = changes
for x, xn in ((o, 'old'), (n, 'new')):
if not len(x):
changes[xn] = None
continue
changes[xn] = dict(((attr, sorted(vals))
for attr, vals in six.iteritems(x)
if o.get(attr, ()) != n.get(attr, ())))
return ret
| AttributeError | dataset/ETHPy150Open saltstack/salt/salt/states/ldap.py/managed |
4,337 |
def _toset(thing):
'''helper to convert various things to a set
This enables flexibility in what users provide as the list of LDAP
entry attribute values. Note that the LDAP spec prohibits
duplicate values in an attribute and that the order is
unspecified, so a set is good for automatically removing
duplicates.
None becomes an empty set. Iterables except for strings have
their elements added to a new set. Non-None scalars (strings,
numbers, non-iterable objects, etc.) are added as the only member
of a new set.
'''
if thing is None:
return set()
if isinstance(thing, six.string_types):
return set((thing,))
# convert numbers to strings so that equality checks work
# (LDAP stores numbers as strings)
try:
return set((str(x) for x in thing))
except __HOLE__:
return set((str(thing),))
| TypeError | dataset/ETHPy150Open saltstack/salt/salt/states/ldap.py/_toset |
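For the _toset row above, a standalone sketch (str in place of six.string_types) shows how None, scalars, and iterables all normalise to a set of strings:

def toset(thing):
    if thing is None:
        return set()
    if isinstance(thing, str):
        return {thing}
    try:
        # numbers are stringified so equality checks against LDAP values work
        return {str(x) for x in thing}
    except TypeError:
        return {str(thing)}

print(toset(None))          # set()
print(toset('admin'))       # {'admin'}
print(toset([1000, 1000]))  # {'1000'}
print(toset(42))            # {'42'}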
4,338 |
def get_commit(self, rev):
"""Get commit object identified by `rev` (SHA or branch or tag name)."""
for prefix in ['refs/heads/', 'refs/tags/', '']:
key = prefix + rev
try:
obj = self[encode_for_git(key)]
if isinstance(obj, dulwich.objects.Tag):
obj = self[obj.object[1]]
return obj
except __HOLE__:
pass
raise KeyError(rev)
| KeyError | dataset/ETHPy150Open jonashaag/klaus/klaus/repo.py/FancyRepo.get_commit |
4,339 |
def get_default_branch(self):
"""Tries to guess the default repo branch name."""
for candidate in ['master', 'trunk', 'default', 'gh-pages']:
try:
self.get_commit(candidate)
return candidate
except KeyError:
pass
try:
return self.get_branch_names()[0]
except __HOLE__:
return None
| IndexError | dataset/ETHPy150Open jonashaag/klaus/klaus/repo.py/FancyRepo.get_default_branch |
4,340 |
def commit_diff(self, commit):
"""Return the list of changes introduced by `commit`."""
from klaus.utils import guess_is_binary
if commit.parents:
parent_tree = self[commit.parents[0]].tree
else:
parent_tree = None
summary = {'nfiles': 0, 'nadditions': 0, 'ndeletions': 0}
file_changes = [] # the changes in detail
dulwich_changes = self.object_store.tree_changes(parent_tree, commit.tree)
for (oldpath, newpath), (oldmode, newmode), (oldsha, newsha) in dulwich_changes:
summary['nfiles'] += 1
try:
# Check for binary files -- can't show diffs for these
if newsha and guess_is_binary(self[newsha]) or \
oldsha and guess_is_binary(self[oldsha]):
file_changes.append({
'is_binary': True,
'old_filename': oldpath or '/dev/null',
'new_filename': newpath or '/dev/null',
'chunks': None
})
continue
except __HOLE__:
# newsha/oldsha are probably related to submodules.
# Dulwich will handle that.
pass
bytesio = io.BytesIO()
dulwich.patch.write_object_diff(bytesio, self.object_store,
(oldpath, oldmode, oldsha),
(newpath, newmode, newsha))
files = prepare_udiff(decode_from_git(bytesio.getvalue()), want_header=False)
if not files:
# the diff module doesn't handle deletions/additions
# of empty files correctly.
file_changes.append({
'old_filename': oldpath or '/dev/null',
'new_filename': newpath or '/dev/null',
'chunks': [],
'additions': 0,
'deletions': 0,
})
else:
change = files[0]
summary['nadditions'] += change['additions']
summary['ndeletions'] += change['deletions']
file_changes.append(change)
return summary, file_changes
| KeyError | dataset/ETHPy150Open jonashaag/klaus/klaus/repo.py/FancyRepo.commit_diff |
4,341 |
def test_parsing(self):
# make '0' more likely to be chosen than other digits
digits = '000000123456789'
signs = ('+', '-', '')
# put together random short valid strings
# \d*[.\d*]?e
for i in range(1000):
for j in range(TEST_SIZE):
s = random.choice(signs)
intpart_len = random.randrange(5)
s += ''.join(random.choice(digits) for _ in range(intpart_len))
if random.choice([True, False]):
s += '.'
fracpart_len = random.randrange(5)
s += ''.join(random.choice(digits)
for _ in range(fracpart_len))
else:
fracpart_len = 0
if random.choice([True, False]):
s += random.choice(['e', 'E'])
s += random.choice(signs)
exponent_len = random.randrange(1, 4)
s += ''.join(random.choice(digits)
for _ in range(exponent_len))
if intpart_len + fracpart_len:
self.check_strtod(s)
else:
try:
float(s)
except __HOLE__:
pass
else:
assert False, "expected ValueError"
| ValueError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_strtod.py/StrtodTests.test_parsing |
4,342 |
def include_list(objs, method=None, **kwargs):
try:
s = get_object_serialization(objs[0], method)
except __HOLE__:
return []
return [s.method(obj, **kwargs) for obj in objs]
| IndexError | dataset/ETHPy150Open fiam/wapi/serializers/__init__.py/include_list |
4,343 |
def default(self, obj, **kw):
try:
return dict([(k, v) for k, v in obj.__dict__.iteritems() if not k.startswith('_')])
except __HOLE__:
return dict()
| AttributeError | dataset/ETHPy150Open fiam/wapi/serializers/__init__.py/BaseSerializer.default |
4,344 |
def _get_serialization(self, obj, method):
try:
m = getattr(self, method or 'default')
except __HOLE__:
raise NoSerializationMethod('Serialization "%s" is not defined in serializer "%s" for object "%s"' % \
(method, _SERIALIZERS_REGISTRY.get(obj.__class__, Serializer).__name__, obj.__class__.__name__))
return Serialization(self.obj_name(m) or obj.__class__.__name__.lower(), m)
| AttributeError | dataset/ETHPy150Open fiam/wapi/serializers/__init__.py/BaseSerializer._get_serialization |
4,345 |
def get_class_serializer(cls):
try:
return _SERIALIZERS_REGISTRY[cls]
except __HOLE__:
return _DEFAULT_SERIALIZER
| KeyError | dataset/ETHPy150Open fiam/wapi/serializers/__init__.py/get_class_serializer |
4,346 |
def provides_distribution(self, name, version=None):
"""
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
This function only returns the first result found, since no more than
one values are expected. If the directory is not found, returns ``None``.
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
"""
matcher = None
if not version is None:
try:
matcher = self._scheme.matcher('%s (%s)' % (name, version))
except __HOLE__:
raise DistlibException('invalid name or version: %r, %r' %
(name, version))
for dist in self.get_distributions():
provided = dist.provides
for p in provided:
p_name, p_ver = parse_name_and_version(p)
if matcher is None:
if p_name == name:
yield dist
break
else:
if p_name == name and matcher.match(p_ver):
yield dist
break
| ValueError | dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/distlib/database.py/DistributionPath.provides_distribution |
4,347 |
def __init__(self, path, metadata=None, env=None):
self.finder = finder = resources.finder_for_path(path)
if finder is None:
import pdb; pdb.set_trace ()
if env and env._cache_enabled and path in env._cache.path:
metadata = env._cache.path[path].metadata
elif metadata is None:
r = finder.find(METADATA_FILENAME)
# Temporary - for legacy support
if r is None:
r = finder.find('METADATA')
if r is None:
raise ValueError('no %s found in %s' % (METADATA_FILENAME,
path))
with contextlib.closing(r.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
super(InstalledDistribution, self).__init__(metadata, path, env)
if env and env._cache_enabled:
env._cache.add(self)
try:
r = finder.find('REQUESTED')
except __HOLE__:
import pdb; pdb.set_trace ()
self.requested = r is not None
| AttributeError | dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/distlib/database.py/InstalledDistribution.__init__ |
4,348 |
def _get_metadata(self, path):
requires = None
def parse_requires_data(data):
"""Create a list of dependencies from a requires.txt file.
*data*: the contents of a setuptools-produced requires.txt file.
"""
reqs = []
lines = data.splitlines()
for line in lines:
line = line.strip()
if line.startswith('['):
logger.warning('Unexpected line: quitting requirement scan: %r',
line)
break
r = parse_requirement(line)
if not r:
logger.warning('Not recognised as a requirement: %r', line)
continue
if r.extras:
logger.warning('extra requirements in requires.txt are '
'not supported')
if not r.constraints:
reqs.append(r.name)
else:
cons = ', '.join('%s%s' % c for c in r.constraints)
reqs.append('%s (%s)' % (r.name, cons))
return reqs
def parse_requires_path(req_path):
"""Create a list of dependencies from a requires.txt file.
*req_path*: the path to a setuptools-produced requires.txt file.
"""
reqs = []
try:
with codecs.open(req_path, 'r', 'utf-8') as fp:
reqs = parse_requires_data(fp.read())
except __HOLE__:
pass
return reqs
if path.endswith('.egg'):
if os.path.isdir(path):
meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
metadata = Metadata(path=meta_path, scheme='legacy')
req_path = os.path.join(path, 'EGG-INFO', 'requires.txt')
requires = parse_requires_path(req_path)
else:
# FIXME handle the case where zipfile is not available
zipf = zipimport.zipimporter(path)
fileobj = StringIO(
zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
metadata = Metadata(fileobj=fileobj, scheme='legacy')
try:
data = zipf.get_data('EGG-INFO/requires.txt')
requires = parse_requires_data(data.decode('utf-8'))
except IOError:
requires = None
elif path.endswith('.egg-info'):
if os.path.isdir(path):
req_path = os.path.join(path, 'requires.txt')
requires = parse_requires_path(req_path)
path = os.path.join(path, 'PKG-INFO')
metadata = Metadata(path=path, scheme='legacy')
else:
raise DistlibException('path must end with .egg-info or .egg, '
'got %r' % path)
if requires:
metadata.add_requirements(requires)
return metadata
| IOError | dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/distlib/database.py/EggInfoDistribution._get_metadata |
4,349 |
def htlm_link_if_uri(value):
try:
uri = value.uri
return '<a href="%s">%s</a>' % (uri, six.text_type(value))
except __HOLE__:
return six.text_type(value)
| AttributeError | dataset/ETHPy150Open trungdong/prov/prov/dot.py/htlm_link_if_uri |
4,350 |
def endElement(self, name, value, connection):
if name == 'DNSName':
self.dns_name = value
elif name == 'HTTPPort':
try:
self.http_port = int(value)
except ValueError:
self.http_port = value
elif name == 'HTTPSPort':
try:
self.https_port = int(value)
except __HOLE__:
self.https_port = value
elif name == 'OriginProtocolPolicy':
self.origin_protocol_policy = value
else:
setattr(self, name, value)
| ValueError | dataset/ETHPy150Open darcyliu/storyboard/boto/cloudfront/origin.py/CustomOrigin.endElement |
4,351 |
def set_raw_value(self, raw_data, value):
if self.multiple:
try:
del raw_data[self.input_name]
except __HOLE__:
pass
for v in value:
raw_data.add(self.input_name, v)
else:
raw_data[self.input_name] = value
| KeyError | dataset/ETHPy150Open SmartTeleMax/iktomi/iktomi/forms/fields.py/Field.set_raw_value |
4,352 |
def accept(self):
old = self.python_data
result = OrderedDict()
for index in self.form.raw_data.getall(self.indices_input_name):
try:
#XXX: we do not convert index to int, just check it.
# is it good idea?
int(index)
except __HOLE__:
logger.warning('Got incorrect index from form: %r', index)
continue
#TODO: describe this
field = self.field(name=str(index))
if not field.writable:
# readonly field
if index in old:
result[field.name] = old[field.name]
else:
result.update(field.accept())
self.clean_value = self.conv.accept(result)
return {self.name: self.clean_value}
| ValueError | dataset/ETHPy150Open SmartTeleMax/iktomi/iktomi/forms/fields.py/FieldList.accept |
4,353 |
def set_raw_value(self, raw_data, value):
indices = []
for index in range(1, len(value)+1):
index = str(index)
subvalue = value[index]
subfield = self.field(name=index)
subfield.set_raw_value(raw_data, subfield.from_python(subvalue))
indices.append(index)
try:
del raw_data[self.indices_input_name]
except __HOLE__:
pass
for index in indices:
raw_data.add(self.indices_input_name, index)
| KeyError | dataset/ETHPy150Open SmartTeleMax/iktomi/iktomi/forms/fields.py/FieldList.set_raw_value |
4,354 |
def plot(self, *args, **kwargs):
"""
Plot samples from the frequency distribution
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
plotted. For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param title: The title for the graph
:type title: str
:param cumulative: A flag to specify whether the plot is cumulative (default = False)
:type title: bool
"""
try:
from matplotlib import pylab
except __HOLE__:
raise ValueError('The plot function requires matplotlib to be installed.'
'See http://matplotlib.org/')
if len(args) == 0:
args = [len(self)]
samples = [item for item, _ in self.most_common(*args)]
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
else:
freqs = [self[sample] for sample in samples]
ylabel = "Counts"
# percents = [f * 100 for f in freqs] only in ProbDist?
pylab.grid(True, color="silver")
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
if "title" in kwargs:
pylab.title(kwargs["title"])
del kwargs["title"]
pylab.plot(freqs, **kwargs)
pylab.xticks(range(len(samples)), [compat.text_type(s) for s in samples], rotation=90)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
| ImportError | dataset/ETHPy150Open nltk/nltk/nltk/probability.py/FreqDist.plot |
4,355 |
def plot(self, *args, **kwargs):
"""
Plot the given samples from the conditional frequency distribution.
For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
try:
from matplotlib import pylab
except __HOLE__:
raise ValueError('The plot function requires matplotlib to be installed.'
'See http://matplotlib.org/')
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', sorted(self.conditions()))
title = _get_kwarg(kwargs, 'title', '')
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
for condition in conditions:
if cumulative:
freqs = list(self[condition]._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
legend_loc = 'lower right'
else:
freqs = [self[condition][sample] for sample in samples]
ylabel = "Counts"
legend_loc = 'upper right'
# percents = [f * 100 for f in freqs] only in ConditionalProbDist?
kwargs['label'] = "%s" % condition
pylab.plot(freqs, *args, **kwargs)
pylab.legend(loc=legend_loc)
pylab.grid(True, color="silver")
pylab.xticks(range(len(samples)), [compat.text_type(s) for s in samples], rotation=90)
if title:
pylab.title(title)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
|
ImportError
|
dataset/ETHPy150Open nltk/nltk/nltk/probability.py/ConditionalFreqDist.plot
|
4,356 |
def to_redshift(self, name, df, drop_if_exists=False, chunk_size=10000,
AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None, s3=None,
print_sql=False, bucket_location=None, s3_bucket=None):
"""
Upload a dataframe to redshift via s3.
Parameters
----------
name: str
name for your shiny new table
df: DataFrame
data frame you want to save to the db
drop_if_exists: bool (False)
whether you'd like to drop the table if it already exists
chunk_size: int (10000)
Number of DataFrame chunks to upload and COPY from S3. Upload speed
is *much* faster if chunks = multiple-of-slices. Ex: DW1.XL nodes
have 2 slices per node, so if running 2 nodes you will want
chunk_size=4, 8, etc
AWS_ACCESS_KEY: str
your aws access key. if this is None, the function will try
and grab AWS_ACCESS_KEY from your environment variables
AWS_SECRET_KEY: str
your aws secrety key. if this is None, the function will try
and grab AWS_SECRET_KEY from your environment variables
s3: S3
alternative to using keys, you can use an S3 object
print_sql: bool (False)
option for printing sql statement that will be executed
bucket_location: boto.s3.connection.Location
a specific AWS location in which to create the temporary transfer s3
bucket. This should match your redshift cluster's region.
Examples
--------
"""
if self.dbtype!="redshift":
raise Exception("Sorry, feature only available for redshift.")
try:
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.s3.connection import Location
# if boto is present, set the bucket_location to default.
# we can't do this in the function definition because we're
# lazily importing boto only if necessary here.
if bucket_location is None:
bucket_location = Location.DEFAULT
except __HOLE__:
raise Exception("Couldn't find boto library. Please ensure it is installed")
if s3 is not None:
AWS_ACCESS_KEY = s3.access_key
AWS_SECRET_KEY = s3.secret_key
if AWS_ACCESS_KEY is None:
AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY')
if AWS_SECRET_KEY is None:
AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY')
if AWS_ACCESS_KEY is None:
raise Exception("Must specify AWS_ACCESS_KEY as either function argument or as an environment variable `AWS_ACCESS_KEY`")
if AWS_SECRET_KEY is None:
raise Exception("Must specify AWS_SECRET_KEY as either function argument or as an environment variable `AWS_SECRET_KEY`")
conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
#this way users with permission on specific buckets can use this feature
bucket_name = "dbpy-{0}".format(uuid.uuid4())
if s3_bucket:
bucket = conn.get_bucket(s3_bucket)
bucket_name = s3_bucket
else:
bucket = conn.create_bucket(bucket_name, location=bucket_location)
# we're going to chunk the file into pieces. according to amazon, this is
# much faster when it comes time to run the \COPY statment.
#
# see http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html
sys.stderr.write("Transfering {0} to s3 in chunks".format(name))
len_df = len(df)
chunks = range(0, len_df, chunk_size)
def upload_chunk(i):
conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
chunk = df[i:(i+chunk_size)]
k = Key(bucket)
k.key = 'data-%d-%d.csv.gz' % (i, i + chunk_size)
k.set_metadata('parent', 'db.py')
out = StringIO()
with gzip.GzipFile(fileobj=out, mode="w") as f:
f.write(chunk.to_csv(index=False, encoding='utf-8'))
k.set_contents_from_string(out.getvalue())
sys.stderr.write(".")
return i
threads = []
for i in chunks:
t = threading.Thread(target=upload_chunk, args=(i, ))
t.start()
threads.append(t)
# join all threads
for t in threads:
t.join()
sys.stderr.write("done\n")
if drop_if_exists:
sql = "DROP TABLE IF EXISTS {0};".format(name)
if print_sql:
sys.stderr.write(sql + "\n")
self._try_command(sql)
# generate schema from pandas and then adapt for redshift
sql = pd.io.sql.get_schema(df, name)
# defaults to using SQLite format. need to convert it to Postgres
sql = sql.replace("[", "").replace("]", "")
# we'll create the table ONLY if it doens't exist
sql = sql.replace("CREATE TABLE", "CREATE TABLE IF NOT EXISTS")
if print_sql:
sys.stderr.write(sql + "\n")
self._try_command(sql)
self.con.commit()
# perform the \COPY here. the s3 argument is a prefix, so it'll pick up
# all of the data*.gz files we've created
sys.stderr.write("Copying data from s3 to redshfit...")
sql = """
copy {name} from 's3://{bucket_name}/data'
credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}'
CSV IGNOREHEADER as 1 GZIP;
""".format(name=name, bucket_name=bucket_name,
AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY)
if print_sql:
sys.stderr.write(sql + "\n")
self._try_command(sql)
self.con.commit()
sys.stderr.write("done!\n")
# tear down the bucket
sys.stderr.write("Tearing down bucket...")
for key in bucket.list():
key.delete()
if not s3_bucket:
conn.delete_bucket(bucket_name)
sys.stderr.write("done!")
|
ImportError
|
dataset/ETHPy150Open yhat/db.py/db/db.py/DB.to_redshift
|
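A hedged usage sketch for the to_redshift sample above; the DB(...) constructor keywords, connection details, and table name are assumptions for illustration, not taken from the sample itself.

# Illustrative only: connection details and the DataFrame are made up, and the
# DB(...) keyword arguments are assumed from typical db.py usage.
import os
import pandas as pd
from db import DB

df = pd.DataFrame({"user_id": [1, 2, 3], "clicks": [10, 4, 7]})

db = DB(username="analyst", password=os.environ.get("REDSHIFT_PW"),
        hostname="example.redshift.amazonaws.com", port=5439,
        dbname="analytics", dbtype="redshift")

# AWS keys fall back to the AWS_ACCESS_KEY / AWS_SECRET_KEY environment
# variables when not passed explicitly, per the docstring fragment above.
db.to_redshift(name="click_counts", df=df, drop_if_exists=True, chunk_size=1000)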
4,357 |
def access_cached(func):
def inner(self, context):
session = context['request'].session
try:
if session['allowed']['valid_for'] != session.get('token'):
raise KeyError()
except __HOLE__:
session['allowed'] = {"valid_for": session.get('token')}
key = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if key not in session['allowed']:
session['allowed'][key] = func(self, context)
session.modified = True
return session['allowed'][key]
return inner
|
KeyError
|
dataset/ETHPy150Open CiscoSystems/avos/horizon/base.py/access_cached
|
4,358 |
def _get_default_urlpatterns(self):
package_string = '.'.join(self.__module__.split('.')[:-1])
if getattr(self, 'urls', None):
try:
mod = import_module('.%s' % self.urls, package_string)
except __HOLE__:
mod = import_module(self.urls)
urlpatterns = mod.urlpatterns
else:
# Try importing a urls.py from the dashboard package
if module_has_submodule(import_module(package_string), 'urls'):
urls_mod = import_module('.urls', package_string)
urlpatterns = urls_mod.urlpatterns
else:
urlpatterns = patterns('')
return urlpatterns
|
ImportError
|
dataset/ETHPy150Open CiscoSystems/avos/horizon/base.py/HorizonComponent._get_default_urlpatterns
|
4,359 |
def _process_panel_configuration(self, config):
"""Add, remove and set default panels on the dashboard."""
try:
dashboard = config.get('PANEL_DASHBOARD')
if not dashboard:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_DASHBOARD defined.", config.__name__)
return
panel_slug = config.get('PANEL')
dashboard_cls = self.get_dashboard(dashboard)
panel_group = config.get('PANEL_GROUP')
default_panel = config.get('DEFAULT_PANEL')
# Set the default panel
if default_panel:
dashboard_cls.default_panel = default_panel
# Remove the panel
if config.get('REMOVE_PANEL', False):
for panel in dashboard_cls.get_panels():
if panel_slug == panel.slug:
dashboard_cls.unregister(panel.__class__)
elif config.get('ADD_PANEL', None):
# Add the panel to the dashboard
panel_path = config['ADD_PANEL']
mod_path, panel_cls = panel_path.rsplit(".", 1)
try:
mod = import_module(mod_path)
except __HOLE__:
LOG.warning("Could not load panel: %s", mod_path)
return
panel = getattr(mod, panel_cls)
dashboard_cls.register(panel)
if panel_group:
dashboard_cls.get_panel_group(panel_group).__class__.\
panels.append(panel.slug)
dashboard_cls.get_panel_group(panel_group).\
panels.append(panel.slug)
else:
panels = list(dashboard_cls.panels)
panels.append(panel)
dashboard_cls.panels = tuple(panels)
except Exception as e:
LOG.warning('Could not process panel %(panel)s: %(exc)s',
{'panel': panel_slug, 'exc': e})
|
ImportError
|
dataset/ETHPy150Open CiscoSystems/avos/horizon/base.py/Site._process_panel_configuration
|
4,360 |
def run(args):
"""Starts the execution after args have been parsed and logging has been setup.
"""
LOG.debug("CLI arguments are:")
utils.log_object(args, logger=LOG, level=logging.DEBUG, item_max_len=128)
# Keep the old args around so we have the full set to write out
saved_args = dict(args)
action = args.pop("action", '').strip().lower()
if re.match(r"^moo[o]*$", action):
return
try:
runner_cls = actions.class_for(action)
except Exception as ex:
raise excp.OptionException(str(ex))
if runner_cls.needs_sudo:
ensure_perms()
# Check persona file exists
persona_fn = args.pop('persona_fn')
if not persona_fn:
raise excp.OptionException("No persona file name specified!")
if not sh.isfile(persona_fn):
raise excp.OptionException("Invalid persona file %r specified!" % (persona_fn))
# Check origin file exists
origins_fn = args.pop('origins_fn')
if not origins_fn:
raise excp.OptionException("No origin file name specified!")
if not sh.isfile(origins_fn):
raise excp.OptionException("Invalid origin file %r specified!" % (origins_fn))
args['origins_fn'] = sh.abspth(origins_fn)
# Determine the root directory...
root_dir = sh.abspth(args.pop("dir"))
(repeat_string, line_max_len) = utils.welcome()
print(pprint.center_text("Action Runner", repeat_string, line_max_len))
# !!
# Here on out we should be using the logger (and not print)!!
# !!
# Ensure the anvil dirs are there if others are about to use it...
if not sh.isdir(root_dir):
LOG.info("Creating anvil root directory at path: %s", root_dir)
sh.mkdir(root_dir)
try:
for d in ANVIL_DIRS:
if sh.isdir(d):
continue
LOG.info("Creating anvil auxiliary directory at path: %s", d)
sh.mkdir(d)
except __HOLE__ as e:
LOG.warn("Failed ensuring auxiliary directories due to %s", e)
# Load the origins...
origins = _origins.load(args['origins_fn'],
patch_file=args.get('origins_patch'))
# Load the distro/s
possible_distros = distro.load(settings.DISTRO_DIR,
distros_patch=args.get('distros_patch'))
# Load + match the persona to the possible distros...
try:
persona_obj = persona.load(persona_fn)
except Exception as e:
raise excp.OptionException("Error loading persona file: %s due to %s" % (persona_fn, e))
else:
dist = persona_obj.match(possible_distros, origins)
LOG.info('Persona selected distro: %s from %s possible distros',
colorizer.quote(dist.name), len(possible_distros))
# Update the dist with any other info...
dist.inject_platform_overrides(persona_obj.distro_updates, source=persona_fn)
dist.inject_platform_overrides(origins, source=origins_fn)
# Print it out...
LOG.debug("Distro settings are:")
for line in dist.pformat(item_max_len=128).splitlines():
LOG.debug(line)
# Get the object we will be running with...
runner = runner_cls(distro=dist,
root_dir=root_dir,
name=action,
cli_opts=args)
# Now that the settings are known to work, store them for next run
store_current_settings(saved_args)
LOG.info("Starting action %s on %s for distro: %s",
colorizer.quote(action), colorizer.quote(utils.iso8601()),
colorizer.quote(dist.name))
LOG.info("Using persona: %s", colorizer.quote(persona_fn))
LOG.info("Using origins: %s", colorizer.quote(origins_fn))
LOG.info("In root directory: %s", colorizer.quote(root_dir))
start_time = time.time()
runner.run(persona_obj)
end_time = time.time()
pretty_time = utils.format_time(end_time - start_time)
LOG.info("It took %s seconds or %s minutes to complete action %s.",
colorizer.quote(pretty_time['seconds']), colorizer.quote(pretty_time['minutes']), colorizer.quote(action))
|
OSError
|
dataset/ETHPy150Open openstack/anvil/anvil/__main__.py/run
|
4,361 |
def store_current_settings(c_settings):
# Remove certain keys that just shouldn't be saved
to_save = dict(c_settings)
for k in ['action', 'verbose']:
if k in c_settings:
to_save.pop(k, None)
buf = six.StringIO()
buf.write("# Anvil last used settings\n")
buf.write(utils.add_header(SETTINGS_FILE,
utils.prettify_yaml(to_save),
adjusted=sh.isfile(SETTINGS_FILE)))
try:
sh.write_file(SETTINGS_FILE, buf.getvalue())
except __HOLE__ as e:
LOG.warn("Failed writing to %s due to %s", SETTINGS_FILE, e)
|
OSError
|
dataset/ETHPy150Open openstack/anvil/anvil/__main__.py/store_current_settings
|
4,362 |
def test_getvalue_after_close(self):
f = cStringIO.StringIO('hello')
f.getvalue()
f.close()
try:
f.getvalue()
except __HOLE__:
pass
else:
self.fail("cStringIO.StringIO: getvalue() after close() should have raised ValueError")
|
ValueError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_StringIO_jy.py/TestGetValueAfterClose.test_getvalue_after_close
|
4,363 |
def __new__(cls, name, bases, dct):
new_cls = super(MetaClass, cls).__new__(cls, name, bases, dct)
fields = {}
# Custom inheritance -- delicious _and_ necessary!
try:
parents = [b for b in bases if issubclass (b, DocumentModel)]
# Reversing simulates the usual MRO
parents.reverse()
for p in parents:
parent_fields = getattr(getattr(p, '_meta', None), 'fields', None)
if parent_fields:
fields.update(parent_fields)
except __HOLE__:
pass
# If there are any search fields defined on the class, allow them to
# to set themselves up, given that we now know the name of the field
# instance
for name, field in dct.items():
if isinstance(field, Field):
field.add_to_class(new_cls, name)
fields[name] = field
delattr(new_cls, name)
new_cls._meta = Options(fields)
return new_cls
|
NameError
|
dataset/ETHPy150Open potatolondon/search/search/indexes.py/MetaClass.__new__
|
4,364 |
def put(self, documents):
"""Add `documents` to this index"""
def get_fields(d):
"""Convenience function for getting the search API fields list
from the given document `d`.
"""
field = lambda f, n, v: self.FIELD_MAP[type(f)](name=n, value=v)
return [
field(f, n, f.to_search_value(getattr(d, n, None)))
for n, f in d._meta.fields.items()
]
# If documents is actually just a single document, stick it in a list
try:
len(documents)
except __HOLE__:
documents = [documents]
# Construct the actual search API documents to add to the underlying
# search API index
search_docs = [
search_api.Document(
doc_id=d.doc_id,
rank=d._rank,
fields=get_fields(d)
)
for d in documents
]
return self._index.put(search_docs)
|
TypeError
|
dataset/ETHPy150Open potatolondon/search/search/indexes.py/Index.put
|
4,365 |
def endElement(self, name):
self.name_stack.pop()
if name == 'StopPoint':
try:
# Classify metro stops according to their particular system
if self.meta['stop-type'] == 'MET':
try:
entity_type, is_entrance = self.entity_types[self.meta['stop-type'] + ':' + self.meta['atco-code'][6:8]]
except __HOLE__:
entity_type, is_entrance = self.entity_types['MET']
else:
entity_type, is_entrance = self.entity_types[self.meta['stop-type']]
except KeyError:
pass
else:
entity = self.add_stop(self.meta, entity_type, self.source, is_entrance)
if entity:
self.entities.add(entity)
elif name == 'StopAreaRef':
self.stop_areas.append(self.meta['stop-area'])
del self.meta['stop-area']
elif name == 'StopArea':
if self.areas != None:
in_area = False
for area in self.areas:
if self.meta['area-code'].startswith(area):
in_area = True
if not in_area:
return
sa, created = EntityGroup.objects.get_or_create(
source=self.source,
ref_code=self.meta['area-code'])
sa.save()
for lang_code, name in self.names.items():
if lang_code is None: lang_code = 'en'
set_name_in_language(sa, lang_code, title=name)
elif name == 'CommonName':
if self.lang not in self.names:
self.names[self.lang] = self.meta['common-name']
elif name == 'Name' and self.meta['name'] != '':
if self.lang not in self.names:
self.names[self.lang] = self.meta['name']
|
KeyError
|
dataset/ETHPy150Open mollyproject/mollyproject/molly/apps/places/providers/naptan.py/NaptanContentHandler.endElement
|
4,366 |
def characters(self, text):
top = tuple(self.name_stack[3:])
try:
self.meta[self.meta_names[top]] += text
except __HOLE__:
pass
|
KeyError
|
dataset/ETHPy150Open mollyproject/mollyproject/molly/apps/places/providers/naptan.py/NaptanContentHandler.characters
|
4,367 |
def run_monitor(self):
"""Called by main thread methods like __main__ so Ctrl-C works"""
self.monitor()
try:
while self._monitor_continously:
time.sleep(.02)
except __HOLE__:
self.stop_monitor()
|
KeyboardInterrupt
|
dataset/ETHPy150Open cmheisel/pywatch/src/pywatch/watcher.py/Watcher.run_monitor
|
4,368 |
def monitor_once(self, execute=True):
for f in self.files:
try:
mtime = os.stat(f).st_mtime
except __HOLE__:
#The file might be right in the middle of being written so sleep
time.sleep(1)
mtime = os.stat(f).st_mtime
if f not in self.mtimes.keys():
self.mtimes[f] = mtime
continue
if mtime > self.mtimes[f]:
if self.verbose: print("File changed: %s" % os.path.realpath(f))
self.mtimes[f] = mtime
if execute:
self.execute()
break
|
OSError
|
dataset/ETHPy150Open cmheisel/pywatch/src/pywatch/watcher.py/Watcher.monitor_once
|
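A minimal, self-contained sketch of the mtime-polling idea that monitor_once uses; the watched path and the callback are hypothetical.

# Standalone sketch of mtime-based change detection; the file name and the
# callback are hypothetical and unrelated to the pywatch sample above.
import os
import time

def poll_for_changes(path, on_change, interval=1.0, rounds=5):
    last_mtime = None
    for _ in range(rounds):
        try:
            mtime = os.stat(path).st_mtime
        except OSError:
            # File may be missing or mid-write; retry on the next round.
            time.sleep(interval)
            continue
        if last_mtime is not None and mtime > last_mtime:
            on_change(path)
        last_mtime = mtime
        time.sleep(interval)

if __name__ == "__main__":
    poll_for_changes("watched.txt", lambda p: print("changed:", p))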
4,369 |
def _validate_secure_origin(self, logger, location):
# Determine if this url used a secure transport mechanism
parsed = urllib_parse.urlparse(str(location))
origin = (parsed.scheme, parsed.hostname, parsed.port)
# The protocol to use to see if the protocol matches.
# Don't count the repository type as part of the protocol: in
# cases such as "git+ssh", only use "ssh". (I.e., Only verify against
# the last scheme.)
protocol = origin[0].rsplit('+', 1)[-1]
# Determine if our origin is a secure origin by looking through our
# hardcoded list of secure origins, as well as any additional ones
# configured on this PackageFinder instance.
for secure_origin in (SECURE_ORIGINS + self.secure_origins):
if protocol != secure_origin[0] and secure_origin[0] != "*":
continue
try:
# We need to do this decode dance to ensure that we have a
# unicode object, even on Python 2.x.
addr = ipaddress.ip_address(
origin[1]
if (
isinstance(origin[1], six.text_type) or
origin[1] is None
)
else origin[1].decode("utf8")
)
network = ipaddress.ip_network(
secure_origin[1]
if isinstance(secure_origin[1], six.text_type)
else secure_origin[1].decode("utf8")
)
except __HOLE__:
# We don't have both a valid address or a valid network, so
# we'll check this origin against hostnames.
if (origin[1] and
origin[1].lower() != secure_origin[1].lower() and
secure_origin[1] != "*"):
continue
else:
# We have a valid address and network, so see if the address
# is contained within the network.
if addr not in network:
continue
# Check to see if the port patches
if (origin[2] != secure_origin[2] and
secure_origin[2] != "*" and
secure_origin[2] is not None):
continue
# If we've gotten here, then this origin matches the current
# secure origin and we should return True
return True
# If we've gotten to this point, then the origin isn't secure and we
# will not accept it as a valid location to search. We will however
# log a warning that we are ignoring it.
logger.warning(
"The repository located at %s is not a trusted or secure host and "
"is being ignored. If this repository is available via HTTPS it "
"is recommended to use HTTPS instead, otherwise you may silence "
"this warning and allow it anyways with '--trusted-host %s'.",
parsed.hostname,
parsed.hostname,
)
return False
|
ValueError
|
dataset/ETHPy150Open pypa/pip/pip/index.py/PackageFinder._validate_secure_origin
|
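The address/network comparison in the pip sample uses the stdlib ipaddress module; a small sketch of that check in isolation, with example host and network values.

# Minimal illustration of the ipaddress-based origin check; values are examples.
import ipaddress

def host_in_network(host, network):
    # Return True when `host` parses as an IP address inside `network`.
    try:
        addr = ipaddress.ip_address(host)
        net = ipaddress.ip_network(network)
    except ValueError:
        # Not literal IP values; the pip code falls back to hostname matching.
        return False
    return addr in net

print(host_in_network("192.0.2.5", "192.0.2.0/24"))  # True
print(host_in_network("localhost", "127.0.0.0/8"))   # False (not an IP literal)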
4,370 |
def quarantine_db(object_file, server_type):
"""
In the case that a corrupt file is found, move it to a quarantined area to
allow replication to fix it.
:param object_file: path to corrupt file
:param server_type: type of file that is corrupt
('container' or 'account')
"""
object_dir = os.path.dirname(object_file)
quarantine_dir = os.path.abspath(
os.path.join(object_dir, '..', '..', '..', '..', 'quarantined',
server_type + 's', os.path.basename(object_dir)))
try:
renamer(object_dir, quarantine_dir, fsync=False)
except __HOLE__ as e:
if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
raise
quarantine_dir = "%s-%s" % (quarantine_dir, uuid.uuid4().hex)
renamer(object_dir, quarantine_dir, fsync=False)
|
OSError
|
dataset/ETHPy150Open openstack/swift/swift/common/db_replicator.py/quarantine_db
|
4,371 |
def roundrobin_datadirs(datadirs):
"""
Generator to walk the data dirs in a round robin manner, evenly
hitting each device on the system, and yielding any .db files
found (in their proper places). The partitions within each data
dir are walked randomly, however.
:param datadirs: a list of (path, node_id) to walk
:returns: A generator of (partition, path_to_db_file, node_id)
"""
def walk_datadir(datadir, node_id):
partitions = os.listdir(datadir)
random.shuffle(partitions)
for partition in partitions:
part_dir = os.path.join(datadir, partition)
if not os.path.isdir(part_dir):
continue
suffixes = os.listdir(part_dir)
if not suffixes:
os.rmdir(part_dir)
for suffix in suffixes:
suff_dir = os.path.join(part_dir, suffix)
if not os.path.isdir(suff_dir):
continue
hashes = os.listdir(suff_dir)
for hsh in hashes:
hash_dir = os.path.join(suff_dir, hsh)
if not os.path.isdir(hash_dir):
continue
object_file = os.path.join(hash_dir, hsh + '.db')
if os.path.exists(object_file):
yield (partition, object_file, node_id)
its = [walk_datadir(datadir, node_id) for datadir, node_id in datadirs]
while its:
for it in its:
try:
yield next(it)
except __HOLE__:
its.remove(it)
|
StopIteration
|
dataset/ETHPy150Open openstack/swift/swift/common/db_replicator.py/roundrobin_datadirs
|
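A toy version of the round-robin interleaving pattern used by roundrobin_datadirs, with plain in-memory iterables standing in for data directories.

# Toy round-robin interleave; exhausted iterators are dropped when they raise
# StopIteration, mirroring the pattern in the swift sample above.
def roundrobin(*iterables):
    its = [iter(x) for x in iterables]
    while its:
        for it in list(its):  # iterate over a copy so removal is safe
            try:
                yield next(it)
            except StopIteration:
                its.remove(it)

print(list(roundrobin([1, 2, 3], "ab", [10])))  # [1, 'a', 10, 2, 'b', 3]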
4,372 |
def delete_db(self, broker):
object_file = broker.db_file
hash_dir = os.path.dirname(object_file)
suf_dir = os.path.dirname(hash_dir)
with lock_parent_directory(object_file):
shutil.rmtree(hash_dir, True)
try:
os.rmdir(suf_dir)
except __HOLE__ as err:
if err.errno not in (errno.ENOENT, errno.ENOTEMPTY):
self.logger.exception(
_('ERROR while trying to clean up %s') % suf_dir)
return False
self.stats['remove'] += 1
device_name = self.extract_device(object_file)
self.logger.increment('removes.' + device_name)
return True
|
OSError
|
dataset/ETHPy150Open openstack/swift/swift/common/db_replicator.py/Replicator.delete_db
|
4,373 |
def _parse_sync_args(self, args):
"""
Convert remote sync args to remote_info dictionary.
"""
(remote_sync, hash_, id_, created_at, put_timestamp,
delete_timestamp, metadata) = args[:7]
remote_metadata = {}
if metadata:
try:
remote_metadata = json.loads(metadata)
except __HOLE__:
self.logger.error("Unable to decode remote metadata %r",
metadata)
remote_info = {
'point': remote_sync,
'hash': hash_,
'id': id_,
'created_at': created_at,
'put_timestamp': put_timestamp,
'delete_timestamp': delete_timestamp,
'metadata': remote_metadata,
}
return remote_info
|
ValueError
|
dataset/ETHPy150Open openstack/swift/swift/common/db_replicator.py/ReplicatorRpc._parse_sync_args
|
4,374 |
def poll(self, timeout = None):
if self.rlist or self.wlist:
try:
r, w, e = select(self.rlist, self.wlist, [], timeout)
except __HOLE__:
return None
else:
sleep(timeout)
return []
result = []
for s in r:
result.append((s, POLLIN))
for s in w:
result.append((s, POLLOUT))
return result
|
ValueError
|
dataset/ETHPy150Open Piratenfraktion-Berlin/OwnTube/videoportal/BitTornadoABC/BitTornado/selectpoll.py/poll.poll
|
4,375 |
def setUp(self):
try:
from cms.wizards.wizard_pool import wizard_pool
delete = [
'djangocms_blog',
'djangocms_blog.cms_wizards',
]
for module in delete:
if module in sys.modules:
del sys.modules[module]
wizard_pool._reset()
except __HOLE__:
# Not in django CMS 3.2+, no cleanup needed
pass
super(WizardTest, self).setUp()
|
ImportError
|
dataset/ETHPy150Open nephila/djangocms-blog/tests/test_wizards.py/WizardTest.setUp
|
4,376 |
def __call__(self, *args, **kwargs):
try:
return self._partial(*args, **kwargs)
except __HOLE__ as exc:
if self._should_curry(args, kwargs, exc):
return self.bind(*args, **kwargs)
raise
|
TypeError
|
dataset/ETHPy150Open pytoolz/toolz/toolz/functoolz.py/curry.__call__
|
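For context, the auto-currying behavior that curry.__call__ implements looks like this in use; a minimal sketch with the public toolz API.

# Minimal curry demonstration: calling with too few arguments returns another
# curried function instead of raising TypeError.
from toolz import curry

@curry
def add3(x, y, z):
    return x + y + z

add_one = add3(1)         # partial: still needs y and z
add_one_two = add_one(2)  # partial: still needs z
print(add_one_two(3))     # 6
print(add3(1, 2, 3))      # 6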
4,377 |
def isunary(f):
""" Does a function have only a single argument?
>>> def f(x):
... return x
>>> isunary(f)
True
>>> isunary(lambda x, y: x + y)
False
"""
try:
if sys.version_info[0] == 2: # pragma: py3 no cover
spec = inspect.getargspec(f)
if sys.version_info[0] == 3: # pragma: py2 no cover
spec = inspect.getfullargspec(f)
return bool(spec and spec.varargs is None and not has_kwargs(f)
and len(spec.args) == 1)
except __HOLE__: # pragma: no cover
return None # in Python < 3.4 builtins fail, return None
|
TypeError
|
dataset/ETHPy150Open pytoolz/toolz/toolz/functoolz.py/isunary
|
4,378 |
@curry
def memoize(func, cache=None, key=None):
""" Cache a function's result for speedy future evaluation
Considerations:
Trades memory for speed.
Only use on pure functions.
>>> def add(x, y): return x + y
>>> add = memoize(add)
Or use as a decorator
>>> @memoize
... def add(x, y):
... return x + y
Use the ``cache`` keyword to provide a dict-like object as an initial cache
>>> @memoize(cache={(1, 2): 3})
... def add(x, y):
... return x + y
Note that the above works as a decorator because ``memoize`` is curried.
It is also possible to provide a ``key(args, kwargs)`` function that
calculates keys used for the cache, which receives an ``args`` tuple and
``kwargs`` dict as input, and must return a hashable value. However,
the default key function should be sufficient most of the time.
>>> # Use key function that ignores extraneous keyword arguments
>>> @memoize(key=lambda args, kwargs: args)
... def add(x, y, verbose=False):
... if verbose:
... print('Calculating %s + %s' % (x, y))
... return x + y
"""
if cache is None:
cache = {}
try:
may_have_kwargs = has_kwargs(func)
# Is unary function (single arg, no variadic argument or keywords)?
is_unary = isunary(func)
except TypeError: # pragma: no cover
may_have_kwargs = True
is_unary = False
if key is None:
if is_unary:
def key(args, kwargs):
return args[0]
elif may_have_kwargs:
def key(args, kwargs):
return (
args or None,
frozenset(kwargs.items()) if kwargs else None,
)
else:
def key(args, kwargs):
return args
def memof(*args, **kwargs):
k = key(args, kwargs)
try:
return cache[k]
except TypeError:
raise TypeError("Arguments to memoized function must be hashable")
except __HOLE__:
cache[k] = result = func(*args, **kwargs)
return result
try:
memof.__name__ = func.__name__
except AttributeError:
pass
memof.__doc__ = func.__doc__
memof.__wrapped__ = func
return memof
|
KeyError
|
dataset/ETHPy150Open pytoolz/toolz/toolz/functoolz.py/memoize
|
4,379 |
@property
def __doc__(self):
def composed_doc(*fs):
"""Generate a docstring for the composition of fs.
"""
if not fs:
# Argument name for the docstring.
return '*args, **kwargs'
return '{f}({g})'.format(f=fs[0].__name__, g=composed_doc(*fs[1:]))
try:
return (
'lambda *args, **kwargs: ' +
composed_doc(*reversed((self.first,) + self.funcs))
)
except __HOLE__:
# One of our callables does not have a `__name__`, whatever.
return 'A composition of functions'
|
AttributeError
|
dataset/ETHPy150Open pytoolz/toolz/toolz/functoolz.py/Compose.__doc__
|
4,380 |
@property
def __name__(self):
try:
return '_of_'.join(
f.__name__ for f in reversed((self.first,) + self.funcs),
)
except __HOLE__:
return type(self).__name__
|
AttributeError
|
dataset/ETHPy150Open pytoolz/toolz/toolz/functoolz.py/Compose.__name__
|
4,381 |
def __get__(self, instance, owner):
if instance is None:
return self._class_doc
exc = instance.exc
try:
if isinstance(exc, tuple):
exc_name = '(%s)' % ', '.join(
map(attrgetter('__name__'), exc),
)
else:
exc_name = exc.__name__
return dedent(
"""\
A wrapper around {inst.func.__name__!r} that will except:
{exc}
and handle any exceptions with {inst.handler.__name__!r}.
Docs for {inst.func.__name__!r}:
{inst.func.__doc__}
Docs for {inst.handler.__name__!r}:
{inst.handler.__doc__}
"""
).format(
inst=instance,
exc=exc_name,
)
except __HOLE__:
return self._class_doc
|
AttributeError
|
dataset/ETHPy150Open pytoolz/toolz/toolz/functoolz.py/_ExceptsDoc.__get__
|
4,382 |
@property
def __name__(self):
exc = self.exc
try:
if isinstance(exc, tuple):
exc_name = '_or_'.join(map(attrgetter('__name__'), exc))
else:
exc_name = exc.__name__
return '%s_excepting_%s' % (self.func.__name__, exc_name)
except __HOLE__:
return 'excepting'
|
AttributeError
|
dataset/ETHPy150Open pytoolz/toolz/toolz/functoolz.py/excepts.__name__
|
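For reference, the excepts wrapper whose __name__ is synthesized here is used like this; a minimal sketch with the public toolz API.

# Minimal excepts demonstration: route a chosen exception to a handler
# instead of letting it propagate.
from toolz import excepts

safe_div = excepts(ZeroDivisionError,
                   lambda pair: pair[0] / pair[1],
                   lambda exc: float("inf"))

print(safe_div((10, 2)))  # 5.0
print(safe_div((10, 0)))  # inf
print(safe_div.__name__)  # '<lambda>_excepting_ZeroDivisionError'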
4,383 |
def is_valid_args(func, args, kwargs, sigspec=None):
if sigspec is None:
try:
sigspec = inspect.signature(func)
except (__HOLE__, TypeError) as e:
sigspec = e
if isinstance(sigspec, ValueError):
return _is_builtin_valid_args(func, args, kwargs)
elif isinstance(sigspec, TypeError):
return False
try:
sigspec.bind(*args, **kwargs)
except (TypeError, AttributeError):
return False
return True
|
ValueError
|
dataset/ETHPy150Open pytoolz/toolz/toolz/functoolz.py/is_valid_args
|
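The Python 3 branch of is_valid_args leans on inspect.signature(...).bind; an isolated sketch of that mechanism with a hypothetical sample function.

# Sketch of signature-based argument validation using only the stdlib.
import inspect

def greet(name, punctuation="!"):
    return "Hello, " + name + punctuation

sig = inspect.signature(greet)

def fits(args, kwargs):
    try:
        sig.bind(*args, **kwargs)
        return True
    except TypeError:
        return False

print(fits(("world",), {}))            # True
print(fits((), {"name": "world"}))     # True
print(fits((), {"punctuation": "?"}))  # False (missing 'name')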
4,384 |
def is_valid_args(func, args, kwargs, sigspec=None):
if sigspec is None:
try:
sigspec = inspect.getargspec(func)
except __HOLE__ as e:
sigspec = e
if isinstance(sigspec, TypeError):
if not callable(func):
return False
return _is_builtin_valid_args(func, args, kwargs)
spec = sigspec
defaults = spec.defaults or ()
num_pos = len(spec.args) - len(defaults)
missing_pos = spec.args[len(args):num_pos]
if any(arg not in kwargs for arg in missing_pos):
return False
if spec.varargs is None:
num_extra_pos = max(0, len(args) - num_pos)
else:
num_extra_pos = 0
kwargs = dict(kwargs)
# Add missing keyword arguments (unless already included in `args`)
missing_kwargs = spec.args[num_pos + num_extra_pos:]
kwargs.update(zip(missing_kwargs, defaults[num_extra_pos:]))
# Convert call to use positional arguments
args = args + tuple(kwargs.pop(key) for key in spec.args[len(args):])
if (
not spec.keywords and kwargs or
not spec.varargs and len(args) > len(spec.args) or
set(spec.args[:len(args)]) & set(kwargs)
):
return False
else:
return True
|
TypeError
|
dataset/ETHPy150Open pytoolz/toolz/toolz/functoolz.py/is_valid_args
|
4,385 |
def is_partial_args(func, args, kwargs, sigspec=None):
if sigspec is None:
try:
sigspec = inspect.signature(func)
except (__HOLE__, TypeError) as e:
sigspec = e
if isinstance(sigspec, ValueError):
return _is_builtin_partial_args(func, args, kwargs)
elif isinstance(sigspec, TypeError):
return False
try:
sigspec.bind_partial(*args, **kwargs)
except (TypeError, AttributeError):
return False
return True
|
ValueError
|
dataset/ETHPy150Open pytoolz/toolz/toolz/functoolz.py/is_partial_args
|
4,386 |
def is_partial_args(func, args, kwargs, sigspec=None):
if sigspec is None:
try:
sigspec = inspect.getargspec(func)
except __HOLE__ as e:
sigspec = e
if isinstance(sigspec, TypeError):
if not callable(func):
return False
return _is_builtin_partial_args(func, args, kwargs)
spec = sigspec
defaults = spec.defaults or ()
num_pos = len(spec.args) - len(defaults)
if spec.varargs is None:
num_extra_pos = max(0, len(args) - num_pos)
else:
num_extra_pos = 0
kwargs = dict(kwargs)
# Add missing keyword arguments (unless already included in `args`)
missing_kwargs = spec.args[num_pos + num_extra_pos:]
kwargs.update(zip(missing_kwargs, defaults[num_extra_pos:]))
# Add missing position arguments as keywords (may already be in kwargs)
missing_args = spec.args[len(args):num_pos + num_extra_pos]
kwargs.update((x, None) for x in missing_args)
# Convert call to use positional arguments
args = args + tuple(kwargs.pop(key) for key in spec.args[len(args):])
if (
not spec.keywords and kwargs or
not spec.varargs and len(args) > len(spec.args) or
set(spec.args[:len(args)]) & set(kwargs)
):
return False
else:
return True
|
TypeError
|
dataset/ETHPy150Open pytoolz/toolz/toolz/functoolz.py/is_partial_args
|
4,387 |
def whichdb(filename):
"""Guess which db package to use to open a db file.
Return values:
- None if the database file can't be read;
- empty string if the file can be read but can't be recognized
- the module name (e.g. "dbm" or "gdbm") if recognized.
Importing the given module may still fail, and opening the
database using that module may still fail.
"""
# Check for dbm first -- this has a .pag and a .dir file
try:
f = open(filename + os.extsep + "pag", "rb")
f.close()
# dbm linked with gdbm on OS/2 doesn't have .dir file
if not (dbm.library == "GNU gdbm" and sys.platform == "os2emx"):
f = open(filename + os.extsep + "dir", "rb")
f.close()
return "dbm"
except IOError:
# some dbm emulations based on Berkeley DB generate a .db file
# some do not, but they should be caught by the dbhash checks
try:
f = open(filename + os.extsep + "db", "rb")
f.close()
# guarantee we can actually open the file using dbm
# kind of overkill, but since we are dealing with emulations
# it seems like a prudent step
if dbm is not None:
d = dbm.open(filename)
d.close()
return "dbm"
except (IOError, _dbmerror):
pass
# Check for dumbdbm next -- this has a .dir and a .dat file
try:
# First check for presence of files
os.stat(filename + os.extsep + "dat")
size = os.stat(filename + os.extsep + "dir").st_size
# dumbdbm files with no keys are empty
if size == 0:
return "dumbdbm"
f = open(filename + os.extsep + "dir", "rb")
try:
if f.read(1) in ("'", '"'):
return "dumbdbm"
finally:
f.close()
except (__HOLE__, IOError):
pass
# See if the file exists, return None if not
try:
f = open(filename, "rb")
except IOError:
return None
# Read the start of the file -- the magic number
s16 = f.read(16)
f.close()
s = s16[0:4]
# Return "" if not at least 4 bytes
if len(s) != 4:
return ""
# Convert to 4-byte int in native byte order -- return "" if impossible
try:
(magic,) = struct.unpack("=l", s)
except struct.error:
return ""
# Check for GNU dbm
if magic in (0x13579ace, 0x13579acd, 0x13579acf):
return "gdbm"
# Check for old Berkeley db hash file format v2
if magic in (0x00061561, 0x61150600):
return "bsddb185"
# Later versions of Berkeley db hash file have a 12-byte pad in
# front of the file type
try:
(magic,) = struct.unpack("=l", s16[-4:])
except struct.error:
return ""
# Check for BSD hash
if magic in (0x00061561, 0x61150600):
return "dbhash"
# Unknown
return ""
|
OSError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/whichdb.py/whichdb
|
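On Python 3 the same capability is exposed as dbm.whichdb; a brief self-contained sketch that creates a throwaway database in a temporary directory and asks the stdlib to identify it.

# Python 3 counterpart of the whichdb helper above; the module name reported
# ('dbm.gnu', 'dbm.ndbm', or 'dbm.dumb') depends on the platform.
import dbm
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "example")
    db = dbm.open(path, "c")
    db[b"key"] = b"value"
    db.close()
    print(dbm.whichdb(path))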
4,388 |
@classmethod
def setUpClass(cls):
try:
import pysal as ps
except __HOLE__:
raise unittest.SkipTest("PySAL is not installed")
pth = ps.examples.get_path("columbus.shp")
cls.tracts = read_file(pth)
|
ImportError
|
dataset/ETHPy150Open geopandas/geopandas/geopandas/tests/test_plotting.py/TestPySALPlotting.setUpClass
|
4,389 |
def readFromFile(self, infile, format="fasta"):
"""read multiple alignment from file in various format."""
self.mMali = {}
self.mIdentifiers = []
pattern_parse_ranges = re.compile("(\S+)/(\d+)-(\d+)")
# read profiles - a profile possibly consists of several
# entries per file so treat it differently
if format.lower() == "profile":
while 1:
line = infile.readline()
if not line:
return False
if line[0] != "#":
break
if line[0] != ">":
raise "expected '>' at as first character in line %s" % line
try:
self.mName, length, width = re.match(
">profile=(\S+) length=(\d+) width=(\d+)", line).groups()
except __HOLE__:
raise "could not parse header line %s" % line
width = int(width)
for x in range(0, width):
id = "seq%i" % x
self.mIdentifiers.append(id)
line = infile.readline()
if not line:
raise "expected %i sequences, only got %i" % (width, x)
self.mMali[id] = AlignedString(
id, 0, self.countCharacters(line[:-1]), line[:-1])
return True
if isinstance(infile, list) or \
isinstance(infile, tuple):
lines = infile
else:
lines = infile.readlines()
if format not in ("stockholm"):
# save comments
self.mComments = filter(lambda x: x[0] == "#", lines)
lines = filter(lambda x: x[0] != "#", lines)
else:
self.mComments = []
# remove empty lines
lines = filter(lambda x: x.strip(), lines)
if not lines:
raise AttributeError("empty alignment")
def getId(id, s):
x = pattern_parse_ranges.match(id)
if x:
id, fr, to = x.groups()
fr, to = int(fr) - 1, int(to)
else:
fr, to = 0, self.countCharacters(s)
self.mWriteRanges = False
return id, fr, to
#######################################################################
if format.lower() == "plain":
for line in lines:
if not line.strip():
continue
data = line[:-1].split("\t")
id = data[3]
xid = id
x = 0
while xid in self.mMali:
xid = id + "-" + str(x)
x += 1
self.addEntry(
AlignedString(xid,
int(data[0]) - 1,
int(data[2]),
data[1]))
#######################################################################
elif format.lower() == "fasta":
pattern_identifier = "\S+"
id = None
fragments = []
for line in lines:
if line[0] == ">":
if id:
s = re.sub("\s", "", string.join(fragments, ""))
id, fr, to = getId(id, s)
self.addEntry(AlignedString(id, fr, to, s))
id = re.search(
"^(%s)" % pattern_identifier, line[1:-1]).group(0)
fragments = []
continue
fragments.append(line[:-1])
s = re.sub("\s", "", string.join(fragments, ""))
id, fr, to = getId(id, s)
self.addEntry(AlignedString(id, fr, to, s))
#######################################################################
elif format.lower() == "phylip":
nsequences, nchars = re.split("\s+", lines[0][:-1].strip())
nsequences = int(nsequences)
for line in lines[1:]:
l = line[:-1].strip()
if not l:
continue
id, sequence = re.match("(\S+)\s+(.*)", l).groups()
sequence = re.sub("\s", "", sequence)
if id not in self.mMali:
self.mIdentifiers.append(id)
self.mMali[id] = []
self.mMali[id].append(sequence)
for id, frags in self.mMali.items():
s = "".join(frags)
fr, to = 0, self.countCharacters(s)
self.mMali[id] = AlignedString(id, fr, to, s)
#######################################################################
elif format.lower() == "clustal":
# skip header line
del lines[0]
fragments = {}
# prune lines
lines = map(lambda x: x.strip(), lines)
# remove empty lines
lines = filter(lambda x: len(x[:-1]) > 0, lines)
for line in lines:
# remove consensus lines
if line[0] in ("*", ":"):
continue
data = re.split("\s+", line)
if len(data) != 2:
raise ValueError("parsing error in line %s" % line)
id, fragment = data
if id not in fragments:
fragments[id] = []
self.mIdentifiers.append(id)
fragments[id].append(fragment)
for id, f in fragments.items():
s = re.sub("\s", "", string.join(f, ""))
self.mMali[id] = AlignedString(
id, 0, self.countCharacters(s), s)
elif format.lower() == "stockholm":
# skip header line
assert lines[0].startswith(
"# STOCKHOLM"), "file is not in stockholm format"
del lines[0]
fragments = {}
annotations = {}
# prune lines
lines = map(lambda x: x.strip(), lines)
# remove empty lines
lines = filter(lambda x: len(x[:-1]) > 0, lines)
for line in lines:
data = re.split("\s+", line)
if data[0] == "//":
break
if line[0] == '#':
if data[0] == "#=GC":
id, fragment = data[1:3]
else:
self.mComments.append(line)
continue
if id not in annotations:
annotations[id] = []
annotations[id].append(fragment)
else:
if len(data) > 2:
raise ValueError("parsing error in line %s" % line)
elif len(data) == 1:
# treat empty alignments/lines
id = data[0]
fragment = ""
else:
id, fragment = data
if id not in fragments:
fragments[id] = []
self.mIdentifiers.append(id)
fragments[id].append(fragment)
n = []
for id in self.mIdentifiers:
f = fragments[id]
s = re.sub("\s", "", string.join(f, ""))
x = pattern_parse_ranges.match(id)
if x:
id, fr, to = x.groups()
fr, to = int(fr) - 1, int(to)
else:
fr, to = 0, self.countCharacters(s)
n.append(id)
self.mMali[id] = AlignedString(id, fr, to, s)
self.mIdentifiers = n
for id, f in annotations.items():
s = re.sub("\s", "", string.join(f, ""))
annotations[id] = s
self.mAnnotations = annotations
else:
raise "unknown alignment format %s" % format
if len(self.mMali) == 0:
self.mLength = 0
else:
self.mLength = min(
map(lambda x: len(x.mString), self.mMali.values()))
|
AttributeError
|
dataset/ETHPy150Open CGATOxford/cgat/CGAT/Mali.py/Mali.readFromFile
|
4,390 |
def as_xml(self, format_datetime=None):
format_datetime = format_datetime or json_format_datetime
case = ElementTree.Element('case')
order = ['case_id', 'date_modified', 'create', 'update', 'close',
self.CASE_TYPE, 'user_id', 'case_name', 'external_id', 'date_opened', 'owner_id']
def sort_key(item):
word, _ = item
try:
i = order.index(word)
return 0, i
except __HOLE__:
return 1, word
def fmt(value):
if value is None:
return ''
if isinstance(value, datetime):
return unicode(format_datetime(value))
elif isinstance(value, (basestring, int, date)):
return unicode(value)
else:
raise CaseBlockError("Can't transform to XML: {}; unexpected type {}.".format(value, type(value)))
def dict_to_xml(block, dct):
if dct.has_key('_attrib'):
for (key, value) in dct['_attrib'].items():
if value is not CaseBlock.undefined:
block.set(key, fmt(value))
if dct.has_key('_text'):
block.text = unicode(dct['_text'])
for (key, value) in sorted(dct.items(), key=sort_key):
if value is not CaseBlock.undefined and not key.startswith('_'):
elem = ElementTree.Element(key)
block.append(elem)
if isinstance(value, dict):
dict_to_xml(elem, value)
else:
elem.text = fmt(value)
dict_to_xml(case, self)
return case
|
ValueError
|
dataset/ETHPy150Open dimagi/commcare-hq/corehq/ex-submodules/casexml/apps/case/mock.py/CaseBlock.as_xml
|
4,391 |
def _as_dict(self, check_relationship):
all_field = {}
for field in self._meta.fields:
if field.rel:
if not check_relationship:
continue
if is_deferred(self, field):
continue
field_value = getattr(self, field.attname)
# If current field value is an expression, we are not evaluating it
if is_db_expression(field_value):
continue
try:
# Store the converted value for fields with conversion
field_value = field.to_python(field_value)
except __HOLE__:
# The current value is not valid so we cannot convert it
pass
if is_buffer(field_value):
# psycopg2 returns uncopyable type buffer for bytea
field_value = str(field_value)
# Explanation of copy usage here :
# https://github.com/romgar/django-dirtyfields/commit/efd0286db8b874b5d6bd06c9e903b1a0c9cc6b00
all_field[field.name] = copy(field_value)
return all_field
|
ValidationError
|
dataset/ETHPy150Open romgar/django-dirtyfields/src/dirtyfields/dirtyfields.py/DirtyFieldsMixin._as_dict
|
4,392 |
def test_exponential_entity_expansion(self):
try:
import defusedxml
except __HOLE__:
defusedxml = None
if defusedxml is None:
raise SkipTest('This test case only applies when using defusedxml')
document_xml = '''
<p>
<r>
<t>&c;</t>
</r>
</p>
'''
xml_header = '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE xml [
<!ENTITY a "123">
<!ENTITY b "&a;&a;">
<!ENTITY c "&b;&b;">
]>
'''
document = WordprocessingDocumentFactory(xml_header=xml_header)
document.add(MainDocumentPart, document_xml)
expected_html = '<p>123123123123</p>'
try:
self.assert_document_generates_html(document, expected_html)
raise AssertionError(
'Expected "EntitiesForbidden" exception did not occur',
)
except defusedxml.EntitiesForbidden:
pass
|
ImportError
|
dataset/ETHPy150Open CenterForOpenScience/pydocx/tests/export/html/test_xml_vulnerabilities.py/XMLVulnerabilitiesTestCase.test_exponential_entity_expansion
|
4,393 |
def test_entity_blowup(self):
try:
import defusedxml
except __HOLE__:
defusedxml = None
if defusedxml is None:
raise SkipTest('This test case only applies when using defusedxml')
document_xml = '''
<p>
<r>
<t>&a;</t>
</r>
</p>
'''
xml_header = '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE xml [
<!ENTITY a "123">
]>
'''
document = WordprocessingDocumentFactory(xml_header=xml_header)
document.add(MainDocumentPart, document_xml)
expected_html = '<p>123</p>'
try:
self.assert_document_generates_html(document, expected_html)
raise AssertionError(
'Expected "EntitiesForbidden" exception did not occur',
)
except defusedxml.EntitiesForbidden:
pass
|
ImportError
|
dataset/ETHPy150Open CenterForOpenScience/pydocx/tests/export/html/test_xml_vulnerabilities.py/XMLVulnerabilitiesTestCase.test_entity_blowup
|
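A compact illustration of the protection these two tests exercise, parsing a hostile payload directly with defusedxml; the XML string is an example.

# defusedxml refuses to expand DTD-defined entities, raising EntitiesForbidden
# instead of expanding them (or exhausting memory on nested definitions).
import defusedxml
import defusedxml.ElementTree as ET

payload = """<?xml version="1.0"?>
<!DOCTYPE xml [
  <!ENTITY a "123">
]>
<p>&a;</p>
"""

try:
    ET.fromstring(payload)
except defusedxml.EntitiesForbidden:
    print("entity expansion blocked")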
4,394 |
@dispatch(Reduction)
def _lean(expr, fields=None):
child = expr._child
try:
fields = child.active_columns()
except __HOLE__:
fields = child.fields
child, child_fields = _lean(child, fields=set(filter(None, fields)))
return expr._subs({expr._child: child}), child_fields
|
AttributeError
|
dataset/ETHPy150Open blaze/blaze/blaze/expr/optimize.py/_lean
|
4,395 |
@extensions.expected_errors((404, 409, 501))
def index(self, req, server_id):
context = req.environ["nova.context"]
authorize(context)
instance = common.get_instance(self.compute_api, context, server_id)
try:
# NOTE(gmann): To make V21 same as V2 API, this method will call
# 'get_diagnostics' instead of 'get_instance_diagnostics'.
# In future, 'get_instance_diagnostics' needs to be called to
# provide VM diagnostics in a defined format for all driver.
# BP - https://blueprints.launchpad.net/nova/+spec/v3-diagnostics.
return self.compute_api.get_diagnostics(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'get_diagnostics', server_id)
except __HOLE__:
common.raise_feature_not_supported()
|
NotImplementedError
|
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/server_diagnostics.py/ServerDiagnosticsController.index
|
4,396 |
def mkdir_p(path):
try:
os.makedirs(path)
except __HOLE__ as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
|
OSError
|
dataset/ETHPy150Open rllab/rllab/rllab/misc/console.py/mkdir_p
|
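Usage note: on Python 3.2+ the same idempotent behavior is available directly from the stdlib via exist_ok=True; the paths below are examples.

# Modern equivalent of the mkdir_p helper above: exist_ok=True makes repeated
# creation a no-op, matching the EEXIST check in the sample.
import os
import tempfile

base = tempfile.mkdtemp()
target = os.path.join(base, "a", "b", "c")

os.makedirs(target, exist_ok=True)
os.makedirs(target, exist_ok=True)  # second call raises nothing
print(os.path.isdir(target))        # True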
4,397 |
def save_redeem(self, addr, redeem):
try:
self.kv.store('safe_timelock_redeem', addr, {'redeem':redeem})
except __HOLE__:
# Already saved
pass
|
AssertionError
|
dataset/ETHPy150Open orisi/orisi/src/oracle/handlers/safe_timelock_contract/safe_timelock_create_handler.py/SafeTimelockCreateHandler.save_redeem
|
4,398 |
def __init__(self, value, key):
"""Initialze internal state.
Eval the string value and save the result.
Args:
value: String to compile as a regular expression.
key: The YAML field name.
Raises:
InvalidCodeInConfiguration: if the code could not be evaluated, or
the evalauted method is not callable.
"""
self.value = value
try:
self.method = eval(value, _global_temp_globals)
except Exception, err:
raise bulkloader_errors.InvalidCodeInConfiguration(
'Invalid code for %s. Code: "%s". Details: %s' % (key, value, err))
if not callable(self.method):
raise bulkloader_errors.InvalidCodeInConfiguration(
'Code for %s did not return a callable. Code: "%s".' %
(key, value))
self.supports_bulkload_state = False
try:
argspec = inspect.getargspec(self.method)
if 'bulkload_state' in argspec[0]:
self.supports_bulkload_state = True
except __HOLE__:
pass
|
TypeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/ext/bulkload/bulkloader_parser.py/EvaluatedCallable.ParsedMethod.__init__
|
4,399 |
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = []
results2 = []
def f():
try:
i = self.barrier.wait()
if i == self.N//2:
raise RuntimeError
self.barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except __HOLE__:
self.barrier.abort()
pass
self.run_threads(f)
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
|
RuntimeError
|
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/lock_tests.py/BarrierTests.test_abort
|
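A self-contained sketch of the barrier-abort behavior this test exercises, using the stdlib threading.Barrier with a small, arbitrary worker count.

# Aborting a Barrier puts it in the broken state; waiters receive
# BrokenBarrierError instead of blocking forever.
import threading

barrier = threading.Barrier(3)
errors = []

def worker():
    try:
        barrier.wait(timeout=2)
    except threading.BrokenBarrierError:
        errors.append(True)

threads = [threading.Thread(target=worker) for _ in range(2)]
for t in threads:
    t.start()

barrier.abort()  # the third party never arrives; break the barrier instead
for t in threads:
    t.join()

print(len(errors), barrier.broken)  # 2 True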