Unnamed: 0 (int64, 0–10k) | function (string, 79–138k characters) | label (string, 20 classes) | info (string, 42–261 characters)
---|---|---|---|
4,000 |
def test_is_greater_than_or_equal_to_bad_arg_type_failure(self):
    try:
        assert_that(self.d1).is_greater_than_or_equal_to(123)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')
|
TypeError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestDate.test_is_greater_than_or_equal_to_bad_arg_type_failure
|
4,001 |
def test_is_less_than_failure(self):
    try:
        d2 = datetime.datetime.today()
        assert_that(d2).is_less_than(self.d1)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).matches('Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be less than <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
|
AssertionError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestDate.test_is_less_than_failure
|
4,002 |
def test_is_less_than_bad_arg_type_failure(self):
    try:
        assert_that(self.d1).is_less_than(123)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')
|
TypeError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestDate.test_is_less_than_bad_arg_type_failure
|
4,003 |
def test_is_less_than_or_equal_to_failure(self):
    try:
        d2 = datetime.datetime.today()
        assert_that(d2).is_less_than_or_equal_to(self.d1)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).matches('Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be less than or equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
|
AssertionError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestDate.test_is_less_than_or_equal_to_failure
|
4,004 |
def test_is_less_than_or_equal_to_bad_arg_type_failure(self):
    try:
        assert_that(self.d1).is_less_than_or_equal_to(123)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')
|
TypeError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestDate.test_is_less_than_or_equal_to_bad_arg_type_failure
|
4,005 |
def test_is_between_failure(self):
    try:
        d2 = datetime.datetime.today()
        d3 = datetime.datetime.today()
        assert_that(self.d1).is_between(d2, d3)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).matches('Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be between <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> and <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
|
AssertionError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestDate.test_is_between_failure
|
4,006 |
def test_is_between_bad_arg1_type_failure(self):
    try:
        assert_that(self.d1).is_between(123, 456)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('given low arg must be <datetime>, but was <int>')
|
TypeError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestDate.test_is_between_bad_arg1_type_failure
|
4,007 |
def test_is_between_bad_arg2_type_failure(self):
    try:
        d2 = datetime.datetime.today()
        assert_that(self.d1).is_between(d2, 123)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('given high arg must be <datetime>, but was <datetime>')
|
TypeError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestDate.test_is_between_bad_arg2_type_failure
|
4,008 |
def test_is_close_to_failure(self):
    try:
        d2 = self.d1 + datetime.timedelta(minutes=5)
        assert_that(self.d1).is_close_to(d2, datetime.timedelta(minutes=1))
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).matches('Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be close to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> within tolerance <\d+:\d{2}:\d{2}>, but was not.')
|
AssertionError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestDate.test_is_close_to_failure
|
4,009 |
def test_is_close_to_bad_arg_type_failure(self):
    try:
        assert_that(self.d1).is_close_to(123, 456)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('given arg must be datetime, but was <int>')
|
TypeError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestDate.test_is_close_to_bad_arg_type_failure
|
4,010 |
def test_is_close_to_bad_tolerance_arg_type_failure(self):
    try:
        d2 = datetime.datetime.today()
        assert_that(self.d1).is_close_to(d2, 123)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('given tolerance arg must be timedelta, but was <int>')
|
TypeError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestDate.test_is_close_to_bad_tolerance_arg_type_failure
|
4,011 |
def test_is_greater_than_failure(self):
    try:
        t2 = datetime.timedelta(seconds=90)
        assert_that(self.t1).is_greater_than(t2)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).matches('Expected <\d{1,2}:\d{2}:\d{2}> to be greater than <\d{1,2}:\d{2}:\d{2}>, but was not.')
|
AssertionError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestTimedelta.test_is_greater_than_failure
|
4,012 |
def test_is_greater_than_bad_arg_type_failure(self):
    try:
        assert_that(self.t1).is_greater_than(123)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')
|
TypeError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestTimedelta.test_is_greater_than_bad_arg_type_failure
|
4,013 |
def test_is_greater_than_or_equal_to_failure(self):
    try:
        t2 = datetime.timedelta(seconds=90)
        assert_that(self.t1).is_greater_than_or_equal_to(t2)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).matches('Expected <\d{1,2}:\d{2}:\d{2}> to be greater than or equal to <\d{1,2}:\d{2}:\d{2}>, but was not.')
|
AssertionError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestTimedelta.test_is_greater_than_or_equal_to_failure
|
4,014 |
def test_is_greater_than_or_equal_to_bad_arg_type_failure(self):
    try:
        assert_that(self.t1).is_greater_than_or_equal_to(123)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')
|
TypeError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestTimedelta.test_is_greater_than_or_equal_to_bad_arg_type_failure
|
4,015 |
def test_is_less_than_failure(self):
    try:
        t2 = datetime.timedelta(seconds=90)
        assert_that(t2).is_less_than(self.t1)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).matches('Expected <\d{1,2}:\d{2}:\d{2}> to be less than <\d{1,2}:\d{2}:\d{2}>, but was not.')
|
AssertionError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestTimedelta.test_is_less_than_failure
|
4,016 |
def test_is_less_than_bad_arg_type_failure(self):
    try:
        assert_that(self.t1).is_less_than(123)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')
|
TypeError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestTimedelta.test_is_less_than_bad_arg_type_failure
|
4,017 |
def test_is_less_than_or_equal_to_failure(self):
    try:
        t2 = datetime.timedelta(seconds=90)
        assert_that(t2).is_less_than_or_equal_to(self.t1)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).matches('Expected <\d{1,2}:\d{2}:\d{2}> to be less than or equal to <\d{1,2}:\d{2}:\d{2}>, but was not.')
|
AssertionError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestTimedelta.test_is_less_than_or_equal_to_failure
|
4,018 |
def test_is_less_than_or_equal_to_bad_arg_type_failure(self):
    try:
        assert_that(self.t1).is_less_than_or_equal_to(123)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')
|
TypeError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestTimedelta.test_is_less_than_or_equal_to_bad_arg_type_failure
|
4,019 |
def test_is_between_failure(self):
    try:
        d2 = datetime.timedelta(seconds=30)
        d3 = datetime.timedelta(seconds=40)
        assert_that(self.t1).is_between(d2, d3)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).matches('Expected <\d{1,2}:\d{2}:\d{2}> to be between <\d{1,2}:\d{2}:\d{2}> and <\d{1,2}:\d{2}:\d{2}>, but was not.')
|
AssertionError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_datetime.py/TestTimedelta.test_is_between_failure
|
4,020 |
def __getitem__(self, item):
    try:
        return self.data[int(item)]
    except __HOLE__:
        return getattr(self, item)
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/api/SOAPpy/Types.py/arrayType.__getitem__
|
4,021 |
def _setDetail(self, detail = None):
    if detail != None:
        self.detail = detail
    else:
        try: del self.detail
        except __HOLE__: pass
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/api/SOAPpy/Types.py/faultType._setDetail
|
4,022 |
@internationalizeDocstring
def lart(self, irc, msg, args, channel, id, text):
    """[<channel>] [<id>] <who|what> [for <reason>]
    Uses the Luser Attitude Readjustment Tool on <who|what> (for <reason>,
    if given). If <id> is given, uses that specific lart. <channel> is
    only necessary if the message isn't sent in the channel itself.
    """
    if ' for ' in text:
        (target, reason) = list(map(str.strip, text.split(' for ', 1)))
    else:
        (target, reason) = (text, '')
    if id is not None:
        try:
            lart = self.db.get(channel, id)
        except __HOLE__:
            irc.error(format(_('There is no lart with id #%i.'), id))
            return
    else:
        lart = self.db.random(channel)
        if not lart:
            irc.error(format(_('There are no larts in my database '
                               'for %s.'), channel))
            return
    text = lart.text
    if ircutils.strEqual(target, irc.nick):
        target = msg.nick
        reason = self._replaceFirstPerson(_('trying to dis me'), irc.nick)
    else:
        target = self._replaceFirstPerson(target, msg.nick)
        reason = self._replaceFirstPerson(reason, msg.nick)
    if target.endswith('.'):
        target = target.rstrip('.')
    text = text.replace('$who', target)
    if reason:
        text += _(' for ') + reason
    if self.registryValue('showIds', channel):
        text += format(' (#%i)', lart.id)
    irc.reply(text, action=True)
|
KeyError
|
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Lart/plugin.py/Lart.lart
|
4,023 |
@option(
'-k', '--kernel_name', action='store', default="default",
help='arbitrary name given to reference kernel'
)
@option(
'-i', '--ids', action='store', default=None,
help='the machine ids to use from the cluster'
)
def line_parallel(self, module_name, class_name, kernel_name="default", ids=None):
"""
%parallel MODULE CLASS [-k NAME] [-i [...]] - construct an interface to the cluster.
Example:
%parallel bash_kernel BashKernel
%parallel bash_kernel BashKernel -k bash
%parallel bash_kernel BashKernel --i [0,2:5,9,...]
cluster_size and cluster_rank variables are set upon
initialization of the remote node (if the kernel
supports %set).
Use %px or %%px to send code to the cluster.
"""
try:
from ipyparallel import Client
except __HOLE__:
from IPython.parallel import Client
count = 1
while count <= 5:
try:
self.client = Client()
break
except:
print("Waiting on cluster to start...")
time.sleep(2)
count += 1
if count == 6:
raise Exception("Cluster was not started.")
if ids is None:
count = 1
while count <= 5:
try:
self.view = self.client[:]
break
except:
print("Waiting for engines...")
time.sleep(2)
count += 1
if count == 6:
raise Exception("Engines were not started.")
else:
# ids[:] = slice(None, None, None)
# ids[1:3] = slice(1, 3, None)
# ids[1:3:1] = slice(1, 3, 1)
# ids[1, 2, ...] = [1, 2, Ellipsis]
# ids[1, 2:4, ...] = [1, slice(2, 4, None), Ellipsis]
try:
ids_slice = eval("slicer%s" % ids) # slicer[0,...,7]
except:
ids_slice = slicer[:]
if isinstance(ids_slice, (slice, int)):
count = 1
while count <= 5:
try:
self.view = self.client[ids_slice]
break
except:
print("Waiting for engines...")
time.sleep(2)
count += 1
if count == 6:
raise Exception("Engines were not started.")
else: # tuple of indexes/slices
# FIXME: if so, handle Ellipsis
view = None
for item in ids_slice:
count = 1
while count <= 5:
try:
client = self.client[item]
if view:
## FIXME: can't do this:
view.append(client)
else:
view = client
break
except:
print("Waiting on cluster to start...")
time.sleep(2)
count += 1
if count == 6:
raise Exception("Cluster was not started.")
self.view = view
self.view_load_balanced = self.client.load_balanced_view()
self.module_name = module_name
self.class_name = class_name
self.kernel_name = kernel_name
self.view.execute("""
import os
for key, value in %(env)s.items():
os.environ[key] = value
try:
kernels
except:
kernels = {}
from %(module_name)s import %(class_name)s
%(class_name)s.subkernel(get_ipython().parent)
kernels['%(kernel_name)s'] = %(class_name)s()
""" % {"module_name": module_name,
"class_name": class_name,
"kernel_name": kernel_name,
"env": str(self.kernel.env)},
block=True)
self.view["kernels['%s'].set_variable(\"cluster_size\", %s)" % (
kernel_name, len(self.client))]
self.client[:].scatter('cluster_rank', self.client.ids, flatten=True)
self.view["kernels['%s'].set_variable(\"cluster_rank\", cluster_rank)" % (
kernel_name)]
self.retval = None
|
ImportError
|
dataset/ETHPy150Open Calysto/metakernel/metakernel/magics/parallel_magic.py/ParallelMagic.line_parallel
|
4,024 |
def line_pmap(self, function_name, args, kernel_name=None):
"""
%pmap FUNCTION [ARGS1,ARGS2,...] - ("parallel map") call a FUNCTION on args
This line magic will apply a function name to all of the
arguments given one at a time using a dynamic load balancing scheduler.
Currently, the args are provided as a Python expression (with no spaces).
You must first setup a cluster using the %parallel magic.
Examples:
%pmap function-name-in-language range(10)
%pmap function-name-in-language [1,2,3,4]
%pmap run_experiment range(1,100,5)
%pmap run_experiment ["test1","test2","test3"]
%pmap f [(1,4,7),(2,3,5),(7,2,2)]
The function name must be a function that is available on all
nodes in the cluster. For example, you could:
%%px
(define myfunc
(lambda (n)
(+ n 1)))
to define myfunc on all machines (use %%px -e to also define
it in the running notebook or console). Then you can apply it
to a list of arguments:
%%pmap myfunc range(100)
The load balancer will run myfunc on the next available node
in the cluster.
Note: not all languages may support running a function via this magic.
"""
if kernel_name is None:
kernel_name = self.kernel_name
# To make sure we can find `kernels`:
try:
from ipyparallel.util import interactive
except __HOLE__:
from IPython.parallel.util import interactive
f = interactive(lambda arg, kname=kernel_name, fname=function_name: \
kernels[kname].do_function_direct(fname, arg))
self.retval = self.view_load_balanced.map_async(f, eval(args))
|
ImportError
|
dataset/ETHPy150Open Calysto/metakernel/metakernel/magics/parallel_magic.py/ParallelMagic.line_pmap
|
4,025 |
def startService(self):
    Service.startService(self)
    parent = self._config_path.parent()
    try:
        if not parent.exists():
            parent.makedirs()
        if not self._config_path.exists():
            uuid = unicode(uuid4())
            self._config_path.setContent(json.dumps({u"uuid": uuid,
                                                     u"version": 1}))
    except __HOLE__ as e:
        raise CreateConfigurationError(e.args[1])
    config = json.loads(self._config_path.getContent())
    self.node_id = config[u"uuid"]
    self.pool.startService()
|
OSError
|
dataset/ETHPy150Open ClusterHQ/flocker/flocker/volume/service.py/VolumeService.startService
|
4,026 |
def enumerate(self):
    """Get a listing of all volumes managed by this service.
    :return: A ``Deferred`` that fires with an iterator of :class:`Volume`.
    """
    enumerating = self.pool.enumerate()
    def enumerated(filesystems):
        for filesystem in filesystems:
            # XXX It so happens that this works but it's kind of a
            # fragile way to recover the information:
            # https://clusterhq.atlassian.net/browse/FLOC-78
            basename = filesystem.get_path().basename()
            try:
                node_id, name = basename.split(b".", 1)
                name = VolumeName.from_bytes(name)
                # We convert to a UUID object for validation purposes:
                UUID(node_id)
            except __HOLE__:
                # ValueError may happen because:
                # 1. We can't split on `.`.
                # 2. We couldn't parse the UUID.
                # 3. We couldn't parse the volume name.
                # In any of those cases it's presumably because that's
                # not a filesystem Flocker is managing. Perhaps a user
                # created it, so we just ignore it.
                continue
            # Probably shouldn't yield this volume if the uuid doesn't
            # match this service's uuid.
            yield Volume(
                node_id=node_id.decode("ascii"),
                name=name,
                service=self,
                size=filesystem.size)
    enumerating.addCallback(enumerated)
    return enumerating
|
ValueError
|
dataset/ETHPy150Open ClusterHQ/flocker/flocker/volume/service.py/VolumeService.enumerate
|
4,027 |
def draw_rectangle(self, x, y, width, height):
    # Nasty retry if the image is loaded for the first time and it's truncated
    try:
        d = ImageDraw.Draw(self.image)
    except __HOLE__:
        d = ImageDraw.Draw(self.image)
    d.rectangle([x, y, x + width, y + height])
    del d
|
IOError
|
dataset/ETHPy150Open thumbor/thumbor/thumbor/engines/pil.py/Engine.draw_rectangle
|
4,028 |
def read(self, extension=None, quality=None): # NOQA
# returns image buffer in byte format.
img_buffer = BytesIO()
ext = extension or self.extension or self.get_default_extension()
options = {
'quality': quality
}
if ext == '.jpg' or ext == '.jpeg':
options['optimize'] = True
if self.context.config.PROGRESSIVE_JPEG:
# Can't simply set options['progressive'] to the value
# of self.context.config.PROGRESSIVE_JPEG because save
# operates on the presence of the key in **options, not
# the value of that setting.
options['progressive'] = True
if self.image.mode != 'RGB':
self.image = self.image.convert('RGB')
else:
subsampling_config = self.context.config.PILLOW_JPEG_SUBSAMPLING
qtables_config = self.context.config.PILLOW_JPEG_QTABLES
if subsampling_config is not None or qtables_config is not None:
options['quality'] = 0 # can't use 'keep' here as Pillow would try to extract qtables/subsampling and fail
orig_subsampling = self.subsampling
orig_qtables = self.qtables
if (subsampling_config == 'keep' or subsampling_config is None) and (orig_subsampling is not None):
options['subsampling'] = orig_subsampling
else:
options['subsampling'] = subsampling_config
if (qtables_config == 'keep' or qtables_config is None) and (orig_qtables and 2 <= len(orig_qtables) <= 4):
options['qtables'] = orig_qtables
else:
options['qtables'] = qtables_config
if options['quality'] is None:
options['quality'] = self.context.config.QUALITY
if self.icc_profile is not None:
options['icc_profile'] = self.icc_profile
if self.context.config.PRESERVE_EXIF_INFO:
if self.exif is not None:
options['exif'] = self.exif
if self.image.mode == 'P' and self.transparency:
options['transparency'] = self.transparency
try:
if ext == '.webp':
if self.image.mode not in ['RGB', 'RGBA']:
mode = None
if self.image.mode == 'P':
mode = 'RGBA'
else:
mode = 'RGBA' if self.image.mode[-1] == 'A' else 'RGB'
self.image = self.image.convert(mode)
if ext in ['.png', '.gif'] and self.image.mode == 'CMYK':
self.image = self.image.convert('RGBA')
self.image.format = FORMATS[ext]
self.image.save(img_buffer, FORMATS[ext], **options)
except __HOLE__:
logger.exception('Could not save as improved image, consider to increase ImageFile.MAXBLOCK')
self.image.save(img_buffer, FORMATS[ext])
except KeyError:
logger.exception('Image format not found in PIL: %s' % ext)
ext = self.get_default_extension()
# extension could not help determine format => use default
self.image.format = FORMATS[ext]
self.image.save(img_buffer, FORMATS[ext])
results = img_buffer.getvalue()
img_buffer.close()
return results
|
IOError
|
dataset/ETHPy150Open thumbor/thumbor/thumbor/engines/pil.py/Engine.read
|
4,029 |
def _format_fixed_ips(port):
    try:
        return '\n'.join([jsonutils.dumps(ip) for ip in port['fixed_ips']])
    except (TypeError, __HOLE__):
        return ''
|
KeyError
|
dataset/ETHPy150Open openstack/python-neutronclient/neutronclient/neutron/v2_0/port.py/_format_fixed_ips
|
4,030 |
def _format_fixed_ips_csv(port):
    try:
        return jsonutils.dumps(port['fixed_ips'])
    except (__HOLE__, KeyError):
        return ''
|
TypeError
|
dataset/ETHPy150Open openstack/python-neutronclient/neutronclient/neutron/v2_0/port.py/_format_fixed_ips_csv
|
4,031 |
def gather_modules():
    """Collect the information and construct the output."""
    reqs = {}
    errors = []
    output = []
    for package in sorted(explore_module('homeassistant.components', True)):
        try:
            module = importlib.import_module(package)
        except __HOLE__:
            errors.append(package)
            continue
        if not getattr(module, 'REQUIREMENTS', None):
            continue
        for req in module.REQUIREMENTS:
            reqs.setdefault(req, []).append(package)
    for key in reqs:
        reqs[key] = sorted(reqs[key],
                           key=lambda name: (len(name.split('.')), name))
    if errors:
        print("******* ERROR")
        print("Errors while importing: ", ', '.join(errors))
        print("Make sure you import 3rd party libraries inside methods.")
        return None
    output.append('# Home Assistant core')
    output.append('\n')
    output.append('\n'.join(core_requirements()))
    output.append('\n')
    for pkg, requirements in sorted(reqs.items(), key=lambda item: item[0]):
        for req in sorted(requirements,
                          key=lambda name: (len(name.split('.')), name)):
            output.append('\n# {}'.format(req))
        if comment_requirement(pkg):
            output.append('\n# {}\n'.format(pkg))
        else:
            output.append('\n{}\n'.format(pkg))
    return ''.join(output)
|
ImportError
|
dataset/ETHPy150Open home-assistant/home-assistant/script/gen_requirements_all.py/gather_modules
|
4,032 |
def runjobs_by_signals(self, when, options):
    """ Run jobs from the signals """
    # Thanks for Ian Holsman for the idea and code
    from django_extensions.management import signals
    from django.db import models
    from django.conf import settings
    verbosity = int(options.get('verbosity', 1))
    for app_name in settings.INSTALLED_APPS:
        try:
            __import__(app_name + '.management', '', '', [''])
        except __HOLE__:
            pass
    for app in models.get_apps():
        if verbosity > 1:
            app_name = '.'.join(app.__name__.rsplit('.')[:-1])
            print "Sending %s job signal for: %s" % (when, app_name)
        if when == 'minutely':
            signals.run_minutely_jobs.send(sender=app, app=app)
        elif when == 'quarter_hourly':
            signals.run_quarter_hourly_jobs.send(sender=app, app=app)
        elif when == 'hourly':
            signals.run_hourly_jobs.send(sender=app, app=app)
        elif when == 'daily':
            signals.run_daily_jobs.send(sender=app, app=app)
        elif when == 'weekly':
            signals.run_weekly_jobs.send(sender=app, app=app)
        elif when == 'monthly':
            signals.run_monthly_jobs.send(sender=app, app=app)
        elif when == 'yearly':
            signals.run_yearly_jobs.send(sender=app, app=app)
|
ImportError
|
dataset/ETHPy150Open mozilla/inventory/vendor-local/src/django-extensions/build/lib/django_extensions/management/commands/runjobs.py/Command.runjobs_by_signals
|
4,033 |
def configure(current_node=None):
"""Deploy chef-solo specific files"""
current_node = current_node or {}
# Ensure that the /tmp/chef-solo/cache directory exist
cache_dir = "{0}/cache".format(env.node_work_path)
# First remote call, could go wrong
try:
cache_exists = exists(cache_dir)
except EOFError as e:
abort("Could not login to node, got: {0}".format(e))
if not cache_exists:
with settings(hide('running', 'stdout'), warn_only=True):
output = sudo('mkdir -p {0}'.format(cache_dir))
if output.failed:
error = "Could not create {0} dir. ".format(env.node_work_path)
error += "Do you have sudo rights?"
abort(error)
# Change ownership of /tmp/chef-solo/ so that we can rsync
with hide('running', 'stdout'):
with settings(warn_only=True):
output = sudo(
'chown -R {0} {1}'.format(env.user, env.node_work_path))
if output.failed:
error = "Could not modify {0} dir. ".format(env.node_work_path)
error += "Do you have sudo rights?"
abort(error)
# Set up chef solo configuration
logging_path = os.path.dirname(LOGFILE)
if not exists(logging_path):
sudo('mkdir -p {0}'.format(logging_path))
if not exists('/etc/chef'):
sudo('mkdir -p /etc/chef')
# Set parameters and upload solo.rb template
reversed_cookbook_paths = cookbook_paths[:]
reversed_cookbook_paths.reverse()
cookbook_paths_list = '[{0}]'.format(', '.join(
['"{0}/{1}"'.format(env.node_work_path, x)
for x in reversed_cookbook_paths]))
data = {
'node_work_path': env.node_work_path,
'cookbook_paths_list': cookbook_paths_list,
'environment': current_node.get('chef_environment', '_default'),
'verbose': "true" if env.verbose else "false",
'http_proxy': env.http_proxy,
'https_proxy': env.https_proxy
}
with settings(hide('everything')):
try:
upload_template('solo.rb.j2', '/etc/chef/solo.rb',
context=data, use_sudo=True, backup=False,
template_dir=BASEDIR, use_jinja=True, mode=0400)
except __HOLE__:
error = ("Failed to upload '/etc/chef/solo.rb'\nThis "
"can happen when the deployment user does not have a "
"home directory, which is needed as a temporary location")
abort(error)
with hide('stdout'):
sudo('chown root:$(id -g -n root) {0}'.format('/etc/chef/solo.rb'))
|
SystemExit
|
dataset/ETHPy150Open tobami/littlechef/littlechef/solo.py/configure
|
4,034 |
def _loadregisry():
    try:
        with open(_REG_FILENAME) as f: #(3)
            return json.load(f)
    except __HOLE__: #(4)
        return {}
|
IOError
|
dataset/ETHPy150Open rgalanakis/practicalmayapython/src/chapter5/newmenumarker.py/_loadregisry
|
4,035 |
@property
def OptionParser(self):
    if self._optparse is None:
        try:
            me = 'repo %s' % self.NAME
            usage = self.helpUsage.strip().replace('%prog', me)
        except __HOLE__:
            usage = 'repo %s' % self.NAME
        self._optparse = optparse.OptionParser(usage = usage)
        self._Options(self._optparse)
    return self._optparse
|
AttributeError
|
dataset/ETHPy150Open ossxp-com/repo/command.py/Command.OptionParser
|
4,036 |
def GetProjects(self, args, missing_ok=False):
    """A list of projects that match the arguments.
    """
    all = self.manifest.projects
    result = []
    if not args:
        for project in all.values():
            if missing_ok or project.Exists:
                result.append(project)
    else:
        by_path = None
        for arg in args:
            project = all.get(arg)
            if not project:
                path = os.path.abspath(arg).replace('\\', '/')
                if not by_path:
                    by_path = dict()
                    for p in all.values():
                        by_path[p.worktree] = p
                if os.path.exists(path):
                    oldpath = None
                    while path \
                          and path != oldpath \
                          and path != self.manifest.topdir:
                        try:
                            project = by_path[path]
                            break
                        except KeyError:
                            oldpath = path
                            path = os.path.dirname(path)
                else:
                    try:
                        project = by_path[path]
                    except __HOLE__:
                        pass
            if not project:
                raise NoSuchProjectError(arg)
            if not missing_ok and not project.Exists:
                raise NoSuchProjectError(arg)
            result.append(project)
    def _getpath(x):
        return x.relpath
    result.sort(key=_getpath)
    return result
|
KeyError
|
dataset/ETHPy150Open ossxp-com/repo/command.py/Command.GetProjects
|
4,037 |
def apply_patch(self, patch_file, base_path, base_dir, p=None,
                revert=False):
    """Apply the patch and return a PatchResult indicating its success."""
    # Figure out the -p argument for patch. We override the calculated
    # value if it is supplied via a commandline option.
    p_num = p or self._get_p_number(base_path, base_dir)
    cmd = ['patch']
    if revert:
        cmd.append('-R')
    if p_num >= 0:
        cmd.append('-p%d' % p_num)
    cmd.extend(['-i', six.text_type(patch_file)])
    # Ignore return code 2 in case the patch file consists of only empty
    # files, which 'patch' can't handle. Other 'patch' errors also give
    # return code 2, so we must check the command output.
    rc, patch_output = execute(cmd, extra_ignore_errors=(2,),
                               return_error_code=True)
    only_garbage_in_patch = ('patch: **** Only garbage was found in the '
                             'patch input.\n')
    if (patch_output and patch_output.startswith('patch: **** ') and
        patch_output != only_garbage_in_patch):
        die('Failed to execute command: %s\n%s' % (cmd, patch_output))
    # Check the patch for any added/deleted empty files to handle.
    if self.supports_empty_files():
        try:
            with open(patch_file, 'rb') as f:
                patch = f.read()
        except __HOLE__ as e:
            logging.error('Unable to read file %s: %s', patch_file, e)
            return
        patched_empty_files = self.apply_patch_for_empty_files(
            patch, p_num, revert=revert)
        # If there are no empty files in a "garbage-only" patch, the patch
        # is probably malformed.
        if (patch_output == only_garbage_in_patch and
            not patched_empty_files):
            die('Failed to execute command: %s\n%s' % (cmd, patch_output))
    # TODO: Should this take into account apply_patch_for_empty_files ?
    #       The return value of that function is False both when it fails
    #       and when there are no empty files.
    return PatchResult(applied=(rc == 0), patch_output=patch_output)
|
IOError
|
dataset/ETHPy150Open reviewboard/rbtools/rbtools/clients/__init__.py/SCMClient.apply_patch
|
4,038 |
@not_implemented_for('multigraph')
def katz_centrality(G, alpha=0.1, beta=1.0,
max_iter=1000, tol=1.0e-6, nstart=None, normalized=True):
r"""Compute the Katz centrality for the nodes of the graph G.
Katz centrality is related to eigenvalue centrality and PageRank.
The Katz centrality for node `i` is
.. math::
x_i = \alpha \sum_{j} A_{ij} x_j + \beta,
where `A` is the adjacency matrix of the graph G with eigenvalues `\lambda`.
The parameter `\beta` controls the initial centrality and
.. math::
\alpha < \frac{1}{\lambda_{max}}.
Katz centrality computes the relative influence of a node within a
network by measuring the number of the immediate neighbors (first
degree nodes) and also all other nodes in the network that connect
to the node under consideration through these immediate neighbors.
Extra weight can be provided to immediate neighbors through the
parameter :math:`\beta`. Connections made with distant neighbors
are, however, penalized by an attenuation factor `\alpha` which
should be strictly less than the inverse largest eigenvalue of the
adjacency matrix in order for the Katz centrality to be computed
correctly. More information is provided in [1]_ .
Parameters
----------
G : graph
A NetworkX graph
alpha : float
Attenuation factor
beta : scalar or dictionary, optional (default=1.0)
Weight attributed to the immediate neighborhood. If not a scalar the
dictionary must have an value for every node.
max_iter : integer, optional (default=1000)
Maximum number of iterations in power method.
tol : float, optional (default=1.0e-6)
Error tolerance used to check convergence in power method iteration.
nstart : dictionary, optional
Starting value of Katz iteration for each node.
normalized : bool, optional (default=True)
If True normalize the resulting values.
Returns
-------
nodes : dictionary
Dictionary of nodes with Katz centrality as the value.
Examples
--------
>>> import math
>>> G = nx.path_graph(4)
>>> phi = (1+math.sqrt(5))/2.0 # largest eigenvalue of adj matrix
>>> centrality = nx.katz_centrality(G,1/phi-0.01)
>>> for n,c in sorted(centrality.items()):
... print("%d %0.2f"%(n,c))
0 0.37
1 0.60
2 0.60
3 0.37
Notes
-----
This algorithm it uses the power method to find the eigenvector
corresponding to the largest eigenvalue of the adjacency matrix of G.
The constant alpha should be strictly less than the inverse of largest
eigenvalue of the adjacency matrix for the algorithm to converge.
The iteration will stop after max_iter iterations or an error tolerance of
number_of_nodes(G)*tol has been reached.
When `\alpha = 1/\lambda_{max}` and `\beta=1` Katz centrality is the same as
eigenvector centrality.
References
----------
.. [1] M. Newman, Networks: An Introduction. Oxford University Press,
USA, 2010, p. 720.
See Also
--------
katz_centrality_numpy
eigenvector_centrality
eigenvector_centrality_numpy
pagerank
hits
"""
from math import sqrt
if len(G)==0:
return {}
nnodes=G.number_of_nodes()
if nstart is None:
# choose starting vector with entries of 0
x=dict([(n,0) for n in G])
else:
x=nstart
try:
b = dict.fromkeys(G,float(beta))
except (TypeError,__HOLE__):
b = beta
if set(beta) != set(G):
raise nx.NetworkXError('beta dictionary '
'must have a value for every node')
# make up to max_iter iterations
for i in range(max_iter):
xlast=x
x=dict.fromkeys(xlast, 0)
# do the multiplication y = Alpha * Ax - Beta
for n in x:
for nbr in G[n]:
x[n] += xlast[nbr] * G[n][nbr].get('weight',1)
x[n] = alpha*x[n] + b[n]
# check convergence
err=sum([abs(x[n]-xlast[n]) for n in x])
if err < nnodes*tol:
if normalized:
# normalize vector
try:
s=1.0/sqrt(sum(v**2 for v in x.values()))
# this should never be zero?
except ZeroDivisionError:
s=1.0
else:
s = 1
for n in x:
x[n]*=s
return x
raise nx.NetworkXError('Power iteration failed to converge in ',
'%d iterations."%(i+1))')
|
ValueError
|
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/algorithms/centrality/katz.py/katz_centrality
|
4,039 |
@not_implemented_for('multigraph')
def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True):
r"""Compute the Katz centrality for the graph G.
Katz centrality is related to eigenvalue centrality and PageRank.
The Katz centrality for node `i` is
.. math::
x_i = \alpha \sum_{j} A_{ij} x_j + \beta,
where `A` is the adjacency matrix of the graph G with eigenvalues `\lambda`.
The parameter `\beta` controls the initial centrality and
.. math::
\alpha < \frac{1}{\lambda_{max}}.
Katz centrality computes the relative influence of a node within a
network by measuring the number of the immediate neighbors (first
degree nodes) and also all other nodes in the network that connect
to the node under consideration through these immediate neighbors.
Extra weight can be provided to immediate neighbors through the
parameter :math:`\beta`. Connections made with distant neighbors
are, however, penalized by an attenuation factor `\alpha` which
should be strictly less than the inverse largest eigenvalue of the
adjacency matrix in order for the Katz centrality to be computed
correctly. More information is provided in [1]_ .
Parameters
----------
G : graph
A NetworkX graph
alpha : float
Attenuation factor
beta : scalar or dictionary, optional (default=1.0)
Weight attributed to the immediate neighborhood. If not a scalar the
dictionary must have an value for every node.
normalized : bool
If True normalize the resulting values.
Returns
-------
nodes : dictionary
Dictionary of nodes with Katz centrality as the value.
Examples
--------
>>> import math
>>> G = nx.path_graph(4)
>>> phi = (1+math.sqrt(5))/2.0 # largest eigenvalue of adj matrix
>>> centrality = nx.katz_centrality_numpy(G,1/phi)
>>> for n,c in sorted(centrality.items()):
... print("%d %0.2f"%(n,c))
0 0.37
1 0.60
2 0.60
3 0.37
Notes
------
This algorithm uses a direct linear solver to solve the above equation.
The constant alpha should be strictly less than the inverse of largest
eigenvalue of the adjacency matrix for there to be a solution. When
`\alpha = 1/\lambda_{max}` and `\beta=1` Katz centrality is the same as
eigenvector centrality.
References
----------
.. [1] M. Newman, Networks: An Introduction. Oxford University Press,
USA, 2010, p. 720.
See Also
--------
katz_centrality
eigenvector_centrality_numpy
eigenvector_centrality
pagerank
hits
"""
try:
import numpy as np
except ImportError:
raise ImportError('Requires NumPy: http://scipy.org/')
if len(G)==0:
return {}
try:
nodelist = beta.keys()
if set(nodelist) != set(G):
raise nx.NetworkXError('beta dictionary '
'must have a value for every node')
b = np.array(list(beta.values()),dtype=float)
except AttributeError:
nodelist = G.nodes()
try:
b = np.ones((len(nodelist),1))*float(beta)
except (TypeError,__HOLE__):
raise nx.NetworkXError('beta must be a number')
A=nx.adj_matrix(G, nodelist=nodelist)
n = np.array(A).shape[0]
centrality = np.linalg.solve( np.eye(n,n) - (alpha * A) , b)
if normalized:
norm = np.sign(sum(centrality)) * np.linalg.norm(centrality)
else:
norm = 1.0
centrality=dict(zip(nodelist, map(float,centrality/norm)))
return centrality
# fixture for nose tests
|
ValueError
|
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/algorithms/centrality/katz.py/katz_centrality_numpy
|
4,040 |
def _load_file(self, name):
    try:
        path = os.path.join(BAT_DIR, self.battery_name, name)
        with open(path, 'r') as f:
            return f.read().strip()
    except __HOLE__:
        if name == 'current_now':
            return 0
        return False
    except Exception:
        logger.exception("Failed to get %s" % name)
|
IOError
|
dataset/ETHPy150Open qtile/qtile/libqtile/widget/battery.py/_Battery._load_file
|
4,041 |
def _get_info(self):
    try:
        info = {
            'stat': self._get_param('status_file'),
            'now': float(self._get_param('energy_now_file')),
            'full': float(self._get_param('energy_full_file')),
            'power': float(self._get_param('power_now_file')),
        }
    except __HOLE__:
        return False
    return info
|
TypeError
|
dataset/ETHPy150Open qtile/qtile/libqtile/widget/battery.py/_Battery._get_info
|
4,042 |
def _check_zipfile(fp):
    try:
        if _EndRecData(fp):
            return True  # file has correct magic number
    except __HOLE__:
        pass
    return False
|
IOError
|
dataset/ETHPy150Open AndroBugs/AndroBugs_Framework/tools/modified/androguard/patch/zipfile.py/_check_zipfile
|
4,043 |
def is_zipfile(filename):
    """Quickly see if a file is a ZIP file by checking the magic number.
    The filename argument may be a file or file-like object too.
    """
    result = False
    try:
        if hasattr(filename, "read"):
            result = _check_zipfile(fp=filename)
        else:
            with open(filename, "rb") as fp:
                result = _check_zipfile(fp)
    except __HOLE__:
        pass
    return result
|
IOError
|
dataset/ETHPy150Open AndroBugs/AndroBugs_Framework/tools/modified/androguard/patch/zipfile.py/is_zipfile
|
4,044 |
def _EndRecData64(fpin, offset, endrec):
    """
    Read the ZIP64 end-of-archive records and use that to update endrec
    """
    try:
        fpin.seek(offset - sizeEndCentDir64Locator, 2)
    except __HOLE__:
        # If the seek fails, the file is not large enough to contain a ZIP64
        # end-of-archive record, so just return the end record we were given.
        return endrec
    data = fpin.read(sizeEndCentDir64Locator)
    sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
    if sig != stringEndArchive64Locator:
        return endrec
    if diskno != 0 or disks != 1:
        raise BadZipfile("zipfiles that span multiple disks are not supported")
    # Assume no 'zip64 extensible data'
    fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
    data = fpin.read(sizeEndCentDir64)
    sig, sz, create_version, read_version, disk_num, disk_dir, \
        dircount, dircount2, dirsize, diroffset = \
        struct.unpack(structEndArchive64, data)
    if sig != stringEndArchive64:
        return endrec
    # Update the original endrec using data from the ZIP64 record
    endrec[_ECD_SIGNATURE] = sig
    endrec[_ECD_DISK_NUMBER] = disk_num
    endrec[_ECD_DISK_START] = disk_dir
    endrec[_ECD_ENTRIES_THIS_DISK] = dircount
    endrec[_ECD_ENTRIES_TOTAL] = dircount2
    endrec[_ECD_SIZE] = dirsize
    endrec[_ECD_OFFSET] = diroffset
    return endrec
|
IOError
|
dataset/ETHPy150Open AndroBugs/AndroBugs_Framework/tools/modified/androguard/patch/zipfile.py/_EndRecData64
|
4,045 |
def _EndRecData(fpin):
    """Return data from the "End of Central Directory" record, or None.
    The data is a list of the nine items in the ZIP "End of central dir"
    record followed by a tenth item, the file seek offset of this record."""
    # Determine file size
    fpin.seek(0, 2)
    filesize = fpin.tell()
    # Check to see if this is ZIP file with no archive comment (the
    # "end of central directory" structure should be the last item in the
    # file if this is the case).
    try:
        fpin.seek(-sizeEndCentDir, 2)
    except __HOLE__:
        return None
    data = fpin.read()
    if data[0:4] == stringEndArchive and data[-2:] == "\000\000":
        # the signature is correct and there's no comment, unpack structure
        endrec = struct.unpack(structEndArchive, data)
        endrec=list(endrec)
        # Append a blank comment and record start offset
        endrec.append("")
        endrec.append(filesize - sizeEndCentDir)
        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, -sizeEndCentDir, endrec)
    # Either this is not a ZIP file, or it is a ZIP file with an archive
    # comment. Search the end of the file for the "end of central directory"
    # record signature. The comment is the last item in the ZIP file and may be
    # up to 64K long. It is assumed that the "end of central directory" magic
    # number does not appear in the comment.
    maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
    fpin.seek(maxCommentStart, 0)
    data = fpin.read()
    start = data.rfind(stringEndArchive)
    if start >= 0:
        # found the magic number; attempt to unpack and interpret
        recData = data[start:start+sizeEndCentDir]
        endrec = list(struct.unpack(structEndArchive, recData))
        commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
        comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
        endrec.append(comment)
        endrec.append(maxCommentStart + start)
        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, maxCommentStart + start - filesize,
                             endrec)
    # Unable to find a valid end of central directory structure
    return
|
IOError
|
dataset/ETHPy150Open AndroBugs/AndroBugs_Framework/tools/modified/androguard/patch/zipfile.py/_EndRecData
|
4,046 |
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
"""Open the ZIP file with mode read "r", write "w" or append "a"."""
if mode not in ("r", "w", "a"):
raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')
if compression == ZIP_STORED:
pass
elif compression == ZIP_DEFLATED:
if not zlib:
raise RuntimeError,\
"Compression requires the (missing) zlib module"
else:
raise RuntimeError, "That compression method is not supported"
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = key = mode.replace('b', '')[0]
self.pwd = None
self.comment = ''
# Check if we were passed a file-like object
if isinstance(file, basestring):
self._filePassed = 0
self.filename = file
modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
try:
self.fp = open(file, modeDict[mode])
except __HOLE__:
if mode == 'a':
mode = key = 'w'
self.fp = open(file, modeDict[mode])
else:
raise
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
if key == 'r':
self._GetContents()
elif key == 'w':
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
elif key == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir, 0)
except BadZipfile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
else:
if not self._filePassed:
self.fp.close()
self.fp = None
raise RuntimeError, 'Mode must be "r", "w" or "a"'
|
IOError
|
dataset/ETHPy150Open AndroBugs/AndroBugs_Framework/tools/modified/androguard/patch/zipfile.py/ZipFile.__init__
|
4,047 |
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except __HOLE__:
raise BadZipfile("File is not a zip file")
if not endrec:
raise BadZipfile, "File is not a zip file"
if self.debug > 1:
print endrec
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self.comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = concat + offset_cd
print "given, inferred, offset", offset_cd, inferred, concat
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = cStringIO.StringIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if centdir[0:4] != stringCentralDir:
raise BadZipfile, "Bad magic number for central directory"
centdir = struct.unpack(structCentralDir, centdir)
if self.debug > 2:
print centdir
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + concat
x.filename = x._decodeFilename()
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print "total", total
|
IOError
|
dataset/ETHPy150Open AndroBugs/AndroBugs_Framework/tools/modified/androguard/patch/zipfile.py/ZipFile._RealGetContents
|
4,048 |
def add(self, key):
    key = key.strip()
    try:
        return self.dct[key]
    except __HOLE__:
        res = self.dct[key] = self.counter
        self.counter += 1
        return res
|
KeyError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/writer/strings.py/StringTableBuilder.add
|
4,049 |
def _get_resource(package_name, resource, return_binary=False, encoding="utf-8"):
    packages_path = sublime.packages_path()
    content = None
    if VERSION > 3013:
        try:
            if return_binary:
                content = sublime.load_binary_resource("Packages/" + package_name + "/" + resource)
            else:
                content = sublime.load_resource("Packages/" + package_name + "/" + resource)
        except __HOLE__:
            pass
    else:
        path = None
        if os.path.exists(os.path.join(packages_path, package_name, resource)):
            path = os.path.join(packages_path, package_name, resource)
            content = _get_directory_item_content(path, return_binary, encoding)
        if VERSION >= 3006:
            sublime_package = package_name + ".sublime-package"
            packages_path = sublime.installed_packages_path()
            if content is None:
                if os.path.exists(os.path.join(packages_path, sublime_package)):
                    content = _get_zip_item_content(os.path.join(packages_path, sublime_package), resource, return_binary, encoding)
            packages_path = os.path.dirname(sublime.executable_path()) + os.sep + "Packages"
            if content is None:
                if os.path.exists(os.path.join(packages_path, sublime_package)):
                    content = _get_zip_item_content(os.path.join(packages_path, sublime_package), resource, return_binary, encoding)
    return content
|
IOError
|
dataset/ETHPy150Open chrisbreiding/ASCIIPresentation/lib/package_resources.py/_get_resource
|
4,050 |
def get_as_num(value):
    """Return the JS numeric equivalent for a value."""
    if hasattr(value, 'get_literal_value'):
        value = value.get_literal_value()
    if value is None:
        return 0
    try:
        if isinstance(value, types.StringTypes):
            if value.startswith("0x"):
                return int(value, 16)
            else:
                return float(value)
        elif isinstance(value, (int, float, long)):
            return value
        else:
            return int(value)
    except (ValueError, __HOLE__):
        return 0
|
TypeError
|
dataset/ETHPy150Open mozilla/app-validator/appvalidator/testcases/javascript/utils.py/get_as_num
|
4,051 |
def get_as_str(value):
    """Return the JS string equivalent for a literal value."""
    if hasattr(value, 'get_literal_value'):
        value = value.get_literal_value()
    if value is None:
        return ""
    if isinstance(value, bool):
        return u"true" if value else u"false"
    elif isinstance(value, (int, float, long)):
        if value == float('inf'):
            return u"Infinity"
        elif value == float('-inf'):
            return u"-Infinity"
        # Try to see if we can shave off some trailing significant figures.
        try:
            if int(value) == value:
                return unicode(int(value))
        except (ValueError, __HOLE__):
            pass
    return unicode(value)
|
TypeError
|
dataset/ETHPy150Open mozilla/app-validator/appvalidator/testcases/javascript/utils.py/get_as_str
|
4,052 |
@register.filter
def parse_isotime(timestr, default=None):
    """This duplicates oslo timeutils parse_isotime but with a
    @register.filter annotation and a silent fallback on error.
    """
    try:
        return iso8601.parse_date(timestr)
    except (iso8601.ParseError, __HOLE__):
        return default or ''
|
TypeError
|
dataset/ETHPy150Open Havate/havate-openstack/proto-build/gui/horizon/Horizon_GUI/horizon/utils/filters.py/parse_isotime
|
4,053 |
def create_initial_security_groups(sender, instance=None, created=False, **kwargs):
    if not created:
        return
    nc_settings = getattr(settings, 'NODECONDUCTOR', {})
    config_groups = nc_settings.get('DEFAULT_SECURITY_GROUPS', [])
    for group in config_groups:
        sg_name = group.get('name')
        if sg_name in (None, ''):
            logger.error('Skipping misconfigured security group: parameter "name" not found or is empty.')
            continue
        rules = group.get('rules')
        if type(rules) not in (list, tuple):
            logger.error('Skipping misconfigured security group: parameter "rules" should be list or tuple.')
            continue
        sg_description = group.get('description', None)
        sg = SecurityGroup.objects.get_or_create(
            service_project_link=instance,
            description=sg_description,
            name=sg_name)[0]
        for rule in rules:
            if 'icmp_type' in rule:
                rule['from_port'] = rule.pop('icmp_type')
            if 'icmp_code' in rule:
                rule['to_port'] = rule.pop('icmp_code')
            try:
                rule = SecurityGroupRule(security_group=sg, **rule)
                rule.full_clean()
            except __HOLE__ as e:
                logger.error('Failed to create rule for security group %s: %s.' % (sg_name, e))
            else:
                rule.save()
|
ValidationError
|
dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/openstack/handlers.py/create_initial_security_groups
|
4,054 |
def build_config_from_file(module):
    """Build module info from `app/config/desktop.py` files."""
    data = []
    module = frappe.scrub(module)
    for app in frappe.get_installed_apps():
        try:
            data += get_config(app, module)
        except __HOLE__:
            pass
    return data
|
ImportError
|
dataset/ETHPy150Open frappe/frappe/frappe/desk/moduleview.py/build_config_from_file
|
4,055 |
def add_setup_section(config, app, module, label, icon):
    """Add common sections to `/desk#Module/Setup`"""
    try:
        setup_section = get_setup_section(app, module, label, icon)
        if setup_section:
            config.append(setup_section)
    except __HOLE__:
        pass
|
ImportError
|
dataset/ETHPy150Open frappe/frappe/frappe/desk/moduleview.py/add_setup_section
|
4,056 |
def get_class(datatype):
    # already done?
    if not isinstance(datatype, basestring):
        return datatype
    # parse datatype string "v31/Election" --> from v31 import Election
    parsed_datatype = datatype.split("/")
    # get the module
    dynamic_module = __import__(".".join(parsed_datatype[:-1]), globals(), locals(), [], level=-1)
    if not dynamic_module:
        raise Exception("no module for %s" % datatype)
    # go down the attributes to get to the class
    try:
        dynamic_ptr = dynamic_module
        for attr in parsed_datatype[1:]:
            dynamic_ptr = getattr(dynamic_ptr, attr)
        dynamic_cls = dynamic_ptr
    except __HOLE__:
        raise Exception ("no module for %s" % datatype)
    dynamic_cls.datatype = datatype
    return dynamic_cls
|
AttributeError
|
dataset/ETHPy150Open benadida/helios-server/helios/datatypes/__init__.py/get_class
|
4,057 |
def sentinel(name, doc=None):
    try:
        value = sentinel._cache[name]  # memoized
    except __HOLE__:
        pass
    else:
        if doc == value.__doc__:
            return value
        raise ValueError(dedent(
            """\
            New sentinel value %r conflicts with an existing sentinel of the
            same name.
            Old sentinel docstring: %r
            New sentinel docstring: %r
            Resolve this conflict by changing the name of one of the sentinels.
            """,
        ) % (name, value.__doc__, doc))
    @object.__new__  # bind a single instance to the name 'Sentinel'
    class Sentinel(object):
        __doc__ = doc
        __slots__ = ('__weakref__',)
        __name__ = name
        def __new__(cls):
            raise TypeError('cannot create %r instances' % name)
        def __repr__(self):
            return 'sentinel(%r)' % name
        def __reduce__(self):
            return sentinel, (name, doc)
        def __deepcopy__(self, _memo):
            return self
        def __copy__(self):
            return self
    cls = type(Sentinel)
    try:
        # traverse up one frame to find the module where this is defined
        cls.__module__ = sys._getframe(1).f_globals['__name__']
    except (ValueError, KeyError):
        # Couldn't get the name from the calling scope, just use None.
        cls.__module__ = None
    sentinel._cache[name] = Sentinel  # cache result
    return Sentinel
|
KeyError
|
dataset/ETHPy150Open quantopian/zipline/zipline/utils/sentinel.py/sentinel
|
4,058 |
def _format_value(val, limit, level, len=len, repr=repr):
"""Format an arbitrary value as a compact string.
This is a variant on Python's built-in repr() function, also
borrowing some ideas from the repr.py standard library module, but
tuned for speed even in extreme cases (like very large longs or very
long strings) and safety (it never invokes user code).
For structured data types like lists and objects it calls itself
recursively; recursion is strictly limited by level.
Python's basic types (numbers, strings, lists, tuples, dicts, bool,
and None) are represented using their familiar Python notations.
Objects are represented as ClassName<attr1=val1, attr2=val2, ...>.
Portions omitted due to the various limits are represented using
three dots ('...').
Args:
val: An arbitrary value.
limit: Limit on the output length.
level: Recursion level countdown.
len, repr: Not arguments; for optimization.
Returns:
A str instance.
"""
if level <= 0:
return '...'
typ = type(val)
if typ in EASY_TYPES:
if typ is float:
rep = str(val)
elif typ is long:
if val >= 10L**99:
return '...L'
elif val <= -10L**98:
return '-...L'
else:
rep = repr(val)
else:
rep = repr(val)
if typ is long and len(rep) > limit:
n1 = (limit - 3) // 2
n2 = (limit - 3) - n1
rep = rep[:n1] + '...' + rep[-n2:]
return rep
if typ in META_TYPES:
return val.__name__
if typ in STRING_TYPES:
n1 = (limit - 3) // 2
if n1 < 1:
n1 = 1
n2 = (limit - 3) - n1
if n2 < 1:
n2 = 1
if len(val) > limit:
rep = repr(val[:n1] + val[-n2:])
else:
rep = repr(val)
if len(rep) <= limit:
return rep
return rep[:n1] + '...' + rep[-n2:]
if typ is types.MethodType:
if val.im_self is None:
fmt = '<unbound method %s of %s>'
else:
fmt = '<method %s of %s<>>'
if val.im_class is not None:
return fmt % (val.__name__, val.im_class.__name__)
else:
return fmt % (val.__name__, '?')
if typ is types.FunctionType:
nam = val.__name__
if nam == '<lambda>':
return nam
else:
return '<function %s>' % val.__name__
if typ is types.BuiltinFunctionType:
if val.__self__ is not None:
return '<built-in method %s of %s<>>' % (val.__name__,
type(val.__self__).__name__)
else:
return '<built-in function %s>' % val.__name__
if typ is types.ModuleType:
if hasattr(val, '__file__'):
return '<module %s>' % val.__name__
else:
return '<built-in module %s>' % val.__name__
if typ is types.CodeType:
return '<code object %s>' % val.co_name
if isinstance(val, ProtocolBuffer.ProtocolMessage):
buf = [val.__class__.__name__, '<']
limit -= len(buf[0]) + 2
append = buf.append
first = True
dct = getattr(val, '__dict__', None)
if dct:
for k, v in sorted(dct.items()):
if k.startswith('has_') or not k.endswith('_'):
continue
name = k[:-1]
has_method = getattr(val, 'has_' + name, None)
if has_method is not None:
if type(has_method) is not types.MethodType or not has_method():
continue
size_method = getattr(val, name + '_size', None)
if size_method is not None:
if type(size_method) is not types.MethodType or not size_method():
continue
if has_method is None and size_method is None:
continue
if first:
first = False
else:
append(', ')
limit -= len(name) + 2
if limit <= 0:
append('...')
break
append(name)
append('=')
rep = _format_value(v, limit, level-1)
limit -= len(rep)
append(rep)
append('>')
return ''.join(buf)
dct = getattr(val, '__dict__', None)
if type(dct) is dict:
if typ is INSTANCE_TYPE:
typ = val.__class__
typnam = typ.__name__
priv = '_' + typnam + '__'
buffer = [typnam, '<']
limit -= len(buffer[0]) + 2
if len(dct) <= limit//4:
names = sorted(dct)
else:
names = list(dct)
append = buffer.append
first = True
if issubclass(typ, BUILTIN_TYPES):
for builtin_typ in BUILTIN_TYPES:
if issubclass(typ, builtin_typ):
try:
val = builtin_typ(val)
assert type(val) is builtin_typ
except Exception:
break
else:
append(_format_value(val, limit, level-1))
first = False
break
for nam in names:
if not isinstance(nam, basestring):
continue
if first:
first = False
else:
append(', ')
pnam = nam
if pnam.startswith(priv):
pnam = pnam[len(priv)-2:]
limit -= len(pnam) + 2
if limit <= 0:
append('...')
break
append(pnam)
append('=')
rep = _format_value(dct[nam], limit, level-1)
limit -= len(rep)
append(rep)
append('>')
return ''.join(buffer)
how = CONTAINER_TYPES.get(typ)
if how:
head, tail = how
buffer = [head]
append = buffer.append
limit -= 2
series = val
isdict = typ is dict
if isdict and len(val) <= limit//4:
series = sorted(val)
try:
for elem in series:
if limit <= 0:
append('...')
break
rep = _format_value(elem, limit, level-1)
limit -= len(rep) + 2
append(rep)
if isdict:
rep = _format_value(val[elem], limit, level-1)
limit -= len(rep)
append(':')
append(rep)
append(', ')
if buffer[-1] == ', ':
if tail == ')' and len(val) == 1:
buffer[-1] = ',)'
else:
buffer[-1] = tail
else:
append(tail)
return ''.join(buffer)
except (RuntimeError, __HOLE__):
return head + tail + ' (Container modified during iteration)'
if issubclass(typ, BUILTIN_TYPES):
for builtin_typ in BUILTIN_TYPES:
if issubclass(typ, builtin_typ):
try:
val = builtin_typ(val)
assert type(val) is builtin_typ
except Exception:
break
else:
typnam = typ.__name__
limit -= len(typnam) + 2
return '%s<%s>' % (typnam, _format_value(val, limit, level-1))
if message is not None and isinstance(val, message.Message):
buffer = [typ.__name__, '<']
limit -= len(buffer[0]) + 2
append = buffer.append
first = True
fields = val.ListFields()
for f, v in fields:
if first:
first = False
else:
append(', ')
name = f.name
limit -= len(name) + 2
if limit <= 0:
append('...')
break
append(name)
append('=')
if f.label == f.LABEL_REPEATED:
limit -= 2
append('[')
first_sub = True
for item in v:
if first_sub:
first_sub = False
else:
limit -= 2
append(', ')
if limit <= 0:
append('...')
break
rep = _format_value(item, limit, level-1)
limit -= len(rep)
append(rep)
append(']')
else:
rep = _format_value(v, limit, level-1)
limit -= len(rep)
append(rep)
append('>')
return ''.join(buffer)
return typ.__name__ + '<>'
|
KeyError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/ext/appstats/formatting.py/_format_value
|
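The string branch of _format_value above keeps the head and tail of an over-long repr and joins them with '...'. A simplified standalone sketch of that truncation idea (the name truncate_repr and the example values are illustrative, not part of the appstats module):

def truncate_repr(val, limit):
    # Keep the front and back of repr(val), reserving 3 chars for '...'.
    rep = repr(val)
    if len(rep) <= limit:
        return rep
    n1 = max((limit - 3) // 2, 1)   # characters kept from the front
    n2 = max((limit - 3) - n1, 1)   # characters kept from the back
    return rep[:n1] + '...' + rep[-n2:]

print(truncate_repr('abcdefghijklmnopqrstuvwx', 15))   # 'abcde...tuvwx'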
4,059 |
def process_file(fn, lines):
lines.insert(0, '\n')
lines.insert(0, '.. %s:\n' % target_name(fn))
try:
f = open(fn, 'w')
except __HOLE__:
print("Can't open %s for writing. Not touching it." % fn)
return
try:
f.writelines(lines)
except IOError:
print("Can't write to %s. Not touching it." % fn)
finally:
f.close()
|
IOError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/docs/_ext/applyxrefs.py/process_file
|
4,060 |
def has_target(fn):
try:
f = open(fn, 'r')
except __HOLE__:
print("Can't open %s. Not touching it." % fn)
return (True, None)
readok = True
try:
lines = f.readlines()
except IOError:
print("Can't read %s. Not touching it." % fn)
readok = False
finally:
f.close()
if not readok:
return (True, None)
#print fn, len(lines)
if len(lines) < 1:
print("Not touching empty file %s." % fn)
return (True, None)
if lines[0].startswith('.. _'):
return (True, None)
return (False, lines)
|
IOError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/docs/_ext/applyxrefs.py/has_target
|
4,061 |
def __init__(self, id_vendor=0x0fcf, id_product=0x1008, ep=1):
for dev in usb.core.find(idVendor=id_vendor, idProduct=id_product, find_all=True):
try:
dev.set_configuration()
usb.util.claim_interface(dev, 0)
self.dev = dev
self.ep = ep
break
except __HOLE__ as (err, msg):
if err == errno.EBUSY or "Device or resource busy" in msg: #libusb10 or libusb01
_log.info("Found device with vid(0x%04x) pid(0x%04x), but interface already claimed.", id_vendor, id_product)
else:
raise
else:
raise NoUsbHardwareFound(errno.ENOENT, "No available device matching vid(0x%04x) pid(0x%04x)." % (id_vendor, id_product))
|
IOError
|
dataset/ETHPy150Open braiden/python-ant-downloader/antd/hw.py/UsbHardware.__init__
|
4,062 |
def daemonize(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
'''This forks the current process into a daemon.
The stdin, stdout, and stderr arguments are file names that
will be opened and be used to replace the standard file descriptors
in sys.stdin, sys.stdout, and sys.stderr.
These arguments are optional and default to /dev/null.
Note that stderr is opened unbuffered, so
if it shares a file with stdout then interleaved output
may not appear in the order that you expect.
'''
# Do first fork.
try:
pid = os.fork()
if pid > 0:
return 0 # Return 0 from first parent.
#sys.exit(0) # Exit first parent.
except OSError, e:
sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror) )
sys.exit(1)
# Decouple from parent environment.
os.chdir("/")
os.umask(0)
os.setsid()
# Do second fork.
try:
pid = os.fork()
if pid > 0:
sys.exit(0) # Exit second parent.
except __HOLE__, e:
sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror) )
sys.exit(1)
# Now I am a daemon!
# Redirect standard file descriptors.
    # NOTE: For debugging, you might want to use these instead of /dev/null.
#so = file('/tmp/log', 'a+')
#se = file('/tmp/log', 'a+', 0)
si = file(stdin, 'r')
so = file(stdout, 'a+')
se = file(stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# Return 1 from daemon.
return 1
|
OSError
|
dataset/ETHPy150Open jonathanslenders/python-deployer/deployer/daemonize.py/daemonize
|
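A hedged usage sketch of the double-fork helper above: the original parent gets 0 back and keeps running, while the detached grandchild gets 1 and performs the background work. The run_worker function and log path are illustrative, not part of the original module:

import sys
import time

def run_worker():
    # Placeholder for the long-running background job.
    while True:
        time.sleep(60)

if daemonize(stdout='/tmp/worker.log', stderr='/tmp/worker.log') == 1:
    run_worker()    # only the daemonized process reaches this branch
else:
    sys.exit(0)     # original parent: the daemon has been launched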
4,063 |
def get_object_or_404(queryset, *args, **kwargs):
""" replacement of rest_framework.generics and django.shrtcuts analogues """
try:
return queryset.get(*args, **kwargs)
except (__HOLE__, TypeError, DoesNotExist):
raise Http404()
|
ValueError
|
dataset/ETHPy150Open umutbozkurt/django-rest-framework-mongoengine/rest_framework_mongoengine/generics.py/get_object_or_404
|
4,064 |
def get_comment_app():
"""
Get the comment app (i.e. "django.contrib.comments") as defined in the settings
"""
# Make sure the app's in INSTALLED_APPS
comments_app = get_comment_app_name()
if comments_app not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("The COMMENTS_APP (%r) "\
"must be in INSTALLED_APPS" % settings.COMMENTS_APP)
# Try to import the package
try:
package = import_module(comments_app)
except __HOLE__:
raise ImproperlyConfigured("The COMMENTS_APP setting refers to "\
"a non-existing package.")
return package
|
ImportError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/contrib/comments/__init__.py/get_comment_app
|
4,065 |
def wrapped_f(self, f, *args, **kwargs):
# At the first call, reset the time
if self.__last_reset == None:
self.__last_reset = datetime.datetime.now()
if self.__numcalls >= self.__max_calls:
time_delta = datetime.datetime.now() - self.__last_reset
try:
time_delta = int(time_delta.total_seconds()) + 1
except __HOLE__:
time_delta = int((time_delta.microseconds + (time_delta.seconds + time_delta.days * 24 * 3600) * 10**6) / 10**6)
if time_delta <= self.__time_interval:
time.sleep(self.__time_interval - time_delta + 1)
self.__numcalls = 0
self.__last_reset = datetime.datetime.now()
self.__numcalls += 1
return f(*args, **kwargs)
|
AttributeError
|
dataset/ETHPy150Open themiurgo/ratelim/ratelim/__init__.py/greedy.wrapped_f
|
4,066 |
def wrapped_f(self, f, *args, **kwargs):
now = datetime.datetime.now()
# At the first call, reset the time
if self.__last_call == None:
self.__last_call = now
return f(*args, **kwargs)
time_delta = now - self.__last_call
try:
time_delta = int(time_delta.total_seconds()) + 1
except __HOLE__:
time_delta = int((time_delta.microseconds + (time_delta.seconds + time_delta.days * 24 * 3600) * 10**6) / 10**6)
assert time_delta >= 0
if time_delta <= self.__time_interval:
to_sleep = self.__time_interval - time_delta
# print "To sleep", to_sleep
time.sleep(to_sleep)
self.__last_call = datetime.datetime.now()
return f(*args, **kwargs)
|
AttributeError
|
dataset/ETHPy150Open themiurgo/ratelim/ratelim/__init__.py/patient.wrapped_f
|
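Both decorators above fall back to a manual formula when timedelta.total_seconds() is unavailable (it appeared in Python 2.7). A hedged sketch of that portable elapsed-seconds computation pulled out on its own; the helper name elapsed_seconds is illustrative:

import datetime

def elapsed_seconds(delta):
    # Use total_seconds() where available; otherwise mirror the manual fallback above.
    try:
        return int(delta.total_seconds()) + 1
    except AttributeError:
        micros = delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6
        return int(micros / 10**6)

start = datetime.datetime.now()
# ... do some work ...
print(elapsed_seconds(datetime.datetime.now() - start))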
4,067 |
def __new__(cls, *args):
if not args:
# clone constructor
return object.__new__(cls)
else:
element, values = args
# pull appropriate subclass from registry of annotated
# classes
try:
cls = annotated_classes[element.__class__]
except __HOLE__:
cls = _new_annotation_type(element.__class__, cls)
return object.__new__(cls)
|
KeyError
|
dataset/ETHPy150Open zzzeek/sqlalchemy/lib/sqlalchemy/sql/annotation.py/Annotated.__new__
|
4,068 |
def to_global(prefix, mac, project_id):
try:
mac64 = netaddr.EUI(mac).eui64().words
int_addr = int(''.join(['%02x' % i for i in mac64]), 16)
mac64_addr = netaddr.IPAddress(int_addr)
maskIP = netaddr.IPNetwork(prefix).ip
return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') |
maskIP).format()
except netaddr.AddrFormatError:
raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
except __HOLE__:
raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix)
|
TypeError
|
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/ipv6/rfc2462.py/to_global
|
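A hedged usage sketch of the modified-EUI-64 construction above: netaddr expands the 48-bit MAC to EUI-64, the XOR with ::0200:0:0:0 flips the universal/local bit, and the OR merges in the network prefix. The prefix and MAC values are illustrative:

# Illustrative call; project_id is accepted but not used by this implementation.
addr = to_global('2001:db8:1:1::/64', '02:16:3e:33:44:55', None)
print(addr)   # an address inside 2001:db8:1:1::/64 derived from the MAC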
4,069 |
def emit(self, record):
"""Emit a record."""
try:
self.mailer.new(plain=self.format(record)).send()
except (KeyboardInterrupt, __HOLE__):
raise
except:
self.handleError(record)
|
SystemExit
|
dataset/ETHPy150Open marrow/mailer/marrow/mailer/logger.py/MailHandler.emit
|
4,070 |
def init_args():
parser = argparse.ArgumentParser(
description='Generate Mantle models by a given JSON file.'
)
parser.add_argument('json_file',
help='the JSON file to be parsed')
parser.add_argument('output_dir',
help='output directory for generated Objective-C files')
parser.add_argument('--prefix',
help='class prefix of Objective-C files')
parser.add_argument('--author',
help='author info')
args = parser.parse_args()
if not os.path.exists(args.output_dir):
try:
os.mkdir(args.output_dir)
except __HOLE__:
print('Error: could not create directory {}'.format(
args.output_dir
))
exit()
return args
|
IOError
|
dataset/ETHPy150Open sutar/JSON2Mantle/JSON2Mantle/cli.py/init_args
|
4,071 |
def main():
""" Main function
"""
args = init_args()
try:
dict_data = json.loads(open(args.json_file).read())
except __HOLE__:
print('Error: no such file {}'.format(args.json_file))
exit()
j2m = JSON2Mantle()
# Gets meta data
j2m.class_prefix = args.prefix if args.prefix else ''
if args.author:
j2m.meta_data['author'] = args.author
# Get the file base name
file_basename = os.path.basename(args.json_file)
# Eliminating filename extension
class_name = file_basename.split('.')[0]
j2m.generate_properties(dict_data, class_name)
# .h and .m data for rendering
render_h, render_m = j2m.get_template_data()
template_renderer = TemplateRenderer(render_h, render_m, args.output_dir)
template_renderer.render()
|
IOError
|
dataset/ETHPy150Open sutar/JSON2Mantle/JSON2Mantle/cli.py/main
|
4,072 |
def clean(self, value):
super(NOSocialSecurityNumber, self).clean(value)
if value in EMPTY_VALUES:
return ''
if not re.match(r'^\d{11}$', value):
raise ValidationError(self.error_messages['invalid'])
day = int(value[:2])
month = int(value[2:4])
year2 = int(value[4:6])
inum = int(value[6:9])
self.birthday = None
try:
if 000 <= inum < 500:
self.birthday = datetime.date(1900+year2, month, day)
if 500 <= inum < 750 and year2 > 54:
self.birthday = datetime.date(1800+year2, month, day)
if 500 <= inum < 1000 and year2 < 40:
self.birthday = datetime.date(2000+year2, month, day)
if 900 <= inum < 1000 and year2 > 39:
self.birthday = datetime.date(1900+year2, month, day)
except __HOLE__:
raise ValidationError(self.error_messages['invalid'])
sexnum = int(value[8])
if sexnum % 2 == 0:
self.gender = 'F'
else:
self.gender = 'M'
digits = map(int, list(value))
weight_1 = [3, 7, 6, 1, 8, 9, 4, 5, 2, 1, 0]
weight_2 = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2, 1]
def multiply_reduce(aval, bval):
return sum([(a * b) for (a, b) in zip(aval, bval)])
if multiply_reduce(digits, weight_1) % 11 != 0:
raise ValidationError(self.error_messages['invalid'])
if multiply_reduce(digits, weight_2) % 11 != 0:
raise ValidationError(self.error_messages['invalid'])
return value
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/localflavor/no/forms.py/NOSocialSecurityNumber.clean
|
4,073 |
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
user_id_url = kwargs.get('response').get('id')
url = user_id_url + '?' + urlencode({'access_token': access_token})
try:
return self.get_json(url)
except __HOLE__:
return None
|
ValueError
|
dataset/ETHPy150Open omab/python-social-auth/social/backends/salesforce.py/SalesforceOAuth2.user_data
|
4,074 |
def action(self, params):
if len(params) == 0:
return WrongParamResp()
peer = afi = safi = None
try:
peer = params[0]
afi = params[1]
safi = params[2]
except __HOLE__:
pass
self.api.route_refresh(peer, afi, safi)
return CommandsResponse(STATUS_OK, '')
|
IndexError
|
dataset/ETHPy150Open osrg/ryu/ryu/services/protocols/bgp/operator/commands/clear.py/BGPCmd.action
|
4,075 |
def action(self, params):
peer = afi = safi = None
try:
afi = params[0]
safi = params[1]
except __HOLE__:
pass
self.api.route_refresh(peer, afi, safi)
return CommandsResponse(STATUS_OK, '')
|
IndexError
|
dataset/ETHPy150Open osrg/ryu/ryu/services/protocols/bgp/operator/commands/clear.py/BGPCmd.All.action
|
4,076 |
def handle(self, *fixture_labels, **options):
from django.db.models import get_apps
from django.core import serializers
from django.db import connection, transaction
from django.conf import settings
self.style = no_style()
verbosity = int(options.get('verbosity', 1))
show_traceback = options.get('traceback', False)
# commit is a stealth option - it isn't really useful as
# a command line option, but it can be useful when invoking
# loaddata from within another script.
# If commit=True, loaddata will use its own transaction;
# if commit=False, the data load SQL will become part of
# the transaction in place when loaddata was invoked.
commit = options.get('commit', True)
# Keep a count of the installed objects and fixtures
fixture_count = 0
object_count = 0
models = set()
humanize = lambda dirname: dirname and "'%s'" % dirname or 'absolute path'
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database (if
# it isn't already initialized).
cursor = connection.cursor()
# Start transaction management. All fixtures are installed in a
# single transaction to ensure that all references are resolved.
if commit:
transaction.commit_unless_managed()
transaction.enter_transaction_management()
transaction.managed(True)
class SingleZipReader(zipfile.ZipFile):
def __init__(self, *args, **kwargs):
zipfile.ZipFile.__init__(self, *args, **kwargs)
if settings.DEBUG:
assert len(self.namelist()) == 1, "Zip-compressed fixtures must contain only one file."
def read(self):
return zipfile.ZipFile.read(self, self.namelist()[0])
compression_types = {
None: file,
'gz': gzip.GzipFile,
'zip': SingleZipReader
}
if has_bz2:
compression_types['bz2'] = bz2.BZ2File
app_fixtures = [os.path.join(os.path.dirname(app.__file__), 'fixtures') for app in get_apps()]
for fixture_label in fixture_labels:
parts = fixture_label.split('.')
if len(parts) > 1 and parts[-1] in compression_types:
compression_formats = [parts[-1]]
parts = parts[:-1]
else:
compression_formats = compression_types.keys()
if len(parts) == 1:
fixture_name = parts[0]
formats = serializers.get_public_serializer_formats()
else:
fixture_name, format = '.'.join(parts[:-1]), parts[-1]
if format in serializers.get_public_serializer_formats():
formats = [format]
else:
formats = []
if formats:
if verbosity > 1:
print "Loading '%s' fixtures..." % fixture_name
else:
sys.stderr.write(
self.style.ERROR("Problem installing fixture '%s': %s is not a known serialization format." %
(fixture_name, format)))
transaction.rollback()
transaction.leave_transaction_management()
return
if os.path.isabs(fixture_name):
fixture_dirs = [fixture_name]
else:
fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']
for fixture_dir in fixture_dirs:
if verbosity > 1:
print "Checking %s for fixtures..." % humanize(fixture_dir)
label_found = False
for format in formats:
for compression_format in compression_formats:
if compression_format:
file_name = '.'.join([fixture_name, format,
compression_format])
else:
file_name = '.'.join([fixture_name, format])
if verbosity > 1:
print "Trying %s for %s fixture '%s'..." % \
(humanize(fixture_dir), file_name, fixture_name)
full_path = os.path.join(fixture_dir, file_name)
open_method = compression_types[compression_format]
try:
fixture = open_method(full_path, 'r')
if label_found:
fixture.close()
print self.style.ERROR("Multiple fixtures named '%s' in %s. Aborting." %
(fixture_name, humanize(fixture_dir)))
transaction.rollback()
transaction.leave_transaction_management()
return
else:
fixture_count += 1
objects_in_fixture = 0
if verbosity > 0:
print "Installing %s fixture '%s' from %s." % \
(format, fixture_name, humanize(fixture_dir))
try:
objects = serializers.deserialize(format, fixture)
for obj in objects:
objects_in_fixture += 1
models.add(obj.object.__class__)
obj.save()
object_count += objects_in_fixture
label_found = True
except (__HOLE__, KeyboardInterrupt):
raise
except Exception:
import traceback
fixture.close()
transaction.rollback()
transaction.leave_transaction_management()
if show_traceback:
traceback.print_exc()
else:
sys.stderr.write(
self.style.ERROR("Problem installing fixture '%s': %s\n" %
(full_path, ''.join(traceback.format_exception(sys.exc_type,
sys.exc_value, sys.exc_traceback)))))
return
fixture.close()
# If the fixture we loaded contains 0 objects, assume that an
# error was encountered during fixture loading.
if objects_in_fixture == 0:
sys.stderr.write(
self.style.ERROR("No fixture data found for '%s'. (File format may be invalid.)" %
(fixture_name)))
transaction.rollback()
transaction.leave_transaction_management()
return
except Exception, e:
if verbosity > 1:
print "No %s fixture '%s' in %s." % \
(format, fixture_name, humanize(fixture_dir))
# If we found even one object in a fixture, we need to reset the
# database sequences.
if object_count > 0:
sequence_sql = connection.ops.sequence_reset_sql(self.style, models)
if sequence_sql:
if verbosity > 1:
print "Resetting sequences"
for line in sequence_sql:
cursor.execute(line)
if commit:
transaction.commit()
transaction.leave_transaction_management()
if object_count == 0:
if verbosity > 1:
print "No fixtures found."
else:
if verbosity > 0:
print "Installed %d object(s) from %d fixture(s)" % (object_count, fixture_count)
# Close the DB connection. This is required as a workaround for an
# edge case in MySQL: if the same connection is used to
# create tables, load data, and query, the query can return
# incorrect results. See Django #7572, MySQL #37735.
if commit:
connection.close()
|
SystemExit
|
dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/core/management/commands/loaddata.py/Command.handle
|
4,077 |
def as_tuple(item, type=None, length=None):
# Empty list if we get passed None
if item is None:
t = ()
else:
# Convert iterable to list...
try:
t = tuple(item)
# ... or create a list of a single item
except (__HOLE__, NotImplementedError):
t = (item,) * (length or 1)
if configuration["type_check"]:
if length and not len(t) == length:
raise ValueError("Tuple needs to be of length %d" % length)
if type and not all(isinstance(i, type) for i in t):
raise TypeError("Items need to be of type %s" % type)
return t
|
TypeError
|
dataset/ETHPy150Open OP2/PyOP2/pyop2/utils.py/as_tuple
|
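A few hedged usage sketches of the coercion rules in as_tuple above (assuming the module-level configuration['type_check'] flag is on):

as_tuple(None)                    # ()         -- None becomes the empty tuple
as_tuple([1, 2, 3])               # (1, 2, 3)  -- iterables are converted directly
as_tuple(5, length=3)             # (5, 5, 5)  -- a scalar is repeated to the requested length
as_tuple(5, type=int, length=2)   # (5, 5)     -- and validated against the given type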
4,078 |
def as_type(obj, typ):
"""Return obj if it is of dtype typ, otherwise return a copy type-cast to
typ."""
# Assume it's a NumPy data type
try:
return obj if obj.dtype == typ else obj.astype(typ)
except __HOLE__:
if isinstance(obj, int):
return np.int64(obj).astype(typ)
elif isinstance(obj, float):
return np.float64(obj).astype(typ)
else:
raise TypeError("Invalid type %s" % type(obj))
|
AttributeError
|
dataset/ETHPy150Open OP2/PyOP2/pyop2/utils.py/as_type
|
4,079 |
def tuplify(xs):
"""Turn a data structure into a tuple tree."""
try:
return tuple(tuplify(x) for x in xs)
except __HOLE__:
return xs
|
TypeError
|
dataset/ETHPy150Open OP2/PyOP2/pyop2/utils.py/tuplify
|
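A hedged usage sketch of tuplify above: nested lists become nested tuples, and non-iterable leaves are returned unchanged by the TypeError branch (strings would recurse indefinitely, so the example sticks to numbers):

print(tuplify([[1, 2], [3, [4, 5]]]))   # ((1, 2), (3, (4, 5)))
print(tuplify(7))                       # 7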
4,080 |
def check_args(self, args, kwargs):
for argname, argcond, exception in self._checks:
# If the argument argname is not present in the decorated function
# silently ignore it
try:
i = self.varnames.index(argname)
except __HOLE__:
# No formal parameter argname
continue
# Try the argument by keyword first, and by position second.
# If the argument isn't given, silently ignore it.
try:
arg = kwargs.get(argname)
arg = arg or args[i]
except IndexError:
# No actual parameter argname
continue
# If the argument has a default value, also accept that (since the
# constructor will be able to deal with that)
default_index = i - self.nargs + len(self.defaults)
if default_index >= 0 and arg == self.defaults[default_index]:
continue
self.check_arg(arg, argcond, exception)
|
ValueError
|
dataset/ETHPy150Open OP2/PyOP2/pyop2/utils.py/validate_base.check_args
|
4,081 |
def check_arg(self, arg, ignored, exception):
try:
np.dtype(arg)
except __HOLE__:
raise exception("%s:%d %s must be a valid dtype"
% (self.file, self.line, arg))
|
TypeError
|
dataset/ETHPy150Open OP2/PyOP2/pyop2/utils.py/validate_dtype.check_arg
|
4,082 |
def verify_reshape(data, dtype, shape, allow_none=False):
"""Verify data is of type dtype and try to reshaped to shape."""
try:
t = np.dtype(dtype) if dtype is not None else None
except __HOLE__:
raise DataTypeError("Invalid data type: %s" % dtype)
if data is None and allow_none:
return np.asarray([], dtype=t)
elif data is None:
raise DataValueError("Invalid data: None is not allowed!")
else:
try:
a = np.asarray(data, dtype=t)
except ValueError:
raise DataValueError("Invalid data: cannot convert to %s!" % dtype)
try:
# Destructively modify shape. Fails if data are not
# contiguous, but that's what we want anyway.
a.shape = shape
return a
except ValueError:
raise DataValueError("Invalid data: expected %d values, got %d!" %
(np.prod(shape), np.asarray(data).size))
|
TypeError
|
dataset/ETHPy150Open OP2/PyOP2/pyop2/utils.py/verify_reshape
|
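A hedged usage sketch of verify_reshape above, showing the successful path and one failure mode (DataValueError and DataTypeError are the module's own exceptions, as used in the body):

import numpy as np

a = verify_reshape([1, 2, 3, 4], np.float64, (2, 2))
print(a.dtype, a.shape)   # float64 (2, 2)
# verify_reshape([1, 2, 3], np.float64, (2, 2))   # raises DataValueError: expected 4 values, got 3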
4,083 |
def get_petsc_dir():
try:
arch = '/' + os.environ.get('PETSC_ARCH', '')
dir = os.environ['PETSC_DIR']
return (dir, dir + arch)
except __HOLE__:
try:
import petsc
return (petsc.get_petsc_dir(), )
except ImportError:
sys.exit("""Error: Could not find PETSc library.
Set the environment variable PETSC_DIR to your local PETSc base
directory or install PETSc from PyPI: pip install petsc""")
|
KeyError
|
dataset/ETHPy150Open OP2/PyOP2/pyop2/utils.py/get_petsc_dir
|
4,084 |
def SendStartRequest(self):
"""If the process has not been started, sends a request to /_ah/start."""
if self.started:
return
try:
response = self.SendRequest('GET', '/_ah/start')
rc = response.status
if (rc >= 200 and rc < 300) or rc == 404:
self.started = True
except __HOLE__:
pass
except Exception, e:
logging.error('Failed start request to %s: %s', self, e)
|
KeyboardInterrupt
|
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/tools/dev_appserver_multiprocess.py/ChildProcess.SendStartRequest
|
4,085 |
def PosixShutdown():
"""Kills a posix process with os.kill."""
dev_process = GlobalProcess()
children = dev_process.Children()
for term_signal in (signal.SIGTERM, signal.SIGKILL):
for child in children:
if child.process is None:
continue
if child.process.returncode is not None:
continue
pid = child.process.pid
try:
logging.debug('posix kill %d with signal %d', pid, term_signal)
os.kill(pid, term_signal)
except OSError, err:
logging.error('Error encountered sending pid %d signal %d:%s\n',
pid, term_signal, err)
break
time.sleep(0.2)
for child in children:
if child.process is None:
continue
if child.process.returncode is not None:
continue
try:
child.process.wait()
except __HOLE__, e:
if e.errno != errno.ECHILD:
raise e
|
OSError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/tools/dev_appserver_multiprocess.py/PosixShutdown
|
4,086 |
def __init__(self):
try:
webanalytics_app = settings.WEBANALYTICS_APP
if webanalytics_app == 'PIWIK':
self.process_response = self.insert_piwik
elif webanalytics_app == 'GOOGLE_ANALYTICS':
self.process_response = self.insert_google_analytics
else:
self.process_response = self.return_unaltered
except __HOLE__:
self.process_response = self.return_unaltered
|
AttributeError
|
dataset/ETHPy150Open rennerocha/dojopuzzles/dojopuzzles/webanalytics/middleware.py/WebAnalyticsMiddleware.__init__
|
4,087 |
def lazyproperty(f):
"""
@lazyprop decorator. Decorated method will be called only on first access
to calculate a cached property value. After that, the cached value is
returned.
"""
cache_attr_name = '_%s' % f.__name__ # like '_foobar' for prop 'foobar'
docstring = f.__doc__
def get_prop_value(obj):
try:
return getattr(obj, cache_attr_name)
except __HOLE__:
value = f(obj)
setattr(obj, cache_attr_name, value)
return value
return property(get_prop_value, doc=docstring)
|
AttributeError
|
dataset/ETHPy150Open scanny/python-pptx/pptx/util.py/lazyproperty
|
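A hedged usage sketch of the lazyproperty decorator above: the first access computes the value and caches it on the instance as _area, so later accesses skip the computation. The Circle class is illustrative:

class Circle(object):
    def __init__(self, r):
        self.r = r

    @lazyproperty
    def area(self):
        # Area, computed once and then cached on the instance.
        print('computing...')
        return 3.14159 * self.r ** 2

c = Circle(2)
print(c.area)   # prints 'computing...' then 12.56636
print(c.area)   # cached: prints only 12.56636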
4,088 |
def clear_check(self):
ch = self._clear_hook
try:
wr = list(ch)[0]
except __HOLE__:
self.clear_setup()
else:
c = wr()
if c is None:
self.clear_setup()
elif self._root.sys.getrefcount(c) > 3:
print 'GC hook object was referred to from somebody!'
self.clear_callback(wr)
c.cb.callback = None
|
IndexError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/View.py/_GLUECLAMP_.clear_check
|
4,089 |
def obj_at(self, addr):
try:
return self.immnodeset(self.hv.static_types).obj_at(addr)
except __HOLE__:
pass
try:
return self.immnodeset(self.gc.get_objects()).obj_at(addr)
except ValueError:
pass
try:
return self.immnodeset(self.hv.heap()).obj_at(addr)
except ValueError:
raise ValueError, 'No object found at address %s'%hex(addr)
|
ValueError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/View.py/_GLUECLAMP_.obj_at
|
4,090 |
@staticmethod
def _infimum_key(expr):
"""
Return infimum (if possible) else S.Infinity.
"""
try:
infimum = expr.inf
assert infimum.is_comparable
except (NotImplementedError,
AttributeError, AssertionError, __HOLE__):
infimum = S.Infinity
return infimum
|
ValueError
|
dataset/ETHPy150Open sympy/sympy/sympy/sets/sets.py/Set._infimum_key
|
4,091 |
def _contains(self, element):
"""
'in' operator for ProductSets
Examples
========
>>> from sympy import Interval
>>> (2, 3) in Interval(0, 5) * Interval(0, 5)
True
>>> (10, 10) in Interval(0, 5) * Interval(0, 5)
False
Passes operation on to constituent sets
"""
try:
if len(element) != len(self.args):
return false
except __HOLE__: # maybe element isn't an iterable
return false
return And(*
[set.contains(item) for set, item in zip(self.sets, element)])
|
TypeError
|
dataset/ETHPy150Open sympy/sympy/sympy/sets/sets.py/ProductSet._contains
|
4,092 |
def _eval_imageset(self, f):
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.solvers.solveset import solveset
from sympy.core.function import diff, Lambda
from sympy.series import limit
from sympy.calculus.singularities import singularities
# TODO: handle functions with infinitely many solutions (eg, sin, tan)
# TODO: handle multivariate functions
expr = f.expr
if len(expr.free_symbols) > 1 or len(f.variables) != 1:
return
var = f.variables[0]
if expr.is_Piecewise:
result = S.EmptySet
domain_set = self
for (p_expr, p_cond) in expr.args:
if p_cond is true:
intrvl = domain_set
else:
intrvl = p_cond.as_set()
intrvl = Intersection(domain_set, intrvl)
if p_expr.is_Number:
image = FiniteSet(p_expr)
else:
image = imageset(Lambda(var, p_expr), intrvl)
result = Union(result, image)
# remove the part which has been `imaged`
domain_set = Complement(domain_set, intrvl)
if domain_set.is_EmptySet:
break
return result
if not self.start.is_comparable or not self.end.is_comparable:
return
try:
sing = [x for x in singularities(expr, var)
if x.is_real and x in self]
except __HOLE__:
return
if self.left_open:
_start = limit(expr, var, self.start, dir="+")
elif self.start not in sing:
_start = f(self.start)
if self.right_open:
_end = limit(expr, var, self.end, dir="-")
elif self.end not in sing:
_end = f(self.end)
if len(sing) == 0:
solns = list(solveset(diff(expr, var), var))
extr = [_start, _end] + [f(x) for x in solns
if x.is_real and x in self]
start, end = Min(*extr), Max(*extr)
left_open, right_open = False, False
if _start <= _end:
# the minimum or maximum value can occur simultaneously
# on both the edge of the interval and in some interior
# point
if start == _start and start not in solns:
left_open = self.left_open
if end == _end and end not in solns:
right_open = self.right_open
else:
if start == _end and start not in solns:
left_open = self.right_open
if end == _start and end not in solns:
right_open = self.left_open
return Interval(start, end, left_open, right_open)
else:
return imageset(f, Interval(self.start, sing[0],
self.left_open, True)) + \
Union(*[imageset(f, Interval(sing[i], sing[i + 1], True, True))
for i in range(0, len(sing) - 1)]) + \
imageset(f, Interval(sing[-1], self.end, True, self.right_open))
|
NotImplementedError
|
dataset/ETHPy150Open sympy/sympy/sympy/sets/sets.py/Interval._eval_imageset
|
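A hedged usage sketch of the image computation above: for a smooth map on a closed interval, the image runs between the extrema taken over the endpoints and the interior critical points, so x**2 on [-2, 1] maps onto [0, 4]:

from sympy import Interval, Lambda, Symbol, imageset

x = Symbol('x')
print(imageset(Lambda(x, x**2), Interval(-2, 1)))   # typically evaluates to Interval(0, 4)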
4,093 |
def __iter__(self):
import itertools
# roundrobin recipe taken from itertools documentation:
# https://docs.python.org/2/library/itertools.html#recipes
def roundrobin(*iterables):
"roundrobin('ABC', 'D', 'EF') --> A D E B F C"
# Recipe credited to George Sakkis
pending = len(iterables)
if PY3:
nexts = itertools.cycle(iter(it).__next__ for it in iterables)
else:
nexts = itertools.cycle(iter(it).next for it in iterables)
while pending:
try:
for next in nexts:
yield next()
except __HOLE__:
pending -= 1
nexts = itertools.cycle(itertools.islice(nexts, pending))
if all(set.is_iterable for set in self.args):
return roundrobin(*(iter(arg) for arg in self.args))
else:
raise TypeError("Not all constituent sets are iterable")
|
StopIteration
|
dataset/ETHPy150Open sympy/sympy/sympy/sets/sets.py/Union.__iter__
|
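The roundrobin recipe embedded above interleaves items from several iterables until all are exhausted. A standalone hedged sketch in Python 3 spelling, following the itertools documentation recipe it credits:

import itertools

def roundrobin(*iterables):
    "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
    pending = len(iterables)
    nexts = itertools.cycle(iter(it).__next__ for it in iterables)
    while pending:
        try:
            for nxt in nexts:
                yield nxt()
        except StopIteration:
            pending -= 1
            nexts = itertools.cycle(itertools.islice(nexts, pending))

print(list(roundrobin('ABC', 'D', 'EF')))   # ['A', 'D', 'E', 'B', 'F', 'C']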
4,094 |
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = {}
declared_fields = {}
try:
parents = [b for b in bases if issubclass(b, ModelDataset)]
parents.reverse()
for p in parents:
parent_fields = getattr(p, 'base_fields', {})
for field_name, field_object in parent_fields.items():
attrs['base_fields'][field_name] = deepcopy(field_object)
except __HOLE__:
pass
for field_name, obj in attrs.copy().items():
if issubclass(type(obj), Field):
field = attrs.pop(field_name)
declared_fields[field_name] = field
attrs['base_fields'].update(declared_fields)
attrs['declared_fields'] = declared_fields
new_class = super(DatasetMetaclass, cls).__new__(cls, name,
bases, attrs)
opts = new_class._meta = DatasetOptions(getattr(new_class,
'Meta', None))
if new_class.__name__ == 'ModelDataset':
return new_class
if not opts.model and not opts.queryset:
raise NoObjectsException("You must set a model or non-empty "
"queryset for each Dataset subclass")
if opts.queryset is not None:
queryset = opts.queryset
model = queryset.model
new_class.queryset = queryset
new_class.model = model
else:
model = opts.model
queryset = model.objects.all()
new_class.model = model
new_class.queryset = queryset
return new_class
|
NameError
|
dataset/ETHPy150Open joshourisman/django-tablib/django_tablib/models.py/DatasetMetaclass.__new__
|
4,095 |
def setUp(self):
cs = 10.
ncx, ncy, ncz = 10, 10, 10
npad = 4
freq = 1e2
hx = [(cs,npad,-1.3), (cs,ncx), (cs,npad,1.3)]
hy = [(cs,npad,-1.3), (cs,ncy), (cs,npad,1.3)]
hz = [(cs,npad,-1.3), (cs,ncz), (cs,npad,1.3)]
mesh = Mesh.TensorMesh([hx,hy,hz], 'CCC')
mapping = Maps.ExpMap(mesh)
x = np.linspace(-10,10,5)
XYZ = Utils.ndgrid(x,np.r_[0],np.r_[0])
rxList = EM.FDEM.Rx(XYZ, 'exi')
Src0 = EM.FDEM.Src.MagDipole([rxList],loc=np.r_[0.,0.,0.], freq=freq)
survey = EM.FDEM.Survey([Src0])
prb = EM.FDEM.Problem_b(mesh, mapping=mapping)
prb.pair(survey)
try:
from pymatsolver import MumpsSolver
prb.Solver = MumpsSolver
except __HOLE__, e:
prb.Solver = SolverLU
sig = 1e-1
sigma = np.ones(mesh.nC)*sig
sigma[mesh.gridCC[:,2] > 0] = 1e-8
m = np.log(sigma)
self.prb = prb
self.mesh = mesh
self.m = m
self.Src0 = Src0
self.sig = sig
|
ImportError
|
dataset/ETHPy150Open simpeg/simpeg/tests/em/fdem/forward/test_FDEM_analytics.py/FDEM_analyticTests.setUp
|
4,096 |
def __init__(self, filebasename, mode):
self._mode = mode
# The directory file is a text file. Each line looks like
# "%r, (%d, %d)\n" % (key, pos, siz)
# where key is the string key, pos is the offset into the dat
# file of the associated value's first byte, and siz is the number
# of bytes in the associated value.
self._dirfile = filebasename + _os.extsep + 'dir'
# The data file is a binary file pointed into by the directory
# file, and holds the values associated with keys. Each value
# begins at a _BLOCKSIZE-aligned byte offset, and is a raw
# binary 8-bit string value.
self._datfile = filebasename + _os.extsep + 'dat'
self._bakfile = filebasename + _os.extsep + 'bak'
# The index is an in-memory dict, mirroring the directory file.
self._index = None # maps keys to (pos, siz) pairs
# Mod by Jack: create data file if needed
try:
f = _open(self._datfile, 'r')
except __HOLE__:
f = _open(self._datfile, 'w')
self._chmod(self._datfile)
f.close()
self._update()
# Read directory file into the in-memory index dict.
|
IOError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/dumbdbm.py/_Database.__init__
|
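A hedged sketch of the on-disk index format described in the comments above: each line of the .dir file is "%r, (%d, %d)" formatted from (key, pos, siz), and _update() rebuilds the in-memory index by eval-ing those lines. The key and offsets below are illustrative:

key, pos, siz = 'user', 512, 11
line = "%r, (%d, %d)\n" % (key, pos, siz)   # "'user', (512, 11)\n"
k, pos_and_siz_pair = eval(line.rstrip())
print(pos_and_siz_pair)                      # (512, 11)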
4,097 |
def _update(self):
self._index = {}
try:
f = _open(self._dirfile)
except __HOLE__:
pass
else:
for line in f:
line = line.rstrip()
key, pos_and_siz_pair = eval(line)
self._index[key] = pos_and_siz_pair
f.close()
# Write the index dict to the directory file. The original directory
# file (if any) is renamed with a .bak extension first. If a .bak
# file currently exists, it's deleted.
|
IOError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/dumbdbm.py/_Database._update
|
4,098 |
def open(file, flag=None, mode=0666):
"""Open the database file, filename, and return corresponding object.
The flag argument, used to control how the database is opened in the
other DBM implementations, is ignored in the dumbdbm module; the
database is always opened for update, and will be created if it does
not exist.
The optional mode argument is the UNIX mode of the file, used only when
the database has to be created. It defaults to octal code 0666 (and
will be modified by the prevailing umask).
"""
# flag argument is currently ignored
# Modify mode depending on the umask
try:
um = _os.umask(0)
_os.umask(um)
except __HOLE__:
pass
else:
# Turn off any bits that are set in the umask
mode = mode & (~um)
return _Database(file, mode)
|
AttributeError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/dumbdbm.py/open
|
4,099 |
def find_errors(self, output):
"""
Convert flow's json output into a set of matches SublimeLinter can process.
I'm not sure why find_errors isn't exposed in SublimeLinter's docs, but
this would normally attempt to parse a regex and then return a generator
full of sanitized matches. Instead, this implementation returns a list
of errors processed by _error_to_tuple, ready for SublimeLinter to unpack
"""
try:
# calling flow in a matching syntax without a `flowconfig` will cause the
# output of flow to be an error message. catch and return []
parsed = json.loads(output)
except __HOLE__:
persist.debug('flow {}'.format(output))
return []
errors = parsed.get('errors', [])
persist.debug('flow {} errors. passed: {}'.format(len(errors), parsed.get('passed', True)))
return map(self._error_to_tuple, errors)
|
ValueError
|
dataset/ETHPy150Open SublimeLinter/SublimeLinter-flow/linter.py/Flow.find_errors
|
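A hedged sketch of the parsing step above, outside SublimeLinter: flow's JSON output is loaded with json.loads, and a parse failure simply produces no matches. The payload below is illustrative, not an exact flow report:

import json

output = '{"passed": false, "errors": [{"message": [{"descr": "number", "line": 3}]}]}'
try:
    parsed = json.loads(output)
except ValueError:
    parsed = {}
print(len(parsed.get('errors', [])))   # 1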