| Unnamed: 0 (int64, 0-10k) | function (string, lengths 79-138k) | label (string, 20 classes) | info (string, lengths 42-261) |
---|---|---|---|
5,300 |
def get_sha(a_file):
    """
    Returns sha1 hash of the file supplied as an argument
    """
    try:
        BLOCKSIZE = 65536
        hasher = hashlib.sha1()
        with io.open(a_file, "rb") as fh:
            buf = fh.read(BLOCKSIZE)
            while len(buf) > 0:
                hasher.update(buf)
                buf = fh.read(BLOCKSIZE)
        the_hash = hasher.hexdigest()
    except __HOLE__:
        errmes = "File '{}' could not be read! Exiting!\n".format(a_file)
        sys.stdout.write(errmes)
        sys.exit(1)
    except:
        errmes = "Unspecified error returning sha1 hash. Exiting!\n"
        sys.stdout.write(errmes)
        sys.exit(1)
    return the_hash
|
IOError
|
dataset/ETHPy150Open tonyfischetti/sake/functests/test3/functest.py/get_sha
|
5,301 |
def GetValue(self, ignore_error=True):
    """Extracts and returns a single value from a DataBlob."""
    if self.HasField("none"):
        return None
    field_names = ["integer", "string", "data", "boolean", "list", "dict",
                   "rdf_value", "float"]
    values = [getattr(self, x) for x in field_names if self.HasField(x)]
    if len(values) != 1:
        return None
    if self.HasField("boolean"):
        return bool(values[0])
    # Unpack RDFValues.
    if self.HasField("rdf_value"):
        try:
            return rdfvalue.RDFValue.classes[self.rdf_value.name](
                initializer=self.rdf_value.data,
                age=self.rdf_value.age)
        except (__HOLE__, KeyError) as e:
            if ignore_error:
                return e
            raise
    elif self.HasField("list"):
        return [x.GetValue() for x in self.list.content]
    else:
        return values[0]
|
ValueError
|
dataset/ETHPy150Open google/grr/grr/lib/rdfvalues/protodict.py/DataBlob.GetValue
|
5,302 |
def GetItem(self, key, default=None):
    try:
        return self[key]
    except __HOLE__:
        return default
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/rdfvalues/protodict.py/Dict.GetItem
|
5,303 |
def __init__(self, initializer=None, age=None):
    super(RDFValueArray, self).__init__(age=age)
    if self.__class__ == initializer.__class__:
        self.content = initializer.Copy().content
        self.age = initializer.age
    # Initialize from a serialized protobuf.
    elif isinstance(initializer, str):
        self.ParseFromString(initializer)
    else:
        try:
            for item in initializer:
                self.Append(item)
        except __HOLE__:
            if initializer is not None:
                raise rdfvalue.InitializeError(
                    "%s can not be initialized from %s" % (
                        self.__class__.__name__, type(initializer)))
|
TypeError
|
dataset/ETHPy150Open google/grr/grr/lib/rdfvalues/protodict.py/RDFValueArray.__init__
|
5,304 |
def Append(self, value=None, **kwarg):
    """Add another member to the array.
    Args:
      value: The new data to append to the array.
      **kwarg: Create a new element from these keywords.
    Returns:
      The value which was added. This can be modified further by the caller and
      changes will be propagated here.
    Raises:
      ValueError: If the value to add is not allowed.
    """
    if self.rdf_type is not None:
        if (isinstance(value, rdfvalue.RDFValue) and
                value.__class__ != self.rdf_type):
            raise ValueError("Can only accept %s" % self.rdf_type)
        try:
            # Try to coerce the value.
            value = self.rdf_type(value, **kwarg) # pylint: disable=not-callable
        except (__HOLE__, ValueError):
            raise ValueError("Unable to initialize %s from type %s" % (
                self.__class__.__name__, type(value)))
    self.content.Append(DataBlob().SetValue(value))
|
TypeError
|
dataset/ETHPy150Open google/grr/grr/lib/rdfvalues/protodict.py/RDFValueArray.Append
|
5,305 |
@property
def payload(self):
    """Extracts and returns the serialized object."""
    try:
        rdf_cls = self.classes.get(self.name)
        value = rdf_cls(self.data)
        value.age = self.embedded_age
        return value
    except __HOLE__:
        return None
|
TypeError
|
dataset/ETHPy150Open google/grr/grr/lib/rdfvalues/protodict.py/EmbeddedRDFValue.payload
|
5,306 |
def __init__(self, v):
    if isinstance(v, solr_date):
        self._dt_obj = v._dt_obj
    elif isinstance(v, basestring):
        try:
            self._dt_obj = datetime_from_w3_datestring(v)
        except __HOLE__, e:
            raise SolrError(*e.args)
    elif hasattr(v, "strftime"):
        self._dt_obj = self.from_date(v)
    else:
        raise SolrError("Cannot initialize solr_date from %s object"
                        % type(v))
|
ValueError
|
dataset/ETHPy150Open tow/sunburnt/sunburnt/schema.py/solr_date.__init__
|
5,307 |
def __cmp__(self, other):
    try:
        other = other._dt_obj
    except __HOLE__:
        pass
    if self._dt_obj < other:
        return -1
    elif self._dt_obj > other:
        return 1
    else:
        return 0
|
AttributeError
|
dataset/ETHPy150Open tow/sunburnt/sunburnt/schema.py/solr_date.__cmp__
|
5,308 |
def solr_point_factory(dimension):
    if dimension < 1:
        raise ValueError("dimension of PointType must be greater than one")
    class solr_point(object):
        dim = int(dimension)
        def __init__(self, *args):
            if dimension > 1 and len(args) == 1:
                v = args[0]
                if isinstance(v, basestring):
                    v_arr = v.split(',')
                else:
                    try:
                        v_arr = list(v)
                    except __HOLE__:
                        raise ValueError("bad value provided for point list")
            else:
                v_arr = args
            if len(v_arr) != self.dim:
                raise ValueError("point has wrong number of dimensions")
            self.point = tuple(float(v) for v in v_arr)
        def __repr__(self):
            return "solr_point(%s)" % unicode(self)
        def __unicode__(self):
            return ','.join(str(p) for p in self.point)
    return solr_point
|
TypeError
|
dataset/ETHPy150Open tow/sunburnt/sunburnt/schema.py/solr_point_factory
|
5,309 |
def from_user_data(self, value):
    try:
        return str(value)
    except (__HOLE__, ValueError):
        raise SolrError("Could not convert data to binary string (field %s)" %
                        self.name)
|
TypeError
|
dataset/ETHPy150Open tow/sunburnt/sunburnt/schema.py/SolrBinaryField.from_user_data
|
5,310 |
def normalize(self, value):
    try:
        v = self.base_type(value)
    except (OverflowError, __HOLE__, ValueError):
        raise SolrError("%s is invalid value for %s (field %s)" %
                        (value, self.__class__, self.name))
    if v < self.min or v > self.max:
        raise SolrError("%s out of range for a %s (field %s)" %
                        (value, self.__class__, self.name))
    return v
|
TypeError
|
dataset/ETHPy150Open tow/sunburnt/sunburnt/schema.py/SolrNumericalField.normalize
|
5,311 |
def field_type_factory(self, field_type_node):
    try:
        name, class_name = field_type_node.attrib['name'], field_type_node.attrib['class']
    except __HOLE__, e:
        raise SolrError("Invalid schema.xml: missing %s attribute on fieldType" % e.args[0])
    #Obtain field type for given class. Defaults to generic SolrField.
    field_class = self.solr_data_types.get(class_name, SolrField)
    return name, SolrFieldTypeFactory(field_class,
                                      **self.translate_attributes(field_type_node.attrib))
|
KeyError
|
dataset/ETHPy150Open tow/sunburnt/sunburnt/schema.py/SolrSchema.field_type_factory
|
5,312 |
def field_factory(self, field_node, field_type_classes, dynamic):
    try:
        name, field_type = field_node.attrib['name'], field_node.attrib['type']
    except KeyError, e:
        raise SolrError("Invalid schema.xml: missing %s attribute on field" % e.args[0])
    try:
        field_type_class = field_type_classes[field_type]
    except __HOLE__, e:
        raise SolrError("Invalid schema.xml: %s field_type undefined" % field_type)
    return name, field_type_class(dynamic=dynamic,
                                  **self.translate_attributes(field_node.attrib))
# From XML Datatypes
|
KeyError
|
dataset/ETHPy150Open tow/sunburnt/sunburnt/schema.py/SolrSchema.field_factory
|
5,313 |
def match_dynamic_field(self, name):
    try:
        return self.dynamic_field_cache[name]
    except __HOLE__:
        for field in self.dynamic_fields:
            if field.match(name):
                self.dynamic_field_cache[name] = field
                return field
|
KeyError
|
dataset/ETHPy150Open tow/sunburnt/sunburnt/schema.py/SolrSchema.match_dynamic_field
|
5,314 |
def match_field(self, name):
    try:
        return self.fields[name]
    except __HOLE__:
        field = self.match_dynamic_field(name)
        return field
|
KeyError
|
dataset/ETHPy150Open tow/sunburnt/sunburnt/schema.py/SolrSchema.match_field
|
5,315 |
def doc_id_from_doc(self, doc):
    # Is this a dictionary, or an document object, or a thing
    # that can be cast to a uniqueKey? (which could also be an
    # arbitrary object.
    if isinstance(doc, (basestring, int, long, float)):
        # It's obviously not a document object, just coerce to appropriate type
        doc_id = doc
    elif hasattr(doc, "items"):
        # It's obviously a dictionary
        try:
            doc_id = doc[self.schema.unique_key]
        except __HOLE__:
            raise SolrError("No unique key on this document")
    else:
        doc_id = get_attribute_or_callable(doc, self.schema.unique_key)
        if doc_id is None:
            # Well, we couldn't get an ID from it; let's try
            # coercing the doc to the type of an ID field.
            doc_id = doc
    try:
        doc_id_inst = self.schema.unique_field.instance_from_user_data(doc_id)
    except SolrError:
        raise SolrError("Could not parse argument as object or document id")
    return doc_id_inst
|
KeyError
|
dataset/ETHPy150Open tow/sunburnt/sunburnt/schema.py/SolrDelete.doc_id_from_doc
|
5,316 |
@classmethod
def from_response_json(cls, response):
    try:
        facet_counts_dict = response['facet_counts']
    except __HOLE__:
        return SolrFacetCounts()
    facet_fields = {}
    for facet_field, facet_values in facet_counts_dict['facet_fields'].viewitems():
        facets = []
        # Change each facet list from [a, 1, b, 2, c, 3 ...] to
        # [(a, 1), (b, 2), (c, 3) ...]
        for n, value in enumerate(facet_values):
            if n&1 == 0:
                name = value
            else:
                facets.append((name, value))
        facet_fields[facet_field] = facets
    facet_counts_dict['facet_fields'] = facet_fields
    return SolrFacetCounts(**facet_counts_dict)
|
KeyError
|
dataset/ETHPy150Open tow/sunburnt/sunburnt/schema.py/SolrFacetCounts.from_response_json
|
5,317 |
def get_attribute_or_callable(o, name):
    try:
        a = getattr(o, name)
        # Might be attribute or callable
        if callable(a):
            try:
                a = a()
            except TypeError:
                a = None
    except __HOLE__:
        a = None
    return a
|
AttributeError
|
dataset/ETHPy150Open tow/sunburnt/sunburnt/schema.py/get_attribute_or_callable
|
5,318 |
def event_attach(self, eventtype, callback, *args, **kwds):
    """Register an event notification.
    @param eventtype: the desired event type to be notified about.
    @param callback: the function to call when the event occurs.
    @param args: optional positional arguments for the callback.
    @param kwds: optional keyword arguments for the callback.
    @return: 0 on success, ENOMEM on error.
    @note: The callback function must have at least one argument,
    an Event instance. Any other, optional positional and keyword
    arguments are in B{addition} to the first one.
    """
    if not isinstance(eventtype, EventType):
        raise VLCException("%s required: %r" % ('EventType', eventtype))
    if not hasattr(callback, '__call__'):  # callable()
        raise VLCException("%s required: %r" % ('callable', callback))
    # check that the callback expects arguments
    if not any(getargspec(callback)[:2]):  # list(...)
        raise VLCException("%s required: %r" % ('argument', callback))
    if self._callback_handler is None:
        _called_from_ctypes = ctypes.CFUNCTYPE(None, ctypes.POINTER(Event), ctypes.c_void_p)
        @_called_from_ctypes
        def _callback_handler(event, k):
            """(INTERNAL) handle callback call from ctypes.
            @note: We cannot simply make this an EventManager
            method since ctypes does not prepend self as the
            first parameter, hence this closure.
            """
            try:  # retrieve Python callback and arguments
                call, args, kwds = self._callbacks[k]
                # deref event.contents to simplify callback code
                call(event.contents, *args, **kwds)
            except __HOLE__:  # detached?
                pass
        self._callback_handler = _callback_handler
        self._callbacks = {}
    k = eventtype.value
    r = libvlc_event_attach(self, k, self._callback_handler, k)
    if not r:
        self._callbacks[k] = (callback, args, kwds)
    return r
|
KeyError
|
dataset/ETHPy150Open disqus/playa/playa/lib/vlc.py/EventManager.event_attach
|
5,319 |
def hex_version():
    """Return the version of these bindings in hex or 0 if unavailable.
    """
    try:
        return _dot2int(__version__.split('-')[-1])
    except (NameError, __HOLE__):
        return 0
|
ValueError
|
dataset/ETHPy150Open disqus/playa/playa/lib/vlc.py/hex_version
|
5,320 |
def libvlc_hex_version():
    """Return the libvlc version in hex or 0 if unavailable.
    """
    try:
        return _dot2int(libvlc_get_version().split()[0])
    except __HOLE__:
        return 0
|
ValueError
|
dataset/ETHPy150Open disqus/playa/playa/lib/vlc.py/libvlc_hex_version
|
5,321 |
def test_bad_commands(self):
    sources_cache = get_cache_dir()
    cmd_extras = ['-c', sources_cache]
    for commands in [[], ['fake', 'something'], ['check'], ['install', '-a', 'rospack_fake'],
                     ['check', 'rospack_fake', '--os', 'ubuntulucid'],
                     ]:
        try:
            rosdep_main(commands+cmd_extras)
            assert False, "system exit should have occurred"
        except __HOLE__:
            pass
|
SystemExit
|
dataset/ETHPy150Open ros-infrastructure/rosdep/test/test_rosdep_main.py/TestRosdepMain.test_bad_commands
|
5,322 |
def test_check(self):
    sources_cache = get_cache_dir()
    cmd_extras = ['-c', sources_cache]
    with fakeout() as b:
        try:
            rosdep_main(['check', 'python_dep']+cmd_extras)
        except SystemExit:
            assert False, "system exit occurred: %s\n%s"%(b[0].getvalue(), b[1].getvalue())
        stdout, stderr = b
        assert stdout.getvalue().strip() == "All system dependencies have been satisified", stdout.getvalue()
        assert not stderr.getvalue(), stderr.getvalue()
    try:
        osd = rospkg.os_detect.OsDetect()
        override = "%s:%s"%(osd.get_name(), osd.get_codename())
        with fakeout() as b:
            rosdep_main(['check', 'python_dep', '--os', override]+cmd_extras)
            stdout, stderr = b
            assert stdout.getvalue().strip() == "All system dependencies have been satisified"
            assert not stderr.getvalue(), stderr.getvalue()
    except SystemExit:
        assert False, "system exit occurred"
    # this used to abort, but now rosdep assumes validity for even empty stack args
    try:
        with fakeout() as b:
            rosdep_main(['check', 'packageless']+cmd_extras)
            stdout, stderr = b
            assert stdout.getvalue().strip() == "All system dependencies have been satisified"
            assert not stderr.getvalue(), stderr.getvalue()
    except SystemExit:
        assert False, "system exit occurred"
    try:
        rosdep_main(['check', 'nonexistent']+cmd_extras)
        assert False, "system exit should have occurred"
    except __HOLE__:
        pass
|
SystemExit
|
dataset/ETHPy150Open ros-infrastructure/rosdep/test/test_rosdep_main.py/TestRosdepMain.test_check
|
5,323 |
def test_install(self):
    sources_cache = get_cache_dir()
    cmd_extras = ['-c', sources_cache]
    try:
        # python must have already been installed
        with fakeout() as b:
            rosdep_main(['install', 'python_dep']+cmd_extras)
            stdout, stderr = b
            assert "All required rosdeps installed" in stdout.getvalue(), stdout.getvalue()
            assert not stderr.getvalue(), stderr.getvalue()
        with fakeout() as b:
            rosdep_main(['install', 'python_dep', '-r']+cmd_extras)
            stdout, stderr = b
            assert "All required rosdeps installed" in stdout.getvalue(), stdout.getvalue()
            assert not stderr.getvalue(), stderr.getvalue()
    except __HOLE__:
        assert False, "system exit occurred: "+b[1].getvalue()
    try:
        rosdep_main(['check', 'nonexistent'])
        assert False, "system exit should have occurred"
    except SystemExit:
        pass
|
SystemExit
|
dataset/ETHPy150Open ros-infrastructure/rosdep/test/test_rosdep_main.py/TestRosdepMain.test_install
|
5,324 |
def test_where_defined(self):
    try:
        sources_cache = get_cache_dir()
        expected = GITHUB_PYTHON_URL
        for command in (['where_defined', 'testpython'], ['where_defined', 'testpython']):
            with fakeout() as b:
                # set os to ubuntu so this test works on different platforms
                rosdep_main(command + ['-c', sources_cache, '--os=ubuntu:lucid'])
                stdout, stderr = b
                output = stdout.getvalue().strip()
                assert output == expected, output
    except __HOLE__:
        assert False, "system exit occurred"
|
SystemExit
|
dataset/ETHPy150Open ros-infrastructure/rosdep/test/test_rosdep_main.py/TestRosdepMain.test_where_defined
|
5,325 |
def test_what_needs(self):
    try:
        sources_cache = get_cache_dir()
        cmd_extras = ['-c', sources_cache]
        expected = ['python_dep']
        with fakeout() as b:
            rosdep_main(['what-needs', 'testpython']+cmd_extras)
            stdout, stderr = b
            output = stdout.getvalue().strip()
            assert output.split('\n') == expected
        expected = ['python_dep']
        with fakeout() as b:
            rosdep_main(['what_needs', 'testpython', '--os', 'ubuntu:lucid', '--verbose']+cmd_extras)
            stdout, stderr = b
            output = stdout.getvalue().strip()
            assert output.split('\n') == expected
    except __HOLE__:
        assert False, "system exit occurred"
|
SystemExit
|
dataset/ETHPy150Open ros-infrastructure/rosdep/test/test_rosdep_main.py/TestRosdepMain.test_what_needs
|
5,326 |
def test_keys(self):
    sources_cache = get_cache_dir()
    cmd_extras = ['-c', sources_cache]
    try:
        with fakeout() as b:
            rosdep_main(['keys', 'rospack_fake']+cmd_extras)
            stdout, stderr = b
            assert stdout.getvalue().strip() == "testtinyxml", stdout.getvalue()
            assert not stderr.getvalue(), stderr.getvalue()
        with fakeout() as b:
            rosdep_main(['keys', 'rospack_fake', '--os', 'ubuntu:lucid', '--verbose']+cmd_extras)
            stdout, stderr = b
            assert stdout.getvalue().strip() == "testtinyxml", stdout.getvalue()
    except __HOLE__:
        assert False, "system exit occurred"
    try:
        rosdep_main(['keys', 'nonexistent']+cmd_extras)
        assert False, "system exit should have occurred"
    except SystemExit:
        pass
|
SystemExit
|
dataset/ETHPy150Open ros-infrastructure/rosdep/test/test_rosdep_main.py/TestRosdepMain.test_keys
|
5,327 |
def _handle(self, expression, command, x=0, y=0):
    """
    :param expression: the expression to handle
    :param command: the function to apply, either _draw_command or _visit_command
    :param x: the top of the current drawing area
    :param y: the left side of the current drawing area
    :return: the bottom-rightmost point
    """
    if command == self._visit_command:
        #if we don't need to draw the item, then we can use the cached values
        try:
            #attempt to retrieve cached values
            right = expression._drawing_width + x
            bottom = expression._drawing_height + y
            return (right, bottom)
        except __HOLE__:
            #the values have not been cached yet, so compute them
            pass
    if isinstance(expression, DrtAbstractVariableExpression):
        factory = self._handle_VariableExpression
    elif isinstance(expression, DRS):
        factory = self._handle_DRS
    elif isinstance(expression, DrtNegatedExpression):
        factory = self._handle_NegatedExpression
    elif isinstance(expression, DrtLambdaExpression):
        factory = self._handle_LambdaExpression
    elif isinstance(expression, BinaryExpression):
        factory = self._handle_BinaryExpression
    elif isinstance(expression, DrtApplicationExpression):
        factory = self._handle_ApplicationExpression
    elif isinstance(expression, PossibleAntecedents):
        factory = self._handle_VariableExpression
    elif isinstance(expression, DrtProposition):
        factory = self._handle_DrtProposition
    else:
        raise Exception(expression.__class__.__name__)
    (right, bottom) = factory(expression, command, x, y)
    #cache the values
    expression._drawing_width = right - x
    expression._drawing_height = bottom - y
    return (right, bottom)
|
AttributeError
|
dataset/ETHPy150Open nltk/nltk/nltk/sem/drt.py/DrsDrawer._handle
|
5,328 |
def test_draw():
    try:
        from tkinter import Tk
    except __HOLE__:
        from nose import SkipTest
        raise SkipTest("tkinter is required, but it's not available.")
    expressions = [
        r'x',
        r'([],[])',
        r'([x],[])',
        r'([x],[man(x)])',
        r'([x,y],[sees(x,y)])',
        r'([x],[man(x), walks(x)])',
        r'\x.([],[man(x), walks(x)])',
        r'\x y.([],[sees(x,y)])',
        r'([],[(([],[walks(x)]) + ([],[runs(x)]))])',
        r'([x],[man(x), -([],[walks(x)])])',
        r'([],[(([x],[man(x)]) -> ([],[walks(x)]))])'
    ]
    for e in expressions:
        d = DrtExpression.fromstring(e)
        d.draw()
|
ImportError
|
dataset/ETHPy150Open nltk/nltk/nltk/sem/drt.py/test_draw
|
5,329 |
def is_expected(item):
    with open(item) as f:
        expect(f).to_be_a_file()
    expect(item).to_be_a_file()
    try:
        expect(item).not_to_be_a_file()
    except __HOLE__:
        return
    assert False, 'Should not have gotten this far'
|
AssertionError
|
dataset/ETHPy150Open heynemann/preggy/tests/types/test_file.py/is_expected
|
5,330 |
def is_not_expected(item):
    expect(item).Not.to_be_a_file()
    expect(item).not_to_be_a_file()
    try:
        expect(item).to_be_a_file()
    except __HOLE__:
        return
    assert False, 'Should not have gotten this far'
#-----------------------------------------------------------------------------
|
AssertionError
|
dataset/ETHPy150Open heynemann/preggy/tests/types/test_file.py/is_not_expected
|
5,331 |
def test_is_not_file_obj():
    with open('./README.md') as f:
        try:
            expect(f).not_to_be_a_file()
        except __HOLE__:
            return
    assert False, 'Should not have gotten this far'
|
AssertionError
|
dataset/ETHPy150Open heynemann/preggy/tests/types/test_file.py/test_is_not_file_obj
|
5,332 |
def test_other_entities_are_not_deleted(self):
    gazette = GazetteItemFactory()
    entity1 = EntityFactory()
    entity2 = EntityFactory()
    gazette.save()
    entity1.gazette = gazette
    entity1.save()
    entity2.save()
    entity1_id = entity1.id
    entity2_id = entity2.id
    gazette.delete()
    with self.assertRaises(ObjectDoesNotExist):
        Entity.objects.get(pk=entity1_id)
    try:
        Entity.objects.get(pk=entity2_id)
    except __HOLE__:
        self.fail("Entity does not exists and it should")
|
ObjectDoesNotExist
|
dataset/ETHPy150Open machinalis/iepy/tests/test_entity.py/TestEntityGazetteRelation.test_other_entities_are_not_deleted
|
5,333 |
def lookup_object(model, object_id, slug, slug_field):
    """
    Return the ``model`` object with the passed ``object_id``. If
    ``object_id`` is None, then return the object whose ``slug_field``
    equals the passed ``slug``. If ``slug`` and ``slug_field`` are not passed,
    then raise Http404 exception.
    """
    lookup_kwargs = {}
    if object_id:
        lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id
    elif slug and slug_field:
        lookup_kwargs['%s__exact' % slug_field] = slug
    else:
        raise GenericViewError(
            "Generic view must be called with either an object_id or a"
            " slug/slug_field.")
    try:
        return model.objects.get(**lookup_kwargs)
    except __HOLE__:
        raise Http404("No %s found for %s"
                      % (model._meta.verbose_name, lookup_kwargs))
|
ObjectDoesNotExist
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/views/generic/create_update.py/lookup_object
|
5,334 |
def verify_cache(index, res_folder):
    num_files = len(index)
    missing = 0
    corrupt = 0
    scanned = 0
    for entry in index:
        scanned += 1
        progress.write("Scanned %6.1d of %6.1d files (%6.1d corrupt, %6.1d missing)\r" %
                       (scanned, num_files, corrupt, missing))
        filename = os.path.join(res_folder, entry.cached_name)
        if not os.path.exists(filename):
            missing += 1
            continue
        checksum = calc_checksum(filename)
        if checksum != entry.md5_checksum:
            corrupt += 1
            try:
                os.remove(filename)
            except __HOLE__:
                pass
    progress.clear()
    print "Verified %d files:" % num_files
    print " %6.1d files corrupt" % corrupt
    print " %6.1d files not yet downloaded" % missing
    return corrupt, missing
|
IOError
|
dataset/ETHPy150Open ccpgames/rescache/verify.py/verify_cache
|
5,335 |
def string_references(self, minimum_length=2):
    """
    All of the constant string references used by this function.
    :param minimum_length: The minimum length of strings to find (default is 1)
    :return: A list of tuples of (address, string) where is address is the location of the string in
             memory.
    """
    strings = []
    memory = self._project.loader.memory
    # get known instruction addresses and call targets
    # these addresses cannot be string references, but show up frequently in the runtime values
    known_executable_addresses = set()
    for block in self.blocks:
        known_executable_addresses.update(block.instruction_addrs)
    for function in self._function_manager.values():
        known_executable_addresses.update(set(x.addr for x in function.graph.nodes()))
    # loop over all local runtime values and check if the value points to a printable string
    for addr in self.local_runtime_values:
        if not isinstance(addr, claripy.fp.FPV) and addr in memory:
            # check that the address isn't an pointing to known executable code
            # and that it isn't an indirect pointer to known executable code
            try:
                possible_pointer = memory.read_addr_at(addr)
                if addr not in known_executable_addresses and possible_pointer not in known_executable_addresses:
                    # build string
                    stn = ""
                    offset = 0
                    current_char = memory[addr + offset]
                    while current_char in string.printable:
                        stn += current_char
                        offset += 1
                        current_char = memory[addr + offset]
                    # check that the string was a null terminated string with minimum length
                    if current_char == "\x00" and len(stn) >= minimum_length:
                        strings.append((addr, stn))
            except __HOLE__:
                pass
    return strings
|
KeyError
|
dataset/ETHPy150Open angr/angr/angr/knowledge/function.py/Function.string_references
|
5,336 |
def _get_cudafft():
    """Helper to deal with scikit-cuda namespace change"""
    try:
        from skcuda import fft
    except __HOLE__:
        try:
            from scikits.cuda import fft
        except ImportError:
            fft = None
    return fft
|
ImportError
|
dataset/ETHPy150Open mne-tools/mne-python/mne/cuda.py/_get_cudafft
|
5,337 |
def init_cuda(ignore_config=False):
    """Initialize CUDA functionality
    This function attempts to load the necessary interfaces
    (hardware connectivity) to run CUDA-based filtering. This
    function should only need to be run once per session.
    If the config var (set via mne.set_config or in ENV)
    MNE_USE_CUDA == 'true', this function will be executed when
    the first CUDA setup is performed. If this variable is not
    set, this function can be manually executed.
    """
    global _cuda_capable, _multiply_inplace_c128, _halve_c128, _real_c128
    if _cuda_capable:
        return
    if not ignore_config and (get_config('MNE_USE_CUDA', 'false').lower() !=
                              'true'):
        logger.info('CUDA not enabled in config, skipping initialization')
        return
    # Triage possible errors for informative messaging
    _cuda_capable = False
    try:
        from pycuda import gpuarray, driver  # noqa
        from pycuda.elementwise import ElementwiseKernel
    except __HOLE__:
        warn('module pycuda not found, CUDA not enabled')
        return
    try:
        # Initialize CUDA; happens with importing autoinit
        import pycuda.autoinit  # noqa
    except ImportError:
        warn('pycuda.autoinit could not be imported, likely a hardware error, '
             'CUDA not enabled')
        return
    # Make sure scikit-cuda is installed
    cudafft = _get_cudafft()
    if cudafft is None:
        warn('module scikit-cuda not found, CUDA not enabled')
        return
    # let's construct our own CUDA multiply in-place function
    _multiply_inplace_c128 = ElementwiseKernel(
        'pycuda::complex<double> *a, pycuda::complex<double> *b',
        'b[i] *= a[i]', 'multiply_inplace')
    _halve_c128 = ElementwiseKernel(
        'pycuda::complex<double> *a', 'a[i] /= 2.0', 'halve_value')
    _real_c128 = ElementwiseKernel(
        'pycuda::complex<double> *a', 'a[i] = real(a[i])', 'real_value')
    # Make sure we can use 64-bit FFTs
    try:
        cudafft.Plan(16, np.float64, np.complex128)  # will get auto-GC'ed
    except:
        warn('Device does not support 64-bit FFTs, CUDA not enabled')
        return
    _cuda_capable = True
    # Figure out limit for CUDA FFT calculations
    logger.info('Enabling CUDA with %s available memory' % get_cuda_memory())
###############################################################################
# Repeated FFT multiplication
|
ImportError
|
dataset/ETHPy150Open mne-tools/mne-python/mne/cuda.py/init_cuda
|
5,338 |
def __init__(self,
             address=None,
             payload=None,
             sources=None,
             resources=None,  # Old-style resources (file list, Fileset).
             resource_targets=None,  # New-style resources (Resources target specs).
             provides=None,
             compatibility=None,
             **kwargs):
    """
    :param dependencies: Other targets that this target depends on.
      These dependencies may
      be ``python_library``-like targets (``python_library``,
      ``python_thrift_library``, ``python_antlr_library`` and so forth) or
      ``python_requirement_library`` targets.
    :type dependencies: List of target specs
    :param sources: Files to "include". Paths are relative to the
      BUILD file's directory.
    :type sources: ``Fileset`` or list of strings
    :param resources: non-Python resources, e.g. templates, keys, other data
      (it is
      recommended that your application uses the pkgutil package to access these
      resources in a .zip-module friendly way.)
    :param provides:
      The `setup_py <#setup_py>`_ to publish that represents this
      target outside the repo.
    :param compatibility: either a string or list of strings that represents
      interpreter compatibility for this target, using the Requirement-style
      format, e.g. ``'CPython>=3', or just ['>=2.7','<3']`` for requirements
      agnostic to interpreter class.
    """
    self.address = address
    payload = payload or Payload()
    payload.add_fields({
        'sources': self.create_sources_field(sources, address.spec_path, key_arg='sources'),
        'resources': self.create_sources_field(resources, address.spec_path, key_arg='resources'),
        'provides': provides,
        'compatibility': PrimitiveField(maybe_list(compatibility or ())),
    })
    super(PythonTarget, self).__init__(address=address,
                                       payload=payload,
                                       **kwargs)
    self._resource_target_specs = resource_targets
    self.add_labels('python')
    if provides and not isinstance(provides, PythonArtifact):
        raise TargetDefinitionException(self,
            "Target must provide a valid pants setup_py object. Received a '{}' object instead.".format(
                provides.__class__.__name__))
    self._provides = provides
    # Check that the compatibility requirements are well-formed.
    for req in self.payload.compatibility:
        try:
            PythonIdentity.parse_requirement(req)
        except __HOLE__ as e:
            raise TargetDefinitionException(self, str(e))
|
ValueError
|
dataset/ETHPy150Open pantsbuild/pants/src/python/pants/backend/python/targets/python_target.py/PythonTarget.__init__
|
5,339 |
def draw(cursor, out=sys.stdout, paginate=True, max_fieldsize=100):
    """Render an result set as an ascii-table.
    Renders an SQL result set to `out`, some file-like object.
    Assumes that we can determine the current terminal height and
    width via the termsize module.
    Args:
        cursor: An iterable of rows. Each row is a list or tuple
                with index access to each cell. The cursor
                has a list/tuple of headings via cursor.keys().
        out: File-like object.
    """
    def heading_line(sizes):
        for size in sizes:
            out.write(b'+' + b'-' * (size + 2))
        out.write(b'+\n')
    def draw_headings(headings, sizes):
        heading_line(sizes)
        for idx, size in enumerate(sizes):
            fmt = '| %%-%is ' % size
            out.write((fmt % headings[idx]).encode('utf8'))
        out.write(b'|\n')
        heading_line(sizes)
    cols, lines = termsize()
    headings = cursor.keys()
    sizes = list(map(lambda x: len(x), headings))
    if paginate:
        cursor = isublists(cursor, lines - 4)
        # else we assume cursor arrive here pre-paginated
    for screenrows in cursor:
        for row in screenrows:
            if row is None:
                break
            for idx, value in enumerate(row):
                if not isinstance(value, basestring):
                    value = str(value)
                size = max(sizes[idx], len(value))
                sizes[idx] = min(size, max_fieldsize)
        draw_headings(headings, sizes)
        for rw in screenrows:
            if rw is None:
                break  # from isublists impl
            for idx, size in enumerate(sizes):
                fmt = '| %%-%is ' % size
                value = rw[idx]
                if not isinstance(value, basestring):
                    value = str(value)
                if len(value) > max_fieldsize:
                    value = value[:max_fieldsize - 5] + '[...]'
                value = value.replace('\n', '^')
                value = value.replace('\r', '^').replace('\t', ' ')
                try:
                    value = fmt % value
                except __HOLE__:
                    value = fmt % value.decode('utf8')
                out.write(value.encode('utf8'))
            out.write(b'|\n')
        if not paginate:
            heading_line(sizes)
        out.write(b'\n')
|
UnicodeDecodeError
|
dataset/ETHPy150Open jaysw/ipydb/ipydb/asciitable.py/draw
|
5,340 |
def _ReadNumericFile(file_name):
    """Reads a file containing a number.
    @rtype: None or int
    @return: None if file is not found, otherwise number
    """
    try:
        contents = utils.ReadFile(file_name)
    except EnvironmentError, err:
        if err.errno in (errno.ENOENT, ):
            return None
        raise
    try:
        return int(contents)
    except (__HOLE__, TypeError), err:
        # Couldn't convert to int
        raise errors.JobQueueError("Content of file '%s' is not numeric: %s" %
                                   (file_name, err))
|
ValueError
|
dataset/ETHPy150Open ganeti/ganeti/lib/jstore.py/_ReadNumericFile
|
5,341 |
def ParseJobId(job_id):
    """Parses a job ID and converts it to integer.
    """
    try:
        return int(job_id)
    except (ValueError, __HOLE__):
        raise errors.ParameterError("Invalid job ID '%s'" % job_id)
|
TypeError
|
dataset/ETHPy150Open ganeti/ganeti/lib/jstore.py/ParseJobId
|
5,342 |
def iter_cont_objs_to_expire(self):
    """
    Yields (container, obj) tuples to be deleted
    """
    obj_cache = {}
    cnt = 0
    all_containers = set()
    for c in self.swift.iter_containers(self.expiring_objects_account):
        container = str(c['name'])
        timestamp = int(container)
        if timestamp > int(time()):
            break
        all_containers.add(container)
        for o in self.swift.iter_objects(self.expiring_objects_account,
                                         container):
            obj = o['name'].encode('utf8')
            timestamp, actual_obj = obj.split('-', 1)
            timestamp = int(timestamp)
            if timestamp > int(time()):
                break
            try:
                cust_account, cust_cont, cust_obj = \
                    split_path('/' + actual_obj, 3, 3, True)
                cache_key = '%s/%s' % (cust_account, cust_cont)
            except __HOLE__:
                cache_key = None
            if self.processes > 0:
                obj_process = int(
                    hashlib.md5('%s/%s' % (container, obj)).
                    hexdigest(), 16)
                if obj_process % self.processes != self.process:
                    continue
            if cache_key not in obj_cache:
                obj_cache[cache_key] = []
            obj_cache[cache_key].append((container, obj))
            cnt += 1
            if cnt > MAX_OBJECTS_TO_CACHE:
                while obj_cache:
                    for key in obj_cache.keys():
                        if obj_cache[key]:
                            yield obj_cache[key].pop()
                            cnt -= 1
                        else:
                            del obj_cache[key]
    while obj_cache:
        for key in obj_cache.keys():
            if obj_cache[key]:
                yield obj_cache[key].pop()
            else:
                del obj_cache[key]
    for container in all_containers:
        yield (container, None)
|
ValueError
|
dataset/ETHPy150Open openstack/swift/swift/obj/expirer.py/ObjectExpirer.iter_cont_objs_to_expire
|
5,343 |
@authorization_required(is_admin=True)
@threaded
def post(self):
    try:
        login = self.json["login"]
        email = self.json["email"]
        is_admin = bool(self.json.get("is_admin", 0))
        password = self.json["password"]
        assert password and len(password) > 3
        assert LOGIN_EXP.match(login)
        assert EMAIL_EXP.match(email)
    except (__HOLE__, AssertionError, TypeError):
        raise HTTPError(400)
    if Users.select().where(Users.login == login).count():
        raise HTTPError(409)
    user = Users(
        login=login,
        email=email,
        is_admin=is_admin,
        password=password,
    )
    user.save()
    self.response({
        'id': user.id,
        'login': user.login,
        'email': user.email,
        'is_admin': user.is_admin,
    })
|
KeyError
|
dataset/ETHPy150Open mosquito/pypi-server/pypi_server/handlers/api/users.py/UsersHandler.post
|
5,344 |
def process(self):
    self.logger.info('asynscheduler : [started]')
    while True:
        try:
            for entity in ENTITYS:
                fp = open(self.cf["%s.mkfifo.path" % entity], 'w')
                try:
                    fp.write(str(self.cf['%s.mkfifo.start.code' % entity]))
                    self.logger.info('Start code was written. - file=%s : code=%s'
                                     % (self.cf["%s.mkfifo.path" % entity], self.cf['%s.mkfifo.start.code' % entity]))
                finally:
                    fp.close()
            self.logger.debug('interval start, interval=%s' % (self.cf['asynscheduler.interval']))
            time.sleep(self.cf['asynscheduler.interval'])
        except __HOLE__, i:
            if i.errno == 4:
                return PROCSUCCESS  # When ending with the signal
            return PROCERROR  # beyond expectation
|
IOError
|
dataset/ETHPy150Open karesansui/pysilhouette/pysilhouette/asynscheduler.py/AsynScheduler.process
|
5,345 |
def main():
    (opts, args) = getopts()
    if chkopts(opts) is True:
        return PROCERROR
    cf = readconf(opts.config)
    if cf is None:
        print >>sys.stderr, 'Failed to load the config file "%s". (%s)' % (opts.config, sys.argv[0])
        return PROCERROR
    # conf parse
    if parse_conf(cf) is False:
        return PROCERROR
    if reload_conf(cf["env.sys.log.conf.path"]):
        logger = logging.getLogger('pysilhouette.asynscheduler')
    else:
        print >>sys.stderr, 'Failed to load the log file. (%s)' % sys.argv[0]
        return PROCERROR
    try:
        try:
            signal.signal(signal.SIGTERM, sigterm_handler)
            asynscheduler = AsynScheduler(opts, cf)
            ret = asynscheduler.process()  # start!!
            return ret
        except __HOLE__, k:
            logger.critical('Keyboard interrupt occurred. - %s' % str(k.args))
            print >>sys.stderr, 'Keyboard interrupt occurred. - %s' % str(k.args)
        except Exception, e:
            logger.critical('A system error has occurred. - %s' % str(e.args))
            print >>sys.stderr, 'A system error has occurred. - %s' % str(e.args)
            print >>sys.stderr, traceback.format_exc()
            t_logger = logging.getLogger('pysilhouette_traceback')
            t_logger.critical(traceback.format_exc())
    finally:
        if opts.daemon is True and os.path.isfile(opts.pidfile):
            os.unlink(opts.pidfile)
            logger.warning('Process file has been deleted.. - pidfile=%s' % opts.pidfile)
    return PROCERROR
|
KeyboardInterrupt
|
dataset/ETHPy150Open karesansui/pysilhouette/pysilhouette/asynscheduler.py/main
|
5,346 |
@cli.command()
@click.argument('environment', required=True)
@click.argument('zappa_settings', required=True, type=click.File('rb'))
def tail(environment, zappa_settings):
    """ Stolen verbatim from django-zappa:
    https://github.com/Miserlou/django-zappa/blob/master/django_zappa/management/commands/tail.py
    """
    def print_logs(logs):
        for log in logs:
            timestamp = log['timestamp']
            message = log['message']
            if "START RequestId" in message:
                continue
            if "REPORT RequestId" in message:
                continue
            if "END RequestId" in message:
                continue
            print("[" + str(timestamp) + "] " + message.strip())
    zappa, settings, _, lambda_name = _init(environment, zappa_settings)
    try:
        # Tail the available logs
        all_logs = zappa.fetch_logs(lambda_name)
        print_logs(all_logs)
        # Keep polling, and print any new logs.
        while True:
            all_logs_again = zappa.fetch_logs(lambda_name)
            new_logs = []
            for log in all_logs_again:
                if log not in all_logs:
                    new_logs.append(log)
            print_logs(new_logs)
            all_logs = all_logs + new_logs
    except __HOLE__:
        # Die gracefully
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
|
KeyboardInterrupt
|
dataset/ETHPy150Open Miserlou/flask-zappa/bin/client.py/tail
|
5,347 |
def has_permission(self, request, view):
    """
    Check if the subscriber has an active subscription.
    Returns false if:
    * a subscriber isn't passed through the request
    See ``utils.subscriber_has_active_subscription`` for more rules.
    """
    try:
        subscriber_has_active_subscription(subscriber_request_callback(request))
    except __HOLE__:
        return False
|
AttributeError
|
dataset/ETHPy150Open pydanny/dj-stripe/djstripe/contrib/rest_framework/permissions.py/DJStripeSubscriptionPermission.has_permission
|
5,348 |
def sici(x_gpu):
    """
    Sine/Cosine integral.
    Computes the sine and cosine integral of every element in the
    input matrix.
    Parameters
    ----------
    x_gpu : GPUArray
        Input matrix of shape `(m, n)`.
    Returns
    -------
    (si_gpu, ci_gpu) : tuple of GPUArrays
        Tuple of GPUarrays containing the sine integrals and cosine
        integrals of the entries of `x_gpu`.
    Examples
    --------
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import scipy.special
    >>> import special
    >>> x = np.array([[1, 2], [3, 4]], np.float32)
    >>> x_gpu = gpuarray.to_gpu(x)
    >>> (si_gpu, ci_gpu) = sici(x_gpu)
    >>> (si, ci) = scipy.special.sici(x)
    >>> np.allclose(si, si_gpu.get())
    True
    >>> np.allclose(ci, ci_gpu.get())
    True
    """
    if x_gpu.dtype == np.float32:
        args = 'float *x, float *si, float *ci'
        op = 'sicif(x[i], &si[i], &ci[i])'
    elif x_gpu.dtype == np.float64:
        args = 'double *x, double *si, double *ci'
        op = 'sici(x[i], &si[i], &ci[i])'
    else:
        raise ValueError('unsupported type')
    try:
        func = sici.cache[x_gpu.dtype]
    except __HOLE__:
        func = elementwise.ElementwiseKernel(args, op,
                                             options=["-I", install_headers],
                                             preamble='#include "cuSpecialFuncs.h"')
        sici.cache[x_gpu.dtype] = func
    si_gpu = gpuarray.empty_like(x_gpu)
    ci_gpu = gpuarray.empty_like(x_gpu)
    func(x_gpu, si_gpu, ci_gpu)
    return (si_gpu, ci_gpu)
|
KeyError
|
dataset/ETHPy150Open lebedov/scikit-cuda/skcuda/special.py/sici
|
5,349 |
def exp1(z_gpu):
    """
    Exponential integral with `n = 1` of complex arguments.
    Parameters
    ----------
    z_gpu : GPUArray
        Input matrix of shape `(m, n)`.
    Returns
    -------
    e_gpu : GPUArray
        GPUarrays containing the exponential integrals of
        the entries of `z_gpu`.
    Examples
    --------
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import scipy.special
    >>> import special
    >>> z = np.asarray(np.random.rand(4, 4)+1j*np.random.rand(4, 4), np.complex64)
    >>> z_gpu = gpuarray.to_gpu(z)
    >>> e_gpu = exp1(z_gpu)
    >>> e_sp = scipy.special.exp1(z)
    >>> np.allclose(e_sp, e_gpu.get())
    True
    """
    if z_gpu.dtype == np.complex64:
        args = 'pycuda::complex<float> *z, pycuda::complex<float> *e'
    elif z_gpu.dtype == np.complex128:
        args = 'pycuda::complex<double> *z, pycuda::complex<double> *e'
    else:
        raise ValueError('unsupported type')
    op = 'e[i] = exp1(z[i])'
    try:
        func = exp1.cache[z_gpu.dtype]
    except __HOLE__:
        func = elementwise.ElementwiseKernel(args, op,
                                             options=["-I", install_headers],
                                             preamble='#include "cuSpecialFuncs.h"')
        exp1.cache[z_gpu.dtype] = func
    e_gpu = gpuarray.empty_like(z_gpu)
    func(z_gpu, e_gpu)
    return e_gpu
|
KeyError
|
dataset/ETHPy150Open lebedov/scikit-cuda/skcuda/special.py/exp1
|
5,350 |
def expi(z_gpu):
    """
    Exponential integral of complex arguments.
    Parameters
    ----------
    z_gpu : GPUArray
        Input matrix of shape `(m, n)`.
    Returns
    -------
    e_gpu : GPUArray
        GPUarrays containing the exponential integrals of
        the entries of `z_gpu`.
    Examples
    --------
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import scipy.special
    >>> import special
    >>> z = np.asarray(np.random.rand(4, 4)+1j*np.random.rand(4, 4), np.complex64)
    >>> z_gpu = gpuarray.to_gpu(z)
    >>> e_gpu = expi(z_gpu)
    >>> e_sp = scipy.special.expi(z)
    >>> np.allclose(e_sp, e_gpu.get())
    True
    """
    if z_gpu.dtype == np.complex64:
        args = 'pycuda::complex<float> *z, pycuda::complex<float> *e'
    elif z_gpu.dtype == np.complex128:
        args = 'pycuda::complex<double> *z, pycuda::complex<double> *e'
    else:
        raise ValueError('unsupported type')
    op = 'e[i] = expi(z[i])'
    try:
        func = expi.cache[z_gpu.dtype]
    except __HOLE__:
        func = elementwise.ElementwiseKernel(args, op,
                                             options=["-I", install_headers],
                                             preamble='#include "cuSpecialFuncs.h"')
        expi.cache[z_gpu.dtype] = func
    e_gpu = gpuarray.empty_like(z_gpu)
    func(z_gpu, e_gpu)
    return e_gpu
|
KeyError
|
dataset/ETHPy150Open lebedov/scikit-cuda/skcuda/special.py/expi
|
5,351 |
def arg_parser():
    """
    Parses the arguments and calls the help() function if any problem is found
    """
    global PRINT_PIXIE
    global PRINT_REAVER
    global USE_PIXIEWPS
    global AIRODUMP_TIME
    global REAVER_TIME
    global CHANNEL
    global PROMPT_APS
    global OUTPUT_FILE
    global OUTPUT
    global GET_PASSWORD
    global FOREVER
    global OVERRIDE
    global BLACKLIST
    global RSSI
    global MAX_APS
    global USE_MODES
    H = ['-h','--help']
    flags = ['-p','-P','-f','-q','-F','-A']
    binary_flags = ['-a','-t','-c','-o','-s','-m','-M',
                    '--max-aps','--rssi','--airodump-time','--time','--channel','--output','--mode']
    for arg in argv[1:]:
        if arg in H:
            help()
            exit()
        elif argv[argv.index(arg)-1] in binary_flags:
            continue
        elif arg == '-m' or arg == '--mode':
            USE_MODES = True
            mode = argv[argv.index(arg)+1]
            if mode == 'WALK':
                USE_PIXIEWPS = True
                AIRODUMP_TIME = 4
                REAVER_TIME = 8
                GET_PASSWORD = True
                FOREVER = True
                MAX_APS = 2
            elif mode == 'DRIVE':
                USE_PIXIEWPS = True
                REAVER_TIME = 10
                FOREVER = True
                MAX_APS = 1
            elif mode == 'STATIC':
                USE_PIXIEWPS = True
                AIRODUMP_TIME = 5
                REAVER_TIME = 10
                GET_PASSWORD = True
                PROMPT_APS = True
                OVERRIDE = False
            else:
                print ALERT + "Unknown mode %s." %mode
                print " Check available modes in the help."
                help()
        elif arg == '-M' or arg == '--max-aps':
            try:
                MAX_APS == int(argv[argv.index(arg)+1])
            except __HOLE__:
                help()
        elif arg == '-s' or arg == '--rssi':
            try:
                RSSI = int(argv[argv.index(arg)+1])
                if RSSI < -100 or RSSI > 0: help()
            except ValueError:
                help()
        elif arg == '-q' or arg == '--quiet':
            PRINT_PIXIE = False
            PRINT_REAVER = False
        elif arg == '-p' or arg == '--use-pixie':
            USE_PIXIEWPS = True
        elif arg == '-a' or arg == '--airodump-time':
            try:
                AIRODUMP_TIME = int(argv[argv.index(arg)+1])
                if REAVER_TIME <= 0: help()
            except ValueError:
                help()
        elif arg == '-t' or arg == '--time':
            try:
                REAVER_TIME = int(argv[argv.index(arg)+1])
                if REAVER_TIME <= 0: help()
            except ValueError:
                help()
        elif arg == '-c' or arg == '--channel':
            try:
                CHANNEL = int(argv[argv.index(arg)+1])
                if CHANNEL <= 0 or CHANNEL >= 15: help()
            except ValueError:
                help()
        elif arg == '-P' or arg == '--prompt':
            PROMPT_APS = True
        elif arg == '-o' or arg == '--output':
            OUTPUT = True
            try:
                m = argv[argv.index(arg)+1]
                if m not in flags:
                    if m not in binary_flags: OUTPUT_FILE = m
            except IndexError:
                pass
        elif arg == '-f' or arg == '--pass':
            GET_PASSWORD = True
        elif arg == '-F' or arg == '--forever':
            FOREVER = True
        elif arg == '-A' or arg == '--again':
            OVERRIDE = False
            BLACKLIST = False
        else:
            help()
    if CHANNEL != '':
        AIRODUMP_TIME = 1
|
ValueError
|
dataset/ETHPy150Open jgilhutton/pyxiewps_WPShack-Python/pyxiewps-EN.py/arg_parser
|
5,352 |
def parse_airodump(self, input):
    """
    Parses the airodump output
    If you find some error in the program flow, check this function first.
    returns ESSIDs, WPSstatus, channel, bssid and RSSI
    """
    plist = []
    input.reverse()  # Important
    inds = [47,73,86]  # CHANNEL, WPS, ESSID indexes
    if CHANNEL != '': inds = [i+4 for i in inds]
    for line in input:  # Skip all the clients on the output
        if 'Probe' in line:  #
            input = input[(input.index(line)+1):]  # Uses the 'Probe' keyword
            break  #
    for i in input:
        if "][ Elapsed:" not in i and ":" in i and "<length:" not in i:
            i = i.lstrip().strip()
            snowden = i[inds[1]:]  # I ran out of names
            try:
                wps = snowden[0:snowden.index(' ')].strip()
                essid = snowden[(snowden.index(' ')+2):].lstrip()
            except (IndexError, __HOLE__):  # hence ' '
                continue
            channel = i[inds[0]:inds[0]+2].lstrip()
            bssid = i[0:17]
            rssi = i[19:22]
            try:
                if bssid not in blacklist and wps != '' and '0.0' not in wps and int(rssi) >= RSSI:
                    a = '%s|%s|%s|%s|%s|%s' %(bssid,channel.zfill(2),rssi,wps,wps,essid)
                    plist.append(a)
            except ValueError:
                print ALERT + "There was a parsing error in parse_airodump function."
            except:
                return plist
        elif "][ Elapsed:" in i:
            break
    plist.sort(key=lambda x: int(x[21:24]), reverse = True)  # Sorts the list by RSSI
    if MAX_APS != 'All':
        try:
            return plist[0:MAX_APS]
        except IndexError:
            return plist
    if MAX_APS == 'All':  # For the sake of readability
        return plist
|
ValueError
|
dataset/ETHPy150Open jgilhutton/pyxiewps_WPShack-Python/pyxiewps-EN.py/Engine.parse_airodump
|
5,353 |
def run(self, cmd, shell = False, kill_tree = True, timeout = -1, airodump = False):
    """
    Runs a command witha given time after wich is terminated
    returns stdout of proc.
    output is a list without passing strip() on the lines.
    """
    class Alarm(Exception):
        pass
    def alarm_handler(signum, frame):
        raise Alarm
    output = []
    if timeout != -1:
        signal(SIGALRM, alarm_handler)  # Time's ticking...
        alarm(timeout)
    if airodump:
        proc = subprocess.Popen(cmd, shell = shell, stderr = subprocess.PIPE)
    else:
        proc = subprocess.Popen(cmd, shell = shell, stdout = subprocess.PIPE)
    try:
        if airodump:
            for line in iter(proc.stderr.readline, ''):
                output.append(line)
            if timeout != -1:
                alarm(0)
        else:
            for line in iter(proc.stdout.readline, ''):
                output.append(line)
            if timeout != -1:
                alarm(0)
    except Alarm:  # time's out! alarm is raised
        pids = [proc.pid]  # kill the process tree related with the main process.
        if airodump: system('pkill airodump')
        if kill_tree:
            pids.extend(self.get_process_children(proc.pid))
        for pid in pids:
            try:
                kill(pid, SIGKILL)
            except __HOLE__:
                pass
        return output
    return output
|
OSError
|
dataset/ETHPy150Open jgilhutton/pyxiewps_WPShack-Python/pyxiewps-EN.py/Engine.run
|
5,354 |
def get_iface(self):
    """
    If any monitor interfaces are found, returns the wlans.
    If more than onw are found, ask the user to choose.
    If monitor mode is already enable, returns the name.
    """
    if self.IS_MON:  # If the interface is already in monitor mode, it returns its name
        proc = subprocess.Popen('iwconfig',shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE).communicate()[0].strip().split('\n')
        for linea in proc:
            if 'Monitor' in linea:
                mon = linea[0:10].strip()
                self.IFACE_MON = mon
                return mon
    else:
        proc = subprocess.Popen('iwconfig',shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE).communicate()[0].strip().split('\n')
        ifaces = []
        for linea in proc:
            if 'IEEE' in linea:
                ifaces.append(linea[0:10].strip())
        if len(ifaces) == 1 and ifaces[0] == '':
            print ALERT + "No wireless interfaces were found!"
            print " Please check if any wireless device in your PC."
            print " if you are running on a virtual machine"
            print " go get an USB wireless device."
            exit()
        elif len(ifaces) > 1:
            print INPUT + "Choose the W.Interface: "
            for i in ifaces:
                print str(ifaces.index(i)) + " >> " + i
            while True:  # Control the input! you bugseeker!
                try:
                    choice = int(raw_input(INPUT))
                    self.IFACE = ifaces[choice]
                    return ifaces[choice]
                    break
                except (IndexError, __HOLE__):
                    print ALERT + "Number between 0 and %s" %(len(ifaces)-1)  #Index error handling
                except KeyboardInterrupt:
                    print
                    print ALERT + "Interrupted program!"
                    print
                    engine.exit_clean()
        else:
            self.IFACE = ifaces[0]
            return ifaces[0]
|
ValueError
|
dataset/ETHPy150Open jgilhutton/pyxiewps_WPShack-Python/pyxiewps-EN.py/Config.get_iface
|
5,355 |
def get_wps_aps(self):
    """
    Enumerates any WPS-active APs
    Goes to get_reaver_info
    """
    print INFO + "Enumerating WPS-active APs..."
    cmd = 'airodump-ng -c 1-11 --wps %s' %(c.IFACE_MON)
    if CHANNEL != '':
        cmd = 'airodump-ng -c %d --wps %s' %(CHANNEL, c.IFACE_MON)
    output = engine.run(cmd, shell = True, timeout = AIRODUMP_TIME, airodump = True)
    ap_list = engine.parse_airodump(output)
    last = len(ap_list)-1
    if ap_list == []:
        print
        print ALERT + "No WPS-active APs were found."
        print
        if not FOREVER:
            engine.exit_clean()
    else:
        for_fill = ap_list             #\
        essids = []                    #|
        for line in for_fill:          #|- Formats the list
            line = line.split('|')     #|
            essids.append(line[5])     #|
        fill = len(max(essids))        #/
        print INFO + "The following WPS-active APs were found:"
        for line in ap_list:
            line = line.split('|')
            fill_line = fill - len(line[5])
            print '\t' + INPUT + str(line[5]) + ' '*fill_line + ' || ' + line[0] + ' || Channel: ' + line[1] + ' || RSSI: ' + line[2] + ' || WPS: ' + line[4]
        while True:
            try:
                if len(ap_list) != 1 and PROMPT_APS:
                    choice = raw_input("%sIndex of the AP or press ENTER to choose all of them: " %INPUT)
                    if choice == '':
                        break
                    else:
                        choice = int(choice)
                        temp = []
                        temp.append(ap_list[choice])
                        ap_list = temp
                        break
                else:
                    break
            except __HOLE__:
                print
                engine.exit_clean()
                break
            except (ValueError, IndexError):
                print ALERT + "Number between 0 and %d" %last
        if path.isfile('pyxiewpsdata.txt'):
            match = []
            wpspin = []
            with open('pyxiewpsdata.txt') as f:
                already_found_pins = f.readlines()
            if len(already_found_pins) > 1:
                already_found_pins.reverse()  # reverts the list so it takes the newest pin
                for target in ap_list:  # if any pin were changed by the AP administrator
                    for line in already_found_pins[1:]:
                        if target.split('|')[5] == line.strip():
                            match.append(target)
                            wpspin.append(already_found_pins[already_found_pins.index(line)-1].strip())
                for i in set(match):
                    print OPTION + "The %s pin was already found!" %i.split('|')[5]
                    print '\t'+ INPUT + wpspin[match.index(i)]
                    if not OVERRIDE:
                        print INFO + "Will attack again as requested."
                        print
                    else:
                        print INFO + "Skiped forever."
                        ap_list.remove(i)  # Removed from the AP list
                        blacklist.append(i[:17])
                    print
        for line in ap_list:  # main for-loop
            line = line.split('|')
            self.get_reaver_info(line[0],line[1],line[5])
            print SEPARATOR
        if not FOREVER:
            engine.exit_clean()
|
KeyboardInterrupt
|
dataset/ETHPy150Open jgilhutton/pyxiewps_WPShack-Python/pyxiewps-EN.py/Attack.get_wps_aps
|
5,356 |
def get(using=None):
    """Return a browser launcher instance appropriate for the environment."""
    if using is not None:
        alternatives = [using]
    else:
        alternatives = _tryorder
    for browser in alternatives:
        if '%s' in browser:
            # User gave us a command line, split it into name and args
            browser = shlex.split(browser)
            if browser[-1] == '&':
                return BackgroundBrowser(browser[:-1])
            else:
                return GenericBrowser(browser)
        else:
            # User gave us a browser name or path.
            try:
                command = _browsers[browser.lower()]
            except __HOLE__:
                command = _synthesize(browser)
            if command[1] is not None:
                return command[1]
            elif command[0] is not None:
                return command[0]()
    raise Error("could not locate runnable browser")

# Please note: the following definition hides a builtin function.
# It is recommended one does "import webbrowser" and uses webbrowser.open(url)
# instead of "from webbrowser import *".
|
KeyError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/webbrowser.py/get
|
5,357 |
def _synthesize(browser, update_tryorder=1):
    """Attempt to synthesize a controller base on existing controllers.
    This is useful to create a controller when a user specifies a path to
    an entry in the BROWSER environment variable -- we can copy a general
    controller to operate using a specific installation of the desired
    browser in this way.
    If we can't create a controller in this way, or if there is no
    executable for the requested browser, return [None, None].
    """
    cmd = browser.split()[0]
    if not _iscommand(cmd):
        return [None, None]
    name = os.path.basename(cmd)
    try:
        command = _browsers[name.lower()]
    except __HOLE__:
        return [None, None]
    # now attempt to clone to fit the new name:
    controller = command[1]
    if controller and name.lower() == controller.basename:
        import copy
        controller = copy.copy(controller)
        controller.name = browser
        controller.basename = os.path.basename(browser)
        register(browser, None, controller, update_tryorder)
        return [None, controller]
    return [None, None]
|
KeyError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/webbrowser.py/_synthesize
|
5,358 |
def open(self, url, new=0, autoraise=1):
    cmdline = [self.name] + [arg.replace("%s", url)
                             for arg in self.args]
    try:
        if sys.platform[:3] == 'win':
            p = subprocess.Popen(cmdline)
        else:
            p = subprocess.Popen(cmdline, close_fds=True)
        return not p.wait()
    except __HOLE__:
        return False
|
OSError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/webbrowser.py/GenericBrowser.open
|
5,359 |
def open(self, url, new=0, autoraise=1):
    cmdline = [self.name] + [arg.replace("%s", url)
                             for arg in self.args]
    try:
        if sys.platform[:3] == 'win':
            p = subprocess.Popen(cmdline)
        else:
            setsid = getattr(os, 'setsid', None)
            if not setsid:
                setsid = getattr(os, 'setpgrp', None)
            p = subprocess.Popen(cmdline, close_fds=True, preexec_fn=setsid)
        return (p.poll() is None)
    except __HOLE__:
        return False
|
OSError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/webbrowser.py/BackgroundBrowser.open
|
5,360 |
def open(self, url, new=0, autoraise=1):
    # XXX Currently I know no way to prevent KFM from opening a new win.
    if new == 2:
        action = "newTab"
    else:
        action = "openURL"
    devnull = file(os.devnull, "r+")
    # if possible, put browser in separate process group, so
    # keyboard interrupts don't affect browser as well as Python
    setsid = getattr(os, 'setsid', None)
    if not setsid:
        setsid = getattr(os, 'setpgrp', None)
    try:
        p = subprocess.Popen(["kfmclient", action, url],
                             close_fds=True, stdin=devnull,
                             stdout=devnull, stderr=devnull)
    except __HOLE__:
        # fall through to next variant
        pass
    else:
        p.wait()
        # kfmclient's return code unfortunately has no meaning as it seems
        return True
    try:
        p = subprocess.Popen(["konqueror", "--silent", url],
                             close_fds=True, stdin=devnull,
                             stdout=devnull, stderr=devnull,
                             preexec_fn=setsid)
    except OSError:
        # fall through to next variant
        pass
    else:
        if p.poll() is None:
            # Should be running now.
            return True
    try:
        p = subprocess.Popen(["kfm", "-d", url],
                             close_fds=True, stdin=devnull,
                             stdout=devnull, stderr=devnull,
                             preexec_fn=setsid)
    except OSError:
        return False
    else:
        return (p.poll() is None)
|
OSError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/webbrowser.py/Konqueror.open
|
5,361 |
def _find_grail_rc(self):
    import glob
    import pwd
    import socket
    import tempfile
    tempdir = os.path.join(tempfile.gettempdir(),
                           ".grail-unix")
    user = pwd.getpwuid(os.getuid())[0]
    filename = os.path.join(tempdir, user + "-*")
    maybes = glob.glob(filename)
    if not maybes:
        return None
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    for fn in maybes:
        # need to PING each one until we find one that's live
        try:
            s.connect(fn)
        except socket.error:
            # no good; attempt to clean it out, but don't fail:
            try:
                os.unlink(fn)
            except __HOLE__:
                pass
        else:
            return s
|
IOError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/webbrowser.py/Grail._find_grail_rc
|
5,362 |
def Scalar(self, event, loader):
    """Handle scalar value
    Since scalars are simple values that are passed directly in by the
    parser, handle like any value with no additional processing.
    Of course, key values will be handles specially. A key value is recognized
    when the top token is _TOKEN_MAPPING.
    Args:
      event: Event containing scalar value.
    """
    self._HandleAnchor(event)
    if event.tag is None and self._top[0] != _TOKEN_MAPPING:
        try:
            tag = loader.resolve(yaml.nodes.ScalarNode,
                                 event.value, event.implicit)
        except __HOLE__:
            tag = loader.DEFAULT_SCALAR_TAG
    else:
        tag = event.tag
    if tag is None:
        value = event.value
    else:
        node = yaml.nodes.ScalarNode(tag,
                                     event.value,
                                     event.start_mark,
                                     event.end_mark,
                                     event.style)
        value = loader.construct_object(node)
    self._HandleValue(value)
|
IndexError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/api/yaml_builder.py/BuilderHandler.Scalar
|
5,363 |
@classmethod
def get_transform(cls, spec):
    try:
        return cls.spec_map[spec['type']].wrap(spec)
    except __HOLE__:
        raise BadSpecError(_('Invalid or missing transform type: {}. Valid options are: {}').format(
            spec.get('type', None),
            ', '.join(cls.spec_map.keys()),
        ))
    except BadValueError as e:
        raise BadSpecError(_('Problem creating transform: {}. Message is: {}').format(
            json.dumps(spec, indent=2),
            str(e),
        ))
|
KeyError
|
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/userreports/transforms/factory.py/TransformFactory.get_transform
|
5,364 |
@property
def token(self):
    """
    set the instance attribute `token` following the following logic,
    stopping whenever a token is found. Raises NoTokenFound is no token
    is found
    - environment variables TOKEN_ENVIRON_VARS
    - file containing plaintext as the contents in TOKEN_FILES
    - ads.config.token
    """
    if self._token is None:
        for v in map(os.environ.get, TOKEN_ENVIRON_VARS):
            if v is not None:
                self._token = v
                return self._token
        for f in TOKEN_FILES:
            try:
                with open(f) as fp:
                    self._token = fp.read().strip()
                return self._token
            except __HOLE__:
                pass
        if ads.config.token is not None:
            self._token = ads.config.token
            return self._token
        warnings.warn("No token found", RuntimeWarning)
    return self._token
|
IOError
|
dataset/ETHPy150Open andycasey/ads/ads/base.py/BaseQuery.token
|
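The token property in record 5,364 resolves an API token by falling back through environment variables, plaintext token files and a config attribute, with IOError (the label) marking a token file that simply is not there. A stripped-down sketch of that resolution chain, using illustrative variable and file names rather than the library's real constants:

import os
import warnings

TOKEN_ENVIRON_VARS = ["ADS_API_TOKEN", "ADS_DEV_KEY"]   # illustrative names
TOKEN_FILES = [os.path.expanduser("~/.ads/token")]      # illustrative path

def resolve_token(config_token=None):
    # 1. environment variables win
    for value in map(os.environ.get, TOKEN_ENVIRON_VARS):
        if value is not None:
            return value
    # 2. plaintext token files; a missing file just means "try the next source"
    for path in TOKEN_FILES:
        try:
            with open(path) as fp:
                return fp.read().strip()
        except IOError:
            pass
    # 3. explicit configuration value, else warn and give up
    if config_token is not None:
        return config_token
    warnings.warn("No token found", RuntimeWarning)
    return None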
5,365 |
def get_tests(suite):
"""Generates a sequence of tests from a test suite
"""
for item in suite:
try:
# TODO: This could be "yield from..." with Python 3.3+
for i in get_tests(item):
yield i
except __HOLE__:
yield item
|
TypeError
|
dataset/ETHPy150Open jborg/attic/attic/testsuite/__init__.py/get_tests
|
5,366 |
def find_module(self, fullname, path=None):
tail = fullname.rsplit('.', 1)[-1]
try:
fd, fn, info = imp.find_module(tail, path)
if fullname in self._cache:
old_fd = self._cache[fullname][0]
if old_fd:
old_fd.close()
self._cache[fullname] = (fd, fn, info)
except __HOLE__:
return None
else:
return self # this is a loader as well
|
ImportError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/tests/regressiontests/utils/module_loading.py/ProxyFinder.find_module
|
5,367 |
def parse_arguments():
# Construct the option parser.
usage = '%prog [ACTION] [options] NAMES...'
version = "Epydoc, version %s" % epydoc.__version__
optparser = OptionParser(usage=usage, add_help_option=False)
optparser.add_option('--config',
action='append', dest="configfiles", metavar='FILE',
help=("A configuration file, specifying additional OPTIONS "
"and/or NAMES. This option may be repeated."))
optparser.add_option("--output", "-o",
dest="target", metavar="PATH",
help="The output directory. If PATH does not exist, then "
"it will be created.")
optparser.add_option("--quiet", "-q",
action="count", dest="quiet",
help="Decrease the verbosity.")
optparser.add_option("--verbose", "-v",
action="count", dest="verbose",
help="Increase the verbosity.")
optparser.add_option("--debug",
action="store_true", dest="debug",
help="Show full tracebacks for internal errors.")
optparser.add_option("--simple-term",
action="store_true", dest="simple_term",
help="Do not try to use color or cursor control when displaying "
"the progress bar, warnings, or errors.")
action_group = OptionGroup(optparser, 'Actions')
optparser.add_option_group(action_group)
action_group.add_option("--html",
action="store_const", dest="action", const="html",
help="Write HTML output.")
action_group.add_option("--text",
action="store_const", dest="action", const="text",
help="Write plaintext output. (not implemented yet)")
action_group.add_option("--latex",
action="store_const", dest="action", const="latex",
help="Write LaTeX output.")
action_group.add_option("--dvi",
action="store_const", dest="action", const="dvi",
help="Write DVI output.")
action_group.add_option("--ps",
action="store_const", dest="action", const="ps",
help="Write Postscript output.")
action_group.add_option("--pdf",
action="store_const", dest="action", const="pdf",
help="Write PDF output.")
action_group.add_option("--check",
action="store_const", dest="action", const="check",
help="Check completeness of docs.")
action_group.add_option("--pickle",
action="store_const", dest="action", const="pickle",
help="Write the documentation to a pickle file.")
# Provide our own --help and --version options.
action_group.add_option("--version",
action="store_const", dest="action", const="version",
help="Show epydoc's version number and exit.")
action_group.add_option("-h", "--help",
action="store_const", dest="action", const="help",
help="Show this message and exit. For help on specific "
"topics, use \"--help TOPIC\". Use \"--help topics\" for a "
"list of available help topics")
generation_group = OptionGroup(optparser, 'Generation Options')
optparser.add_option_group(generation_group)
generation_group.add_option("--docformat",
dest="docformat", metavar="NAME",
help="The default markup language for docstrings. Defaults "
"to \"%s\"." % DEFAULT_DOCFORMAT)
generation_group.add_option("--parse-only",
action="store_false", dest="introspect",
help="Get all information from parsing (don't introspect)")
generation_group.add_option("--introspect-only",
action="store_false", dest="parse",
help="Get all information from introspecting (don't parse)")
generation_group.add_option("--exclude",
dest="exclude", metavar="PATTERN", action="append",
help="Exclude modules whose dotted name matches "
"the regular expression PATTERN")
generation_group.add_option("--exclude-introspect",
dest="exclude_introspect", metavar="PATTERN", action="append",
help="Exclude introspection of modules whose dotted name matches "
"the regular expression PATTERN")
generation_group.add_option("--exclude-parse",
dest="exclude_parse", metavar="PATTERN", action="append",
help="Exclude parsing of modules whose dotted name matches "
"the regular expression PATTERN")
generation_group.add_option("--inheritance",
dest="inheritance", metavar="STYLE",
help="The format for showing inheritance objects. STYLE "
"should be one of: %s." % ', '.join(INHERITANCE_STYLES))
generation_group.add_option("--show-private",
action="store_true", dest="show_private",
help="Include private variables in the output. (default)")
generation_group.add_option("--no-private",
action="store_false", dest="show_private",
help="Do not include private variables in the output.")
generation_group.add_option("--show-imports",
action="store_true", dest="show_imports",
help="List each module's imports.")
generation_group.add_option("--no-imports",
action="store_false", dest="show_imports",
help="Do not list each module's imports. (default)")
generation_group.add_option('--show-sourcecode',
action='store_true', dest='include_source_code',
help=("Include source code with syntax highlighting in the "
"HTML output. (default)"))
generation_group.add_option('--no-sourcecode',
action='store_false', dest='include_source_code',
help=("Do not include source code with syntax highlighting in the "
"HTML output."))
generation_group.add_option('--include-log',
action='store_true', dest='include_log',
help=("Include a page with the process log (epydoc-log.html)"))
generation_group.add_option(
'--redundant-details',
action='store_true', dest='redundant_details',
help=("Include values in the details lists even if all info "
"about them is already provided by the summary table."))
output_group = OptionGroup(optparser, 'Output Options')
optparser.add_option_group(output_group)
output_group.add_option("--name", "-n",
dest="prj_name", metavar="NAME",
help="The documented project's name (for the navigation bar).")
output_group.add_option("--css", "-c",
dest="css", metavar="STYLESHEET",
help="The CSS stylesheet. STYLESHEET can be either a "
"builtin stylesheet or the name of a CSS file.")
output_group.add_option("--url", "-u",
dest="prj_url", metavar="URL",
help="The documented project's URL (for the navigation bar).")
output_group.add_option("--navlink",
dest="prj_link", metavar="HTML",
help="HTML code for a navigation link to place in the "
"navigation bar.")
output_group.add_option("--top",
dest="top_page", metavar="PAGE",
help="The \"top\" page for the HTML documentation. PAGE can "
"be a URL, the name of a module or class, or one of the "
"special names \"trees.html\", \"indices.html\", or \"help.html\"")
output_group.add_option("--help-file",
dest="help_file", metavar="FILE",
help="An alternate help file. FILE should contain the body "
"of an HTML file -- navigation bars will be added to it.")
output_group.add_option("--show-frames",
action="store_true", dest="show_frames",
help="Include frames in the HTML output. (default)")
output_group.add_option("--no-frames",
action="store_false", dest="show_frames",
help="Do not include frames in the HTML output.")
output_group.add_option('--separate-classes',
action='store_true', dest='list_classes_separately',
help=("When generating LaTeX or PDF output, list each class in "
"its own section, instead of listing them under their "
"containing module."))
output_group.add_option('--src-code-tab-width',
action='store', type='int', dest='src_code_tab_width',
help=("When generating HTML output, sets the number of spaces "
"each tab in source code listings is replaced with."))
# The group of external API options.
# Skip if the module couldn't be imported (usually missing docutils)
if xlink is not None:
link_group = OptionGroup(optparser,
xlink.ApiLinkReader.settings_spec[0])
optparser.add_option_group(link_group)
for help, names, opts in xlink.ApiLinkReader.settings_spec[2]:
opts = opts.copy()
opts['help'] = help
link_group.add_option(*names, **opts)
graph_group = OptionGroup(optparser, 'Graph Options')
optparser.add_option_group(graph_group)
graph_group.add_option('--graph',
action='append', dest='graphs', metavar='GRAPHTYPE',
help=("Include graphs of type GRAPHTYPE in the generated output. "
"Graphs are generated using the Graphviz dot executable. "
"If this executable is not on the path, then use --dotpath "
"to specify its location. This option may be repeated to "
"include multiple graph types in the output. GRAPHTYPE "
"should be one of: all, %s." % ', '.join(GRAPH_TYPES)))
graph_group.add_option("--dotpath",
dest="dotpath", metavar='PATH',
help="The path to the Graphviz 'dot' executable.")
graph_group.add_option('--graph-font',
dest='graph_font', metavar='FONT',
help=("Specify the font used to generate Graphviz graphs. (e.g., "
"helvetica or times)."))
graph_group.add_option('--graph-font-size',
dest='graph_font_size', metavar='SIZE',
help=("Specify the font size used to generate Graphviz graphs, "
"in points."))
graph_group.add_option('--pstat',
action='append', dest='pstat_files', metavar='FILE',
help="A pstat output file, to be used in generating call graphs.")
# this option is for developers, not users.
graph_group.add_option("--profile-epydoc",
action="store_true", dest="profile",
help=SUPPRESS_HELP or
("Run the hotshot profiler on epydoc itself. Output "
"will be written to profile.out."))
return_group = OptionGroup(optparser, 'Return Value Options')
optparser.add_option_group(return_group)
return_group.add_option("--fail-on-error",
action="store_const", dest="fail_on", const=log.ERROR,
help="Return a non-zero exit status, indicating failure, if any "
"errors are encountered.")
return_group.add_option("--fail-on-warning",
action="store_const", dest="fail_on", const=log.WARNING,
help="Return a non-zero exit status, indicating failure, if any "
"errors or warnings are encountered (not including docstring "
"warnings).")
return_group.add_option("--fail-on-docstring-warning",
action="store_const", dest="fail_on", const=log.DOCSTRING_WARNING,
help="Return a non-zero exit status, indicating failure, if any "
"errors or warnings are encountered (including docstring "
"warnings).")
# Set the option parser's defaults.
optparser.set_defaults(**OPTION_DEFAULTS)
# Parse the arguments.
options, names = optparser.parse_args()
# Print help message, if requested. We also provide support for
# --help [topic]
if options.action == 'help':
names = set([n.lower() for n in names])
for (topic, msg) in HELP_TOPICS.items():
if topic.lower() in names:
print '\n' + msg.rstrip() + '\n'
sys.exit(0)
optparser.print_help()
sys.exit(0)
# Print version message, if requested.
if options.action == 'version':
print version
sys.exit(0)
# Process any config files.
if options.configfiles:
try:
parse_configfiles(options.configfiles, options, names)
except (KeyboardInterrupt,__HOLE__): raise
except Exception, e:
if len(options.configfiles) == 1:
cf_name = 'config file %s' % options.configfiles[0]
else:
cf_name = 'config files %s' % ', '.join(options.configfiles)
optparser.error('Error reading %s:\n %s' % (cf_name, e))
# Check if the input file is a pickle file.
for name in names:
if name.endswith('.pickle'):
if len(names) != 1:
optparser.error("When a pickle file is specified, no other "
"input files may be specified.")
options.load_pickle = True
# Check to make sure all options are valid.
if len(names) == 0:
optparser.error("No names specified.")
# perform shell expansion.
for i, name in reversed(list(enumerate(names[:]))):
if '?' in name or '*' in name:
names[i:i+1] = glob(name)
if options.inheritance not in INHERITANCE_STYLES:
optparser.error("Bad inheritance style. Valid options are " +
",".join(INHERITANCE_STYLES))
if not options.parse and not options.introspect:
optparser.error("Invalid option combination: --parse-only "
"and --introspect-only.")
if options.action == 'text' and len(names) > 1:
optparser.error("--text option takes only one name.")
# Check the list of requested graph types to make sure they're
# acceptable.
options.graphs = [graph_type.lower() for graph_type in options.graphs]
for graph_type in options.graphs:
if graph_type == 'callgraph' and not options.pstat_files:
optparser.error('"callgraph" graph type may only be used if '
'one or more pstat files are specified.')
# If it's 'all', then add everything (but don't add callgraph if
# we don't have any profiling info to base them on).
if graph_type == 'all':
if options.pstat_files:
options.graphs = GRAPH_TYPES
else:
options.graphs = [g for g in GRAPH_TYPES if g != 'callgraph']
break
elif graph_type not in GRAPH_TYPES:
optparser.error("Invalid graph type %s." % graph_type)
# Calculate verbosity.
verbosity = getattr(options, 'verbosity', 0)
options.verbosity = verbosity + options.verbose - options.quiet
# The target default depends on the action.
if options.target is None:
options.target = options.action
# Return parsed args.
options.names = names
return options, names
|
SystemExit
|
dataset/ETHPy150Open QuantSoftware/QuantSoftwareToolkit/Legacy/epydoc-3.0.1/epydoc/cli.py/parse_arguments
|
5,368 |
def _str_to_int(val, optname):
try:
return int(val)
except __HOLE__:
raise ValueError('"%s" option expected an int' % optname)
|
ValueError
|
dataset/ETHPy150Open QuantSoftware/QuantSoftwareToolkit/Legacy/epydoc-3.0.1/epydoc/cli.py/_str_to_int
|
5,369 |
def main(options, names):
# Set the debug flag, if '--debug' was specified.
if options.debug:
epydoc.DEBUG = True
## [XX] Did this serve a purpose? Commenting out for now:
#if options.action == 'text':
# if options.parse and options.introspect:
# options.parse = False
# Set up the logger
if options.simple_term:
TerminalController.FORCE_SIMPLE_TERM = True
if options.action == 'text':
logger = None # no logger for text output.
elif options.verbosity > 1:
logger = ConsoleLogger(options.verbosity)
log.register_logger(logger)
else:
# Each number is a rough approximation of how long we spend on
# that task, used to divide up the unified progress bar.
stages = [40, # Building documentation
7, # Merging parsed & introspected information
1, # Linking imported variables
3, # Indexing documentation
1, # Checking for overridden methods
30, # Parsing Docstrings
1, # Inheriting documentation
2] # Sorting & Grouping
if options.load_pickle:
stages = [30] # Loading pickled documentation
if options.action == 'html': stages += [100]
elif options.action == 'text': stages += [30]
elif options.action == 'latex': stages += [60]
elif options.action == 'dvi': stages += [60,30]
elif options.action == 'ps': stages += [60,40]
elif options.action == 'pdf': stages += [60,50]
elif options.action == 'check': stages += [10]
elif options.action == 'pickle': stages += [10]
else: raise ValueError, '%r not supported' % options.action
if options.parse and not options.introspect:
del stages[1] # no merging
if options.introspect and not options.parse:
del stages[1:3] # no merging or linking
logger = UnifiedProgressConsoleLogger(options.verbosity, stages)
log.register_logger(logger)
# check the output directory.
if options.action not in ('text', 'check', 'pickle'):
if os.path.exists(options.target):
if not os.path.isdir(options.target):
log.error("%s is not a directory" % options.target)
sys.exit(1)
if options.include_log:
if options.action == 'html':
if not os.path.exists(options.target):
os.mkdir(options.target)
log.register_logger(HTMLLogger(options.target, options))
else:
log.warning("--include-log requires --html")
# Set the default docformat
from epydoc import docstringparser
docstringparser.DEFAULT_DOCFORMAT = options.docformat
# Configure the external API linking
if xlink is not None:
try:
xlink.ApiLinkReader.read_configuration(options, problematic=False)
except Exception, exc:
log.error("Error while configuring external API linking: %s: %s"
% (exc.__class__.__name__, exc))
# Set the dot path
if options.dotpath:
from epydoc.docwriter import dotgraph
dotgraph.DOT_COMMAND = options.dotpath
# Set the default graph font & size
if options.graph_font:
from epydoc.docwriter import dotgraph
fontname = options.graph_font
dotgraph.DotGraph.DEFAULT_NODE_DEFAULTS['fontname'] = fontname
dotgraph.DotGraph.DEFAULT_EDGE_DEFAULTS['fontname'] = fontname
if options.graph_font_size:
from epydoc.docwriter import dotgraph
fontsize = options.graph_font_size
dotgraph.DotGraph.DEFAULT_NODE_DEFAULTS['fontsize'] = fontsize
dotgraph.DotGraph.DEFAULT_EDGE_DEFAULTS['fontsize'] = fontsize
# If the input name is a pickle file, then read the docindex that
# it contains. Otherwise, build the docs for the input names.
if options.load_pickle:
assert len(names) == 1
log.start_progress('Deserializing')
log.progress(0.1, 'Loading %r' % names[0])
t0 = time.time()
unpickler = pickle.Unpickler(open(names[0], 'rb'))
unpickler.persistent_load = pickle_persistent_load
docindex = unpickler.load()
log.debug('deserialization time: %.1f sec' % (time.time()-t0))
log.end_progress()
else:
# Build docs for the named values.
from epydoc.docbuilder import build_doc_index
exclude_parse = '|'.join(options.exclude_parse+options.exclude)
exclude_introspect = '|'.join(options.exclude_introspect+
options.exclude)
docindex = build_doc_index(names, options.introspect, options.parse,
add_submodules=(options.action!='text'),
exclude_introspect=exclude_introspect,
exclude_parse=exclude_parse)
if docindex is None:
if log.ERROR in logger.reported_message_levels:
sys.exit(1)
else:
return # docbuilder already logged an error.
# Load profile information, if it was given.
if options.pstat_files:
try: import pstats
except __HOLE__:
log.error("Could not import pstats -- ignoring pstat files.")
try:
profile_stats = pstats.Stats(options.pstat_files[0])
for filename in options.pstat_files[1:]:
profile_stats.add(filename)
except KeyboardInterrupt: raise
except Exception, e:
log.error("Error reading pstat file: %s" % e)
profile_stats = None
if profile_stats is not None:
docindex.read_profiling_info(profile_stats)
# Perform the specified action.
if options.action == 'html':
write_html(docindex, options)
elif options.action in ('latex', 'dvi', 'ps', 'pdf'):
write_latex(docindex, options, options.action)
elif options.action == 'text':
write_text(docindex, options)
elif options.action == 'check':
check_docs(docindex, options)
elif options.action == 'pickle':
write_pickle(docindex, options)
else:
print >>sys.stderr, '\nUnsupported action %s!' % options.action
# If we suppressed docstring warnings, then let the user know.
if logger is not None and logger.suppressed_docstring_warning:
if logger.suppressed_docstring_warning == 1:
prefix = '1 markup error was found'
else:
prefix = ('%d markup errors were found' %
logger.suppressed_docstring_warning)
log.warning("%s while processing docstrings. Use the verbose "
"switch (-v) to display markup errors." % prefix)
# Basic timing breakdown:
if options.verbosity >= 2 and logger is not None:
logger.print_times()
# If we encountered any message types that we were requested to
# fail on, then exit with status 2.
if options.fail_on is not None:
max_reported_message_level = max(logger.reported_message_levels)
if max_reported_message_level >= options.fail_on:
sys.exit(2)
|
ImportError
|
dataset/ETHPy150Open QuantSoftware/QuantSoftwareToolkit/Legacy/epydoc-3.0.1/epydoc/cli.py/main
|
5,370 |
def write_latex(docindex, options, format):
from epydoc.docwriter.latex import LatexWriter
latex_writer = LatexWriter(docindex, **options.__dict__)
log.start_progress('Writing LaTeX docs')
latex_writer.write(options.target)
log.end_progress()
# If we're just generating the latex, and not any output format,
# then we're done.
if format == 'latex': return
if format == 'dvi': steps = 4
elif format == 'ps': steps = 5
elif format == 'pdf': steps = 6
log.start_progress('Processing LaTeX docs')
oldpath = os.path.abspath(os.curdir)
running = None # keep track of what we're doing.
try:
try:
os.chdir(options.target)
# Clear any old files out of the way.
for ext in 'tex aux log out idx ilg toc ind'.split():
if os.path.exists('apidoc.%s' % ext):
os.remove('apidoc.%s' % ext)
# The first pass generates index files.
running = 'latex'
log.progress(0./steps, 'LaTeX: First pass')
run_subprocess('latex api.tex')
# Build the index.
running = 'makeindex'
log.progress(1./steps, 'LaTeX: Build index')
run_subprocess('makeindex api.idx')
# The second pass generates our output.
running = 'latex'
log.progress(2./steps, 'LaTeX: Second pass')
out, err = run_subprocess('latex api.tex')
# The third pass is only necessary if the second pass
# changed what page some things are on.
running = 'latex'
if _RERUN_LATEX_RE.match(out):
log.progress(3./steps, 'LaTeX: Third pass')
out, err = run_subprocess('latex api.tex')
# A fourth pass should (almost?) never be necessary.
running = 'latex'
if _RERUN_LATEX_RE.match(out):
log.progress(3./steps, 'LaTeX: Fourth pass')
run_subprocess('latex api.tex')
# If requested, convert to postscript.
if format in ('ps', 'pdf'):
running = 'dvips'
log.progress(4./steps, 'dvips')
run_subprocess('dvips api.dvi -o api.ps -G0 -Ppdf')
# If requested, convert to pdf.
if format in ('pdf'):
running = 'ps2pdf'
log.progress(5./steps, 'ps2pdf')
run_subprocess(
'ps2pdf -sPAPERSIZE#letter -dMaxSubsetPct#100 '
'-dSubsetFonts#true -dCompatibilityLevel#1.2 '
'-dEmbedAllFonts#true api.ps api.pdf')
except RunSubprocessError, e:
if running == 'latex':
e.out = re.sub(r'(?sm)\A.*?!( LaTeX Error:)?', r'', e.out)
e.out = re.sub(r'(?sm)\s*Type X to quit.*', '', e.out)
e.out = re.sub(r'(?sm)^! Emergency stop.*', '', e.out)
log.error("%s failed: %s" % (running, (e.out+e.err).lstrip()))
except __HOLE__, e:
log.error("%s failed: %s" % (running, e))
finally:
os.chdir(oldpath)
log.end_progress()
|
OSError
|
dataset/ETHPy150Open QuantSoftware/QuantSoftwareToolkit/Legacy/epydoc-3.0.1/epydoc/cli.py/write_latex
|
5,371 |
def cli():
# Parse command-line arguments.
options, names = parse_arguments()
try:
try:
if options.profile:
_profile()
else:
main(options, names)
finally:
log.close()
except __HOLE__:
raise
except KeyboardInterrupt:
print '\n\n'
print >>sys.stderr, 'Keyboard interrupt.'
except:
if options.debug: raise
print '\n\n'
exc_info = sys.exc_info()
if isinstance(exc_info[0], basestring): e = exc_info[0]
else: e = exc_info[1]
print >>sys.stderr, ('\nUNEXPECTED ERROR:\n'
'%s\n' % (str(e) or e.__class__.__name__))
print >>sys.stderr, 'Use --debug to see trace information.'
sys.exit(3)
|
SystemExit
|
dataset/ETHPy150Open QuantSoftware/QuantSoftwareToolkit/Legacy/epydoc-3.0.1/epydoc/cli.py/cli
|
5,372 |
def _profile():
# Hotshot profiler.
if PROFILER == 'hotshot':
try: import hotshot, hotshot.stats
except ImportError:
print >>sys.stderr, "Could not import profile module!"
return
try:
prof = hotshot.Profile('hotshot.out')
prof = prof.runctx('main(*parse_arguments())', globals(), {})
except SystemExit:
pass
prof.close()
# Convert profile.hotshot -> profile.out
print 'Consolidating hotshot profiling info...'
hotshot.stats.load('hotshot.out').dump_stats('profile.out')
# Standard 'profile' profiler.
elif PROFILER == 'profile':
# cProfile module was added in Python 2.5 -- use it if it's
# available, since it's faster.
try: from cProfile import Profile
except ImportError:
try: from profile import Profile
except __HOLE__:
print >>sys.stderr, "Could not import profile module!"
return
# There was a bug in Python 2.4's profiler. Check if it's
# present, and if so, fix it. (Bug was fixed in 2.4maint:
# <http://mail.python.org/pipermail/python-checkins/
# 2005-September/047099.html>)
if (hasattr(Profile, 'dispatch') and
Profile.dispatch['c_exception'] is
Profile.trace_dispatch_exception.im_func):
trace_dispatch_return = Profile.trace_dispatch_return.im_func
Profile.dispatch['c_exception'] = trace_dispatch_return
try:
prof = Profile()
prof = prof.runctx('main(*parse_arguments())', globals(), {})
except SystemExit:
pass
prof.dump_stats('profile.out')
else:
print >>sys.stderr, 'Unknown profiler %s' % PROFILER
return
######################################################################
#{ Logging
######################################################################
|
ImportError
|
dataset/ETHPy150Open QuantSoftware/QuantSoftwareToolkit/Legacy/epydoc-3.0.1/epydoc/cli.py/_profile
|
5,373 |
def addIntergenicSegment(last, this, fasta, options):
"""add an intergenic segment between last and this.
At telomeres, either can be None.
"""
if not this and not last:
return 0
nadded = 0
if not this:
# last telomere
try:
lcontig = fasta.getLength(last.contig)
except __HOLE__, msg:
if options.ignore_missing:
return nadded
else:
raise KeyError(msg)
flank = min(last.end + options.flank, lcontig)
nadded += addFlank(last.end, flank, last, options)
nadded += addSegment("telomeric", flank, lcontig, last, options)
elif not last:
# first telomere
flank = max(0, this.start - options.flank)
nadded += addSegment("telomeric", 0, flank, this, options)
nadded += addFlank(flank, this.start, this, options)
else:
# intergenic region
d = this.start - last.end
flank = options.flank
if d > flank * 2:
nadded += addFlank(last.end, last.end + flank, last, options)
nadded += addSegment("intergenic", last.end +
flank, this.start - flank,
(last, this), options)
nadded += addFlank(this.start - flank, this.start, this, options)
else:
# add short flank between two genes. If they can not agree
# on the directionality, "flank" is used.
is_positive1 = Genomics.IsPositiveStrand(last.strand)
is_positive2 = Genomics.IsPositiveStrand(this.strand)
if is_positive1 and not is_positive2:
key = "3flank"
elif not is_positive1 and is_positive2:
key = "5flank"
else:
key = "flank"
nadded += addSegment(key, last.end, this.start,
(last, this), options)
return nadded
|
KeyError
|
dataset/ETHPy150Open CGATOxford/cgat/scripts/gtf2gff.py/addIntergenicSegment
|
5,374 |
def annotateGenes(iterator, fasta, options):
"""annotate gene structures
This method outputs intervals for first/middle/last exon/intron,
UTRs and flanking regions.
This method annotates per transcript. In order to achieve a unique tiling,
use only a single transcript per gene and remove any overlap between
genes.
"""
gene_iterator = GTF.gene_iterator(iterator)
ngenes, ntranscripts, nskipped = 0, 0, 0
results = []
increment = options.increment
introns_detail = "introns" in options.detail
exons_detail = "exons" in options.detail
for gene in gene_iterator:
ngenes += 1
is_negative_strand = Genomics.IsNegativeStrand(gene[0][0].strand)
try:
lcontig = fasta.getLength(gene[0][0].contig)
except __HOLE__:
nskipped += 1
continue
results = []
for transcript in gene:
def _add(interval, anno):
gtf = GTF.Entry()
gtf.contig = transcript[0].contig
gtf.gene_id = transcript[0].gene_id
gtf.transcript_id = transcript[0].transcript_id
gtf.strand = transcript[0].strand
gtf.feature = anno
gtf.start, gtf.end = interval
results.append(gtf)
ntranscripts += 1
exons = [(x.start, x.end)
for x in transcript if x.feature == "exon"]
if len(exons) == 0:
nskipped += 1
exons.sort()
introns = []
end = exons[0][1]
for exon in exons[1:]:
introns.append((end, exon[0]))
end = exon[1]
# add flank
start, end = exons[0][0], exons[-1][1]
upstream, downstream = [], []
for x in xrange(0, options.flank, increment):
upstream.append((start - increment, start))
start -= increment
downstream.append((end, end + increment))
end += increment
# remove out-of-bounds coordinates
upstream = [x for x in upstream if x[0] >= 0]
downstream = [x for x in downstream if x[1] <= lcontig]
if is_negative_strand:
exons.reverse()
introns.reverse()
upstream, downstream = downstream, upstream
# add exons
if exons_detail:
_add(exons[0], "first_exon")
if len(exons) > 1:
_add(exons[-1], "last_exon")
for e in exons[1:-1]:
_add(e, "middle_exon")
else:
for e in exons:
_add(e, "exon")
# add introns
if introns_detail:
if len(introns) > 0:
_add(introns[0], "first_intron")
if len(introns) > 1:
_add(introns[-1], "last_intron")
for i in introns[1:-1]:
_add(i, "middle_intron")
else:
for i in introns:
_add(i, "intron")
for x, u in enumerate(upstream):
_add(u, "upstream_%i" % (increment * (x + 1)))
for x, u in enumerate(downstream):
_add(u, "downstream_%i" % (increment * (x + 1)))
results.sort(key=lambda x: x.feature)
cache = []
for key, vals in itertools.groupby(results, key=lambda x: x.feature):
v = list(vals)
intervals = [(x.start, x.end) for x in v]
intervals = Intervals.combine(intervals)
for start, end in intervals:
r = GTF.Entry()
r.copy(v[0])
r.start, r.end = start, end
cache.append(r)
cache.sort(key=lambda x: x.start)
for r in cache:
options.stdout.write("%s\n" % str(r))
E.info("ngenes=%i, ntranscripts=%i, nskipped=%i\n" %
(ngenes, ntranscripts, nskipped))
# ------------------------------------------------------------
|
KeyError
|
dataset/ETHPy150Open CGATOxford/cgat/scripts/gtf2gff.py/annotateGenes
|
5,375 |
def _get_profiles(self):
log.info("Getting firefox profiles")
if self.os == 'linux':
profiles_folder = self._expand("~/.mozilla/firefox/")
elif self.os == 'windows':
if platform.release() == "XP":
log.error("Unsupported OS (Windows XP). Returning None")
return None
profiles_folder = self._expand("%APPDATA%\\Mozilla\\Firefox\\")
elif self.os == "darwin":
profiles_folder = self._expand("~/Library/Mozilla/Firefox/")
if not os.path.exists(profiles_folder):
profiles_folder = self._expand("~/Library/Application Support/Firefox/")
else:
log.error("Unsupported OS. Returning None")
return None
log.debug("Firefox profiles root folder set to {}".format(profiles_folder))
if not os.path.exists(profiles_folder):
log.error("profiles_folder does not exists, returning {}")
return {}
try:
profiles_path = os.path.join(profiles_folder, "profiles.ini")
log.debug("profiles.ini path: {}".format(profiles_path))
except __HOLE__:
log.error("Joining folder and profiles.ini failed. Returning None")
return {}
except Exception as e:
log.exception(e)
return {}
if not os.path.exists(profiles_path):
# If profiles.ini does not exist no profile folder exists either
# or does it...
log.error("Profiles path not found. New FF installation?. Returning None")
return {}
profiles = ConfigParser.RawConfigParser()
profiles.read(profiles_path)
profiles.remove_section('General')
available_profiles = {}
for index, profile in enumerate(profiles.sections()):
name = profiles.get(profiles.sections()[index], 'Name')
path = profiles.get(profiles.sections()[index], 'Path')
available_profiles[name] = path
log.debug("Profiles:{}".format(available_profiles))
return available_profiles
|
AttributeError
|
dataset/ETHPy150Open Nikola-K/RESTool/RESTool/browsers/firefox.py/Firefox._get_profiles
|
5,376 |
def _get_res(self, profile_name):
log.debug("Getting firefox path for profile name: {}".format(profile_name))
ff_profile = self.available_profiles.get(profile_name)
res_file = "jetpack/jid1-xUfzOsOFlzSOXg@jetpack/simple-storage/store.json"
if not ff_profile:
log.error("Could not get selected profile path for {}".format(profile_name))
return None
if self.os == 'linux':
res_folder = self._expand("~/.mozilla/firefox/")
elif self.os == 'windows':
if platform.release() == "XP":
log.error("Unsupported OS (Windows XP). Returning None")
return None
res_folder = self._expand("%APPDATA%\\Mozilla\\Firefox\\")
elif self.os == "darwin":
res_folder = self._expand("~/Library/Mozilla/Firefox/")
if not os.path.exists(res_folder):
res_folder = self._expand("~/Library/Application Support/Firefox/")
else:
log.error("Unsupported OS: {} Returning None".format(self.os))
return None
log.debug("Firefox res_folder set to: {}".format(res_folder))
try:
full_path = os.path.join(res_folder, ff_profile, res_file)
log.debug("Full firefox path set to {}".format(full_path))
if os.path.exists(full_path):
log.debug("Full firefox path exists")
return full_path
else:
log.error("Full firefox path does not exist. RES Not installed?")
return None
except __HOLE__:
log.error("Firefox joining failed for {}, {} and {}".format(res_folder, ff_profile, res_file))
return None
except Exception as e:
log.exception(e)
|
AttributeError
|
dataset/ETHPy150Open Nikola-K/RESTool/RESTool/browsers/firefox.py/Firefox._get_res
|
5,377 |
def ec2_id_to_id(ec2_id):
"""Convert an ec2 ID (i-[base 16 number]) to an instance id (int)"""
try:
return int(ec2_id.split('-')[-1], 16)
except __HOLE__:
raise exception.InvalidEc2Id(ec2_id=ec2_id)
|
ValueError
|
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/api/ec2/ec2utils.py/ec2_id_to_id
|
5,378 |
def _try_convert(value):
"""Return a non-string from a string or unicode, if possible.
============= =====================================================
When value is returns
============= =====================================================
zero-length ''
'None' None
'True' True case insensitive
'False' False case insensitive
'0', '-0' 0
0xN, -0xN int from hex (positive) (N is any number)
0bN, -0bN int from binary (positive) (N is any number)
* try conversion to int, float, complex, fallback value
"""
if len(value) == 0:
return ''
if value == 'None':
return None
lowered_value = value.lower()
if lowered_value == 'true':
return True
if lowered_value == 'false':
return False
valueneg = value[1:] if value[0] == '-' else value
if valueneg == '0':
return 0
if valueneg == '':
return value
if valueneg[0] == '0':
if valueneg[1] in 'xX':
return int(value, 16)
elif valueneg[1] in 'bB':
return int(value, 2)
else:
try:
return int(value, 8)
except ValueError:
pass
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except __HOLE__:
pass
try:
return complex(value)
except ValueError:
return value
|
ValueError
|
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/api/ec2/ec2utils.py/_try_convert
|
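The _try_convert record documents its casting rules in the docstring; each failed cast raises ValueError (the label) and control simply falls through to the next candidate type. A hypothetical usage sketch, with the import path taken from the record's module and the expected values taken from the docstring table (not from nova's test suite):

from nova.api.ec2.ec2utils import _try_convert  # path as shown in the record

assert _try_convert('') == ''
assert _try_convert('None') is None
assert _try_convert('TRUE') is True          # booleans are case-insensitive
assert _try_convert('0x10') == 16            # hex
assert _try_convert('0b10') == 2             # binary
assert _try_convert('010') == 8              # leading zero -> octal
assert _try_convert('3.5') == 3.5            # falls through int to float
assert _try_convert('not-a-number') == 'not-a-number'  # unconvertible: returned as-is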
5,379 |
def _key_press_event(self, widget, event):
keyname = gtk.gdk.keyval_name(event.keyval)
if keyname == 'Up':
try:
text = self.history.prev()
self.set_text(text)
self.widget.set_position(len(text))
except __HOLE__:
pass
return True
elif keyname == 'Down':
try:
text = self.history.next()
self.set_text(text)
self.widget.set_position(len(text))
except ValueError:
pass
return True
return False
|
ValueError
|
dataset/ETHPy150Open ejeschke/ginga/ginga/gtkw/Widgets.py/TextEntry._key_press_event
|
5,380 |
def _add_subtree(self, level, shadow, model, parent_item, key, node):
if level >= self.levels:
# leaf node
try:
bnch = shadow[key]
item_iter = bnch.item
# TODO: update leaf item
except KeyError:
# new item
item_iter = model.append(parent_item, [node])
shadow[key] = Bunch.Bunch(node=node, item=item_iter,
terminal=True)
else:
try:
# node already exists
bnch = shadow[key]
item = bnch.item
d = bnch.node
except __HOLE__:
# new node
item = model.append(None, [str(key)])
d = {}
shadow[key] = Bunch.Bunch(node=d, item=item, terminal=False)
# recurse for non-leaf interior node
for key in node:
self._add_subtree(level+1, d, model, item, key, node[key])
|
KeyError
|
dataset/ETHPy150Open ejeschke/ginga/ginga/gtkw/Widgets.py/TreeView._add_subtree
|
5,381 |
def list_or_args(command, keys, args):
oldapi = bool(args)
try:
iter(keys)
if isinstance(keys, (str, unicode)):
raise TypeError
except __HOLE__:
oldapi = True
keys = [keys]
if oldapi:
warnings.warn(DeprecationWarning(
"Passing *args to redis.%s is deprecated. "
"Pass an iterable to ``keys`` instead" % command))
keys.extend(args)
return keys
# Possible first characters in a string containing an integer or a float.
|
TypeError
|
dataset/ETHPy150Open jookies/jasmin/jasmin/vendor/txredisapi.py/list_or_args
|
5,382 |
def dataReceived(self, data, unpause=False):
if unpause is True:
if self.__buffer:
self.__buffer = data + self.__buffer
else:
self.__buffer += data
self.resumeProducing()
else:
self.__buffer = self.__buffer + data
while self.line_mode and not self.paused:
try:
line, self.__buffer = self.__buffer.split(self.delimiter, 1)
except __HOLE__:
if len(self.__buffer) > self.MAX_LENGTH:
line, self.__buffer = self.__buffer, ''
return self.lineLengthExceeded(line)
break
else:
linelength = len(line)
if linelength > self.MAX_LENGTH:
exceeded = line + self.__buffer
self.__buffer = ''
return self.lineLengthExceeded(exceeded)
why = self.lineReceived(line)
if why or self.transport and self.transport.disconnecting:
return why
else:
if not self.paused:
data = self.__buffer
self.__buffer = ''
if data:
return self.rawDataReceived(data)
|
ValueError
|
dataset/ETHPy150Open jookies/jasmin/jasmin/vendor/txredisapi.py/LineReceiver.dataReceived
|
5,383 |
def lineReceived(self, line):
"""
Reply types:
"-" error message
"+" single line status reply
":" integer number (protocol level only?)
"$" bulk data
"*" multi-bulk data
"""
if line:
self.resetTimeout()
token, data = line[0], line[1:]
else:
return
if token == "$": # bulk data
try:
self.bulk_length = long(data)
except ValueError:
self.replyReceived(InvalidResponse("Cannot convert data "
"'%s' to integer" % data))
else:
if self.bulk_length == -1:
self.bulk_length = 0
self.bulkDataReceived(None)
else:
self.bulk_length += 2 # 2 == \r\n
self.setRawMode()
elif token == "*": # multi-bulk data
try:
n = long(data)
except (TypeError, ValueError):
self.multi_bulk = MultiBulkStorage()
self.replyReceived(InvalidResponse("Cannot convert "
"multi-response header "
"'%s' to integer" % data))
else:
self.multi_bulk = self.multi_bulk.set_pending(n)
if n in (0, -1):
self.multiBulkDataReceived()
elif token == "+": # single line status
if data == "QUEUED":
self.transactions += 1
self.replyReceived(data)
else:
if self.multi_bulk.pending:
self.handleMultiBulkElement(data)
else:
self.replyReceived(data)
elif token == "-": # error
reply = ResponseError(data[4:] if data[:4] == "ERR" else data)
if self.multi_bulk.pending:
self.handleMultiBulkElement(reply)
else:
self.replyReceived(reply)
elif token == ":": # integer
try:
reply = int(data)
except __HOLE__:
reply = InvalidResponse(
"Cannot convert data '%s' to integer" % data)
if self.multi_bulk.pending:
self.handleMultiBulkElement(reply)
else:
self.replyReceived(reply)
|
ValueError
|
dataset/ETHPy150Open jookies/jasmin/jasmin/vendor/txredisapi.py/RedisProtocol.lineReceived
|
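The lineReceived docstring in record 5,383 lists the Redis wire-protocol reply markers, and the method dispatches on the first byte of each line, using ValueError (the label) to flag integers that fail to parse. A compact, standalone classifier for the same five markers, independent of twisted and of txredisapi's buffering state, might look like this:

def classify_reply(line):
    """Map one RESP reply line to a (kind, payload) pair."""
    token, data = line[0], line[1:]
    if token == "+":                  # single-line status, e.g. "+OK"
        return ("status", data)
    if token == "-":                  # error message, e.g. "-ERR unknown command"
        return ("error", data)
    if token == ":":                  # integer reply, e.g. ":42"
        return ("integer", int(data))
    if token == "$":                  # bulk header: byte count, -1 means nil
        return ("bulk-length", int(data))
    if token == "*":                  # multi-bulk header: element count
        return ("multi-bulk-length", int(data))
    raise ValueError("unknown reply token: %r" % token)

# classify_reply(":1000") -> ("integer", 1000)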
5,384 |
def bulkDataReceived(self, data):
"""
Receipt of a bulk data element.
"""
el = None
if data is not None:
if data and data[0] in _NUM_FIRST_CHARS: # Most likely a number
try:
el = int(data) if data.find('.') == -1 else float(data)
except __HOLE__:
pass
if el is None:
try:
el = data.decode(self.charset)
except UnicodeDecodeError:
el = data
if self.multi_bulk.pending or self.multi_bulk.items:
self.handleMultiBulkElement(el)
else:
self.replyReceived(el)
|
ValueError
|
dataset/ETHPy150Open jookies/jasmin/jasmin/vendor/txredisapi.py/RedisProtocol.bulkDataReceived
|
5,385 |
def execute_command(self, *args, **kwargs):
if self.connected == 0:
raise ConnectionError("Not connected")
else:
cmds = []
cmd_template = "$%s\r\n%s\r\n"
for s in args:
if isinstance(s, str):
cmd = s
elif isinstance(s, unicode):
try:
cmd = s.encode(self.charset, self.errors)
except UnicodeEncodeError, e:
raise InvalidData(
"Error encoding unicode value '%s': %s" %
(repr(s), e))
elif isinstance(s, float):
try:
cmd = format(s, "f")
except __HOLE__:
cmd = "%0.6f" % s
else:
cmd = str(s)
cmds.append(cmd_template % (len(cmd), cmd))
self.transport.write("*%s\r\n%s" % (len(cmds), "".join(cmds)))
r = self.replyQueue.get().addCallback(self.handle_reply)
if self.inTransaction:
self.post_proc.append(kwargs.get("post_proc"))
else:
if "post_proc" in kwargs:
f = kwargs["post_proc"]
if callable(f):
r.addCallback(f)
return r
##
# REDIS COMMANDS
##
# Connection handling
|
NameError
|
dataset/ETHPy150Open jookies/jasmin/jasmin/vendor/txredisapi.py/RedisProtocol.execute_command
|
5,386 |
def fetch_covtype(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the covertype dataset, downloading it if necessary.
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : string, optional
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : boolean, default=True
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, optional (default=None)
Random state for shuffling the dataset.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
shuffle : bool, default=False
Whether to shuffle dataset.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (581012, 54)
Each row corresponds to the 54 features in the dataset.
dataset.target : numpy array of shape (581012,)
Each value corresponds to one of the 7 forest covertypes with values
ranging between 1 to 7.
dataset.DESCR : string
Description of the forest covertype dataset.
"""
data_home = get_data_home(data_home=data_home)
covtype_dir = join(data_home, "covertype")
samples_path = _pkl_filepath(covtype_dir, "samples")
targets_path = _pkl_filepath(covtype_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(covtype_dir, exist_ok=True)
logger.warning("Downloading %s" % URL)
f = BytesIO(urlopen(URL).read())
Xy = np.genfromtxt(GzipFile(fileobj=f), delimiter=',')
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32)
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except __HOLE__:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
|
NameError
|
dataset/ETHPy150Open scikit-learn/scikit-learn/sklearn/datasets/covtype.py/fetch_covtype
|
5,387 |
def make_rst(self):
app = import_object(self.arguments[0])
for method, path, endpoint in get_routes(app):
try:
blueprint, _, endpoint_internal = endpoint.rpartition('.')
if self.blueprints and blueprint not in self.blueprints:
continue
if blueprint in self.undoc_blueprints:
continue
except __HOLE__:
pass # endpoint is not within a blueprint
if self.endpoints and endpoint not in self.endpoints:
continue
if endpoint in self.undoc_endpoints:
continue
try:
static_url_path = app.static_url_path # Flask 0.7 or higher
except AttributeError:
static_url_path = app.static_path # Flask 0.6 or under
if ('undoc-static' in self.options and endpoint == 'static' and
path == static_url_path + '/(path:filename)'):
continue
view = app.view_functions[endpoint]
docstring = view.__doc__ or ''
if hasattr(view, 'view_class'):
meth_func = getattr(view.view_class, method.lower(), None)
if meth_func and meth_func.__doc__:
docstring = meth_func.__doc__
if not isinstance(docstring, six.text_type):
analyzer = ModuleAnalyzer.for_module(view.__module__)
docstring = force_decode(docstring, analyzer.encoding)
if not docstring and 'include-empty-docstring' not in self.options:
continue
docstring = prepare_docstring(docstring)
for line in http_directive(method, path, docstring):
yield line
|
ValueError
|
dataset/ETHPy150Open preems/nltk-server/docs/source/sphinxcontrib/autohttp/flask.py/AutoflaskDirective.make_rst
|
5,388 |
def _get_version(self, listener):
try:
return int(getattr(listener, 'ROBOT_LISTENER_API_VERSION', 1))
except __HOLE__:
return 1
|
ValueError
|
dataset/ETHPy150Open shellderp/sublime-robot-plugin/lib/robot/output/listeners.py/_ListenerProxy._get_version
|
5,389 |
def try_number(value):
"""
Attempt to cast the string `value` to an int, and failing that, a float,
failing that, raise a ValueError.
"""
for cast_function in [int, float]:
try:
return cast_function(value)
except __HOLE__:
pass
raise ValueError("Unable to use value as int or float: {0!r}"
.format(value))
|
ValueError
|
dataset/ETHPy150Open alphagov/performanceplatform-collector/performanceplatform/collector/ga/core.py/try_number
|
5,390 |
def _flush_pipeline(self):
self.io.flush_send()
while True:
try:
reply = self.reply_queue.pop(0)
except __HOLE__:
return None
reply.recv(self.io)
if reply.is_error():
self.last_error = reply
|
IndexError
|
dataset/ETHPy150Open slimta/python-slimta/slimta/smtp/client.py/Client._flush_pipeline
|
5,391 |
def drop_privs(username):
uid = pwd.getpwnam(username).pw_uid
gid = pwd.getpwnam(username).pw_gid
os.setgroups([])
os.setgid(gid)
os.setuid(uid)
try:
os.setuid(0)
except __HOLE__:
pass
else:
raise AssertionError("setuid(0) succeeded after attempting to drop privileges")
# A decorator to ignore "broken pipe" errors.
|
OSError
|
dataset/ETHPy150Open arlolra/flashproxy/flashproxy/proc.py/drop_privs
|
5,392 |
def catch_epipe(fn):
def ret(self, *args):
try:
return fn(self, *args)
except socket.error, e:
try:
err_num = e.errno
except __HOLE__:
# Before Python 2.6, exception can be a pair.
err_num, errstr = e
except:
raise
if err_num != errno.EPIPE:
raise
return ret
|
AttributeError
|
dataset/ETHPy150Open arlolra/flashproxy/flashproxy/proc.py/catch_epipe
|
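catch_epipe in record 5,392 is a method decorator: EPIPE from a peer that already hung up is swallowed, every other socket.error still propagates, and the AttributeError label covers pre-2.6 exceptions that were plain (errno, message) pairs. A hedged usage sketch (Python 2, matching the original module; the handler class and method are invented for illustration, only the decorator import follows the record's module path):

from flashproxy.proc import catch_epipe

class ReplyWriter(object):
    @catch_epipe
    def send_reply(self, sock, data):
        # If the client disconnected, sendall() raises EPIPE and the
        # decorator ignores it; any other socket error is re-raised.
        sock.sendall(data)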
5,393 |
def upgrade(migrate_engine):
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
types = {}
for instance in migrate_engine.execute(instances.select()):
if instance.instance_type_id is None:
types[instance.id] = None
continue
try:
types[instance.id] = int(instance.instance_type_id)
except __HOLE__:
logging.warn("Instance %s did not have instance_type_id "
"converted to an integer because its value is %s" %
(instance.id, instance.instance_type_id))
types[instance.id] = None
integer_column = Column('instance_type_id_int', Integer(), nullable=True)
string_column = instances.c.instance_type_id
integer_column.create(instances)
for instance_id, instance_type_id in types.iteritems():
update = instances.update().\
where(instances.c.id == instance_id).\
values(instance_type_id_int=instance_type_id)
migrate_engine.execute(update)
string_column.alter(name='instance_type_id_str')
integer_column.alter(name='instance_type_id')
string_column.drop()
|
ValueError
|
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py/upgrade
|
5,394 |
def _eventlet_sendfile(fdout, fdin, offset, nbytes):
while True:
try:
return o_sendfile(fdout, fdin, offset, nbytes)
except __HOLE__ as e:
if e.args[0] == errno.EAGAIN:
trampoline(fdout, write=True)
else:
raise
|
OSError
|
dataset/ETHPy150Open chalasr/Flask-P2P/venv/lib/python2.7/site-packages/gunicorn/workers/geventlet.py/_eventlet_sendfile
|
5,395 |
def authenticate(self, token):
# For backwards compatibility only use the ApiToken model if
# Serrano is installed as an app as this was not a requirement
# previously.
if 'serrano' in settings.INSTALLED_APPS:
# NOTE: This has the limitation of requiring the token to be
# associated with a user since non-user/non-session access is
# not supported in Serrano.
try:
token = ApiToken.objects.get_active_token(token)
return token.user
except ApiToken.DoesNotExist:
pass
pk, token = token_generator.split(token)
try:
pk = int(pk)
except (__HOLE__, TypeError):
return
try:
user = User.objects.get(pk=pk, is_active=True)
except User.DoesNotExist:
return
if token_generator.check(user, token):
return user
|
ValueError
|
dataset/ETHPy150Open chop-dbhi/serrano/serrano/backends.py/TokenBackend.authenticate
|
5,396 |
def first(self):
"""Retrieve the first object matching the query.
"""
try:
result = self[0]
except __HOLE__:
result = None
return result
|
IndexError
|
dataset/ETHPy150Open aparo/django-elasticsearch/django_elasticsearch/manager.py/QuerySet.first
|
5,397 |
def __getitem__(self, key):
"""Support skip and limit using getitem and slicing syntax.
"""
# Slice provided
if isinstance(key, slice):
try:
self._cursor_obj = self._cursor[key]
self._skip, self._limit = key.start, key.stop
except __HOLE__, err:
# PyMongo raises an error if key.start == key.stop, catch it,
# bin it, kill it.
start = key.start or 0
if start >= 0 and key.stop >= 0 and key.step is None:
if start == key.stop:
self.limit(0)
self._skip, self._limit = key.start, key.stop - start
return self
raise err
# Allow further QuerySet modifications to be performed
return self
# Integer index provided
elif isinstance(key, int):
return self._document(**dict_keys_to_str(self._cursor[key]))
|
IndexError
|
dataset/ETHPy150Open aparo/django-elasticsearch/django_elasticsearch/manager.py/QuerySet.__getitem__
|
5,398 |
def daemonize(pidfile=None, progname=None, stdin='/dev/null',
stdout='/dev/null', stderr='/dev/null', umask=022):
"""Fork a daemon process."""
if pidfile:
# Check whether the pid file already exists and refers to a still
# process running
pidfile = os.path.abspath(pidfile)
if os.path.exists(pidfile):
with open(pidfile) as fileobj:
try:
pid = int(fileobj.read())
except ValueError:
sys.exit('Invalid pid in file %s\nPlease remove it to '
'proceed' % pidfile)
try: # signal the process to see if it is still running
os.kill(pid, 0)
if not progname:
progname = os.path.basename(sys.argv[0])
sys.exit('%s is already running with pid %s' % (progname, pid))
except __HOLE__ as e:
if e.errno != errno.ESRCH:
raise
# The pid file must be writable
try:
fileobj = open(pidfile, 'a+')
fileobj.close()
except IOError as e:
from trac.util.text import exception_to_unicode
sys.exit('Error writing to pid file: %s' % exception_to_unicode(e))
# Perform first fork
pid = os.fork()
if pid > 0:
sys.exit(0) # exit first parent
# Decouple from parent environment
os.chdir('/')
os.umask(umask)
os.setsid()
# Perform second fork
pid = os.fork()
if pid > 0:
sys.exit(0) # exit second parent
# The process is now daemonized, redirect standard file descriptors
for stream in sys.stdout, sys.stderr:
stream.flush()
stdin = open(stdin, 'r')
stdout = open(stdout, 'a+')
stderr = open(stderr, 'a+', 0)
os.dup2(stdin.fileno(), sys.stdin.fileno())
os.dup2(stdout.fileno(), sys.stdout.fileno())
os.dup2(stderr.fileno(), sys.stderr.fileno())
if pidfile:
# Register signal handlers to ensure atexit hooks are called on exit
for signum in [signal.SIGTERM, signal.SIGHUP]:
signal.signal(signum, handle_signal)
# Create/update the pid file, and register a hook to remove it when the
# process exits
def remove_pidfile():
if os.path.exists(pidfile):
os.remove(pidfile)
atexit.register(remove_pidfile)
with open(pidfile, 'w') as fileobj:
fileobj.write(str(os.getpid()))
|
OSError
|
dataset/ETHPy150Open edgewall/trac/trac/util/daemon.py/daemonize
|
5,399 |
def test_uninitialized_array(self):
expected = ": out1.x was not initialized. OpenMDAO does not support uninitialized variables."
"""
out1.x is:
- uninitialized
- flattenable
- the source of a connection
- not a slice
"""
top = set_as_top(Assembly())
top.add('out1', self.C1())
top.add('in1', self.C2())
top.connect('out1.x', 'in1.x')
top.driver.workflow.add(['out1', 'in1'])
try:
top.run()
except __HOLE__ as e:
self.assertEqual(str(e), expected)
else:
self.fail("Should have raised error message: {}".format(expected))
"""
out1.x is:
- uninitialized
- not flattenable
- the source of a connection
- not a slice
"""
top = set_as_top(Assembly())
top.add('out1', self.C3())
top.add('in1', self.C2())
top.connect('out1.x', 'in1.x')
top.driver.workflow.add(['out1', 'in1'])
top.run()
"""
out1.x is:
- initialized
- flattenable
- the source of a connection
- not a slice
"""
top = set_as_top(Assembly())
top.add('out1', self.C4(np.eye(2)))
top.add('in1', self.C2())
top.connect('out1.x', 'in1.x')
top.driver.workflow.add(['out1', 'in1'])
top.run()
"""
out1.x is:
- initialized
- flattenable
- the source of a connection
- not a slice
in1.x[::1] is:
- initialized
- flattenable
- the source of a connection
- a slice
"""
top = set_as_top(Assembly())
top.add('out1', self.C4(np.array(range(5))))
top.add('in1', self.C2())
top.add('in2', self.C2())
top.connect('out1.x', 'in1.x')
top.connect('in1.x[::1]', 'in2.x')
top.driver.workflow.add(['out1', 'in1', 'in2'])
top.run()
"""
sub.out1.x is:
- not initialized
- flattenable
- source of a connection
- not a slice
"""
expected = "sub: out1.x was not initialized. OpenMDAO does not support uninitialized variables."
top = set_as_top(Assembly())
top.add('sub', Assembly())
top.sub.add('out1', self.C1())
top.sub.add('in1', self.C2())
top.sub.connect('out1.x', 'in1.x')
top.sub.driver.workflow.add(['out1', 'in1'])
top.driver.workflow.add(['sub'])
try:
top.run()
except ValueError as e:
self.assertEqual(str(e), expected)
else:
self.fail("Should have raised error message: {}".format(expected))
|
ValueError
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/test/test_system.py/UninitializedArray.test_uninitialized_array
|