Unnamed: 0
int64 0
10k
| function
stringlengths 79
138k
| label
stringclasses 20
values | info
stringlengths 42
261
|
---|---|---|---|
3,000 |
@protocol.commands.add('list')
def list_(context, *args):
    """
    *musicpd.org, music database section:*

    ``list {TYPE} [ARTIST]``

    Lists all tags of the specified type. ``TYPE`` should be ``album``,
    ``artist``, ``albumartist``, ``date``, or ``genre``.

    ``ARTIST`` is an optional parameter when type is ``album``,
    ``date``, or ``genre``. This filters the result list by an artist.

    *Clarifications:*

    The musicpd.org documentation for ``list`` is far from complete. The
    command also supports the following variant:

    ``list {TYPE} {QUERY}``

    Where ``QUERY`` applies to all ``TYPE``. ``QUERY`` is one or more pairs
    of a field name and a value. If the ``QUERY`` consists of more than one
    pair, the pairs are AND-ed together to find the result. Examples of
    valid queries and what they should return:

    ``list "artist" "artist" "ABBA"``
        List artists where the artist name is "ABBA". Response::

            Artist: ABBA
            OK

    ``list "album" "artist" "ABBA"``
        Lists albums where the artist name is "ABBA". Response::

            Album: More ABBA Gold: More ABBA Hits
            Album: Absolute More Christmas
            Album: Gold: Greatest Hits
            OK

    ``list "artist" "album" "Gold: Greatest Hits"``
        Lists artists where the album name is "Gold: Greatest Hits".
        Response::

            Artist: ABBA
            OK

    ``list "artist" "artist" "ABBA" "artist" "TLC"``
        Lists artists where the artist name is "ABBA" *and* "TLC". Should
        never match anything. Response::

            OK

    ``list "date" "artist" "ABBA"``
        Lists dates where artist name is "ABBA". Response::

            Date:
            Date: 1992
            Date: 1993
            OK

    ``list "date" "artist" "ABBA" "album" "Gold: Greatest Hits"``
        Lists dates where artist name is "ABBA" and album name is "Gold:
        Greatest Hits". Response::

            Date: 1992
            OK

    ``list "genre" "artist" "The Rolling Stones"``
        Lists genres where artist name is "The Rolling Stones". Response::

            Genre:
            Genre: Rock
            OK

    *ncmpc:*

    - capitalizes the field argument.
    """
    params = list(args)
    if not params:
        raise exceptions.MpdArgError('incorrect arguments')
    field = params.pop(0).lower()
    field = _LIST_MAPPING.get(field)
    if field is None:
        raise exceptions.MpdArgError('incorrect arguments')
    query = None
    if len(params) == 1:
        # the two-argument form is only valid for "list album <artist>"
        if field != 'album':
            raise exceptions.MpdArgError('should be "Album" for 3 arguments')
        if params[0].strip():
            query = {'artist': params}
    else:
        try:
            query = _query_from_mpd_search_parameters(params, _LIST_MAPPING)
        except exceptions.MpdArgError as e:
            e.message = 'not able to parse args'
            raise
        except ValueError:
            # unparsable query: reply with an empty result, not an error
            return
    name = _LIST_NAME_MAPPING[field]
    result = context.core.library.get_distinct(field, query)
    return [(name, value) for value in result.get()]
|
ValueError
|
dataset/ETHPy150Open mopidy/mopidy/mopidy/mpd/protocol/music_db.py/list_
|
3,001 |
@protocol.commands.add('search')
def search(context, *args):
    """
    *musicpd.org, music database section:*

    ``search {TYPE} {WHAT} [...]``

    Searches for any song that contains ``WHAT``. Parameters have the same
    meaning as for ``find``, except that search is not case sensitive.

    *GMPC:*

    - uses the undocumented field ``any``.
    - searches for multiple words like this::

        search any "foo" any "bar" any "baz"

    *ncmpc:*

    - capitalizes the field argument.

    *ncmpcpp:*

    - also uses the search type "date".
    - uses "file" instead of "filename".
    """
    try:
        query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING)
    except ValueError:
        # unparsable query: reply with an empty result, not an error
        return
    with deprecation.ignore('core.library.search:empty_query'):
        results = context.core.library.search(query).get()
    artists = [_artist_as_track(a) for a in _get_artists(results)]
    albums = [_album_as_track(a) for a in _get_albums(results)]
    tracks = _get_tracks(results)
    return translator.tracks_to_mpd_format(artists + albums + tracks)
|
ValueError
|
dataset/ETHPy150Open mopidy/mopidy/mopidy/mpd/protocol/music_db.py/search
|
3,002 |
@protocol.commands.add('searchadd')
def searchadd(context, *args):
    """
    *musicpd.org, music database section:*

    ``searchadd {TYPE} {WHAT} [...]``

    Searches for any song that contains ``WHAT`` in tag ``TYPE`` and adds
    them to current playlist.

    Parameters have the same meaning as for ``find``, except that search is
    not case sensitive.
    """
    try:
        query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING)
    except ValueError:
        # unparsable query: silently add nothing
        return
    results = context.core.library.search(query).get()
    with deprecation.ignore('core.tracklist.add:tracks_arg'):
        # TODO: for now just use tracks as other wise we have to lookup the
        # tracks we just got from the search.
        context.core.tracklist.add(_get_tracks(results)).get()
|
ValueError
|
dataset/ETHPy150Open mopidy/mopidy/mopidy/mpd/protocol/music_db.py/searchadd
|
3,003 |
@protocol.commands.add('searchaddpl')
def searchaddpl(context, *args):
    """
    *musicpd.org, music database section:*

    ``searchaddpl {NAME} {TYPE} {WHAT} [...]``

    Searches for any song that contains ``WHAT`` in tag ``TYPE`` and adds
    them to the playlist named ``NAME``.

    If a playlist by that name doesn't exist it is created.

    Parameters have the same meaning as for ``find``, except that search is
    not case sensitive.
    """
    parameters = list(args)
    if not parameters:
        raise exceptions.MpdArgError('incorrect arguments')
    playlist_name = parameters.pop(0)
    try:
        query = _query_from_mpd_search_parameters(parameters, _SEARCH_MAPPING)
    except ValueError:
        # unparsable query: silently add nothing
        return
    results = context.core.library.search(query).get()
    uri = context.lookup_playlist_uri_from_name(playlist_name)
    playlist = uri is not None and context.core.playlists.lookup(uri).get()
    if not playlist:
        # no existing playlist with that name: create one
        playlist = context.core.playlists.create(playlist_name).get()
    tracks = list(playlist.tracks) + _get_tracks(results)
    playlist = playlist.replace(tracks=tracks)
    context.core.playlists.save(playlist)
|
ValueError
|
dataset/ETHPy150Open mopidy/mopidy/mopidy/mpd/protocol/music_db.py/searchaddpl
|
3,004 |
def get_fields(self):
    """
    Returns all fields of the serializer.

    Each field that defines a ``field_name_suffix`` attribute is returned
    under its name with that suffix appended; fields without the attribute
    keep their original name.
    """
    fields = OrderedDict()
    for field_name, field in super().get_fields().items():
        try:
            field_name += field.field_name_suffix
        except AttributeError:
            # field defines no suffix; keep its name unchanged
            pass
        fields[field_name] = field
    return fields
|
AttributeError
|
dataset/ETHPy150Open OpenSlides/OpenSlides/openslides/utils/rest_api.py/ModelSerializer.get_fields
|
3,005 |
def __eq__(self, other):
    """Attribute-wise equality over the keys reported by ``self.keys()``.

    Returns False for ``None`` and whenever key lookup fails on either
    side; otherwise both objects must agree on every key's attribute.
    """
    if other is None:
        return False
    try:
        for key in self.keys():
            if getattr(self, key) != getattr(other, key):
                return False
        return True
    except KeyError:
        # a key could not be resolved: treat the objects as unequal
        return False
|
KeyError
|
dataset/ETHPy150Open mediawiki-utilities/python-mediawiki-utilities/mw/types/serializable.py/Type.__eq__
|
3,006 |
def test_Signal_to_sys_exit():
    """SIGINT/SIGTERM become SystemExit (ignored by the child); only
    SIGKILL actually terminates the process."""
    global PORT
    PORT += 1

    def f():
        jobmanager.Signal_to_sys_exit()
        while True:
            try:
                time.sleep(10)
            except SystemExit:
                # the installed handler raises SystemExit; swallow and loop
                print("[+] caught SystemExit, keep running")
            else:
                return

    p = mp.Process(target=f)
    p.start()
    time.sleep(0.2)
    assert p.is_alive()
    print("[+] is alive")
    print(" send SIGINT")
    os.kill(p.pid, signal.SIGINT)
    time.sleep(0.2)
    assert p.is_alive()
    print("[+] is alive")
    print(" send SIGTERM")
    os.kill(p.pid, signal.SIGTERM)
    time.sleep(0.2)
    assert p.is_alive()
    print("[+] is alive")
    print(" send SIGKILL")
    os.kill(p.pid, signal.SIGKILL)
    time.sleep(0.2)
    assert not p.is_alive()
    print("[+] terminated")
|
SystemExit
|
dataset/ETHPy150Open cimatosa/jobmanager/tests/test_jobmanager.py/test_Signal_to_sys_exit
|
3,007 |
def test_start_server_on_used_port():
    """Starting a second JobManager_Server on an already-bound port must
    raise RuntimeError or OSError; anything else fails the test."""
    global PORT
    PORT += 1

    def start_server():
        const_arg = None
        arg = [10, 20, 30]
        with jobmanager.JobManager_Server(authkey=AUTHKEY,
                                          port=PORT,
                                          const_arg=const_arg,
                                          fname_dump=None) as server:
            server.args_from_list(arg)
            server.start()

    def start_server2():
        const_arg = None
        arg = [10, 20, 30]
        with jobmanager.JobManager_Server(authkey=AUTHKEY,
                                          port=PORT,
                                          const_arg=const_arg,
                                          fname_dump=None) as server:
            server.args_from_list(arg)
            server.start()

    p1 = mp.Process(target=start_server)
    p1.start()
    time.sleep(1)
    other_error = False
    try:
        start_server2()
    except (RuntimeError, OSError) as e:
        # expected: the port is already in use
        print("caught Exception '{}' {}".format(type(e).__name__, e))
    except:
        other_error = True
    time.sleep(1)
    p1.terminate()
    time.sleep(1)
    p1.join()
    assert not other_error
|
RuntimeError
|
dataset/ETHPy150Open cimatosa/jobmanager/tests/test_jobmanager.py/test_start_server_on_used_port
|
3,008 |
def test_exception():
    """Cross-version autoproxy test: start a server under python2 and
    python3 in turn, then exercise good/bad connections from the current
    interpreter. Cross-version connects are expected to fail."""
    global PORT

    class MyManager_Client(jobmanager.BaseManager):
        pass

    def autoproxy_server(which_python, port, authkey, outfile):
        # build a PYTHONPATH pointing at the requested interpreter's stdlib
        libpath = os.path.dirname(os.__file__)
        python_env = os.environ.copy()
        envpath = "{LIB}:{LIB}/site-packages".format(LIB=libpath)
        envpath += ":{LIB}/lib-old".format(LIB=libpath)
        envpath += ":{LIB}/lib-tk".format(LIB=libpath)
        envpath += ":{LIB}/lib-dynload".format(LIB=libpath)
        envpath += ":{LIB}/plat-linux2".format(LIB=libpath)
        # env will be
        # "/usr/lib/python2.7" for python 2
        # "/usr/lib/python3.4" for python 3
        if which_python == 2:
            python_interpreter = "python2.7"
            envpath = envpath.replace("3.4", "2.7")
        elif which_python == 3:
            python_interpreter = "python3.4"
            envpath = envpath.replace("2.7", "3.4")
        else:
            raise ValueError("'which_python' must be 2 or 3")
        python_env["PYTHONPATH"] = envpath
        path = dirname(abspath(__file__))
        cmd = [python_interpreter,
               "{}/start_autoproxy_server.py".format(path),
               str(port),
               authkey]
        print("+"*40)
        print("start an autoproxy server with command")
        print(cmd)
        print("and environment")
        print(python_env)
        print("+"*40)
        return subprocess.Popen(cmd, env=python_env, stdout=outfile,
                                stderr=subprocess.STDOUT)

    def autoproxy_connect(server, port, authkey):
        MyManager_Client.register('get_q')
        m = MyManager_Client(address=(server, port),
                             authkey=bytearray(authkey, encoding='utf8'))
        jobmanager.call_connect(m.connect,
                                dest=jobmanager.address_authkey_from_manager(m),
                                verbose=2)
        return m

    for p_version_server in [2, 3]:
        PORT += 2  # plus two because we also check for wrong port
        port = PORT
        authkey = 'q'
        with open("ap_server.out", 'w') as outfile:
            p_server = autoproxy_server(p_version_server, port, authkey, outfile)
            print("autoproxy server running with PID {}".format(p_server.pid))
        time.sleep(1)
        try:
            print("running tests with python {} ...".format(sys.version_info[0]))
            print()
            if sys.version_info[0] == 3:
                print("we are using python 3 ... try to connect ...")
                try:
                    autoproxy_connect(server=SERVER, port=port, authkey=authkey)
                except jobmanager.RemoteValueError as e:
                    if p_version_server == 2:
                        # the occurrence of this Exception is normal
                        print("that is ok, because the server is running on python2")
                        print()
                    else:
                        print("RemoteValueError error")
                        raise  # reraise exception
                except Exception as e:
                    print("unexpected error {}".format(e))
                    raise
            elif sys.version_info[0] == 2:
                print("we are using python 2 ... try to connect ...")
                try:
                    autoproxy_connect(server=SERVER, port=port, authkey=authkey)
                except ValueError as e:
                    if p_version_server == 3:
                        # the occurrence of this Exception is normal
                        print("that is ok, because the server is running on python3")
                        print()
                    else:
                        print("JMConnectionRefusedError error")
                        raise  # reraise exception
                except Exception as e:
                    print("unexpected error {}".format(e))
                    raise
            # all the following only for the same python versions
            if (sys.version_info[0] != p_version_server):
                continue
            try:
                print("try to connect to server, use wrong port")
                autoproxy_connect(server=SERVER, port=port+1, authkey=authkey)
            except jobmanager.JMConnectionRefusedError:
                print("that is ok")
                print()
            except:
                raise
            try:
                print("try to connect to server, use wrong authkey")
                autoproxy_connect(server=SERVER, port=port, authkey=authkey+'_')
            except jobmanager.AuthenticationError:
                print("that is ok")
                print()
            except:
                raise
            m = autoproxy_connect(server=SERVER, port=port, authkey=authkey)
            print("try pass some data forth and back ...")
            q = m.get_q()
            q_get = jobmanager.proxy_operation_decorator_python3(q, 'get')
            q_put = jobmanager.proxy_operation_decorator_python3(q, 'put')
            s1 = 'hallo welt'
            q_put(s1)
            s2 = q_get()
            assert s1 == s2
        finally:
            print()
            print("tests done! terminate server ...".format())
            p_server.send_signal(signal.SIGTERM)
            t = time.time()
            timeout = 10
            r = None
            while r is None:
                r = p_server.poll()
                time.sleep(1)
                print("will kill server in {:.1f}s".format(timeout - (time.time() - t)))
                if (time.time() - t) > timeout:
                    print("timeout exceeded, kill p_server")
                    print("the managers subprocess will still be running, and needs to be killed by hand")
                    p_server.send_signal(signal.SIGKILL)
                    break
            print("server terminated with exitcode {}".format(r))
            with open("ap_server.out", 'r') as outfile:
                print("+"*40)
                print("this is the server output:")
                for l in outfile:
                    print(" {}".format(l[:-1]))
                print("+"*40)
|
ValueError
|
dataset/ETHPy150Open cimatosa/jobmanager/tests/test_jobmanager.py/test_exception
|
3,009 |
def _eintr_retry_call(func, *args):
"""Fixes OSErrors and IOErrors
From: http://code.google.com/p/seascope/source/detail?spec=svn8dbe5e23d41db673727ce90fd338e9a43f8877e8&name=8dbe5e23d41d&r=8dbe5e23d41db673727ce90fd338e9a43f8877e8
IOError added
"""
while True:
try:
return func(*args)
except (__HOLE__, IOError), e: # pragma: no cover
if e.errno == errno.EINTR:
continue
raise
|
OSError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/packages/CLTools/init.py/_eintr_retry_call
|
3,010 |
def _add_tool(path):
# first create classes
tool_name = os.path.basename(path)
if isinstance(tool_name, unicode):
tool_name = tool_name.encode('utf-8')
if not tool_name.endswith(SUFFIX): # pragma: no cover
return
(tool_name, _) = os.path.splitext(tool_name)
if tool_name in cl_tools: # pragma: no cover
debug.critical("Package CLTools already added: '%s'" % tool_name)
try:
conf = json.load(open(path))
except ValueError as exc: # pragma: no cover
debug.critical("Package CLTools could not parse '%s'" % path, exc)
return
def compute(self):
""" 1. read inputs
2. call with inputs
3. set outputs
"""
# add all arguments as an unordered list
args = [self.conf['command']]
file_std = 'options' in self.conf and 'std_using_files' in self.conf['options']
fail_with_cmd = 'options' in self.conf and 'fail_with_cmd' in self.conf['options']
setOutput = [] # (name, File) - set File contents as output for name
open_files = []
stdin = None
kwargs = {}
for type, name, klass, options in self.conf['args']:
type = type.lower()
klass = klass.lower()
if "constant" == type:
flag = 'flag' in options and options['flag']
if flag:
args.append(flag)
if name:
# if flag==name we assume user tried to name a constant
if not name == flag:
args.append('%s%s' % (options.get('prefix', ''), name))
elif "input" == type:
# handle multiple inputs
values = self.force_get_input_list(name)
if values and 'list' == klass:
values = values[0]
klass = options['type'].lower() \
if 'type' in options else 'string'
for value in values:
if 'flag' == klass:
if not value:
continue
if 'flag' in options and options['flag']:
value = options['flag']
else:
# use name as flag
value = name
elif klass in ('file', 'directory', 'path'):
value = value.name
# check for flag and append file name
if not 'flag' == klass and 'flag' in options:
args.append(options['flag'])
value = '%s%s' % (options.get('prefix', ''),
value)
args.append(value)
elif "output" == type:
# output must be a filename but we may convert the result to a string
# create new file
file = self.interpreter.filePool.create_file(
suffix=options.get('suffix', DEFAULTFILESUFFIX))
fname = file.name
if 'prefix' in options:
fname = options['prefix'] + fname
if 'flag' in options:
args.append(options['flag'])
args.append(fname)
if "file" == klass:
self.set_output(name, file)
elif "string" == klass:
setOutput.append((name, file))
else:
raise ValueError
elif "inputoutput" == type:
# handle single file that is both input and output
value = self.get_input(name)
# create copy of infile to operate on
outfile = self.interpreter.filePool.create_file(
suffix=options.get('suffix', DEFAULTFILESUFFIX))
try:
shutil.copyfile(value.name, outfile.name)
except __HOLE__, e: # pragma: no cover
raise ModuleError(self,
"Error copying file '%s': %s" %
(value.name, debug.format_exception(e)))
value = '%s%s' % (options.get('prefix', ''), outfile.name)
# check for flag and append file name
if 'flag' in options:
args.append(options['flag'])
args.append(value)
self.set_output(name, outfile)
if "stdin" in self.conf:
name, type, options = self.conf["stdin"]
type = type.lower()
if self.has_input(name):
value = self.get_input(name)
if "file" == type:
if file_std:
f = open(value.name, 'rb')
else:
f = open(value.name, 'rb')
stdin = f.read()
f.close()
elif "string" == type:
if file_std:
file = self.interpreter.filePool.create_file()
f = open(file.name, 'wb')
f.write(value)
f.close()
f = open(file.name, 'rb')
else:
stdin = value
else: # pragma: no cover
raise ValueError
if file_std:
open_files.append(f)
kwargs['stdin'] = f.fileno()
else:
kwargs['stdin'] = subprocess.PIPE
if "stdout" in self.conf:
if file_std:
name, type, options = self.conf["stdout"]
type = type.lower()
file = self.interpreter.filePool.create_file(
suffix=DEFAULTFILESUFFIX)
if "file" == type:
self.set_output(name, file)
elif "string" == type:
setOutput.append((name, file))
else: # pragma: no cover
raise ValueError
f = open(file.name, 'wb')
open_files.append(f)
kwargs['stdout'] = f.fileno()
else:
kwargs['stdout'] = subprocess.PIPE
if "stderr" in self.conf:
if file_std:
name, type, options = self.conf["stderr"]
type = type.lower()
file = self.interpreter.filePool.create_file(
suffix=DEFAULTFILESUFFIX)
if "file" == type:
self.set_output(name, file)
elif "string" == type:
setOutput.append((name, file))
else: # pragma: no cover
raise ValueError
f = open(file.name, 'wb')
open_files.append(f)
kwargs['stderr'] = f.fileno()
else:
kwargs['stderr'] = subprocess.PIPE
if fail_with_cmd:
return_code = 0
else:
return_code = self.conf.get('return_code', None)
env = {}
# 0. add defaults
# 1. add from configuration
# 2. add from module env
# 3. add from env port
if configuration.check('env'):
try:
for var in configuration.env.split(";"):
key, value = var.split('=')
key = key.strip()
value = value.strip()
if key:
env[key] = value
except Exception, e: # pragma: no cover
raise ModuleError(self,
"Error parsing configuration env: %s" % (
debug.format_exception(e)))
if 'options' in self.conf and 'env' in self.conf['options']:
try:
for var in self.conf['options']['env'].split(";"):
key, value = var.split('=')
key = key.strip()
value = value.strip()
if key:
env[key] = value
except Exception, e: # pragma: no cover
raise ModuleError(self,
"Error parsing module env: %s" % (
debug.format_exception(e)))
if 'options' in self.conf and 'env_port' in self.conf['options']:
for e in self.force_get_input_list('env'):
try:
for var in e.split(';'):
if not var:
continue
key, value = var.split('=')
key = key.strip()
value = value.strip()
if key:
env[key] = value
except Exception, e: # pragma: no cover
raise ModuleError(self,
"Error parsing env port: %s" % (
debug.format_exception(e)))
if env:
kwargs['env'] = dict(os.environ)
kwargs['env'].update(env)
# write to execution provenance
env = ';'.join(['%s=%s'%(k,v) for k,v in env.iteritems()])
self.annotate({'execution_env': env})
if 'dir' in self.conf:
kwargs['cwd'] = self.conf['dir']
process = subprocess.Popen(args, **kwargs)
if file_std:
process.wait()
else:
#if stdin:
# print "stdin:", len(stdin), stdin[:30]
stdout, stderr = _eintr_retry_call(process.communicate, stdin)
#stdout, stderr = process.communicate(stdin)
#if stdout:
# print "stdout:", len(stdout), stdout[:30]
#if stderr:
# print "stderr:", len(stderr), stderr[:30]
if return_code is not None:
if process.returncode != return_code:
raise ModuleError(self, "Command returned %d (!= %d)" % (
process.returncode, return_code))
self.set_output('return_code', process.returncode)
for f in open_files:
f.close()
for name, file in setOutput:
f = open(file.name, 'rb')
self.set_output(name, f.read())
f.close()
if not file_std:
if "stdout" in self.conf:
name, type, options = self.conf["stdout"]
type = type.lower()
if "file" == type:
file = self.interpreter.filePool.create_file(
suffix=DEFAULTFILESUFFIX)
f = open(file.name, 'wb')
f.write(stdout)
f.close()
self.set_output(name, file)
elif "string" == type:
self.set_output(name, stdout)
else: # pragma: no cover
raise ValueError
if "stderr" in self.conf:
name, type, options = self.conf["stderr"]
type = type.lower()
if "file" == type:
file = self.interpreter.filePool.create_file(
suffix=DEFAULTFILESUFFIX)
f = open(file.name, 'wb')
f.write(stderr)
f.close()
self.set_output(name, file)
elif "string" == type:
self.set_output(name, stderr)
else: # pragma: no cover
raise ValueError
# create docstring
d = """This module is a wrapper for the command line tool '%s'""" % \
conf['command']
# create module
M = new_module(CLTools, tool_name, {"compute": compute,
"conf": conf,
"tool_name": tool_name,
"__doc__": d})
reg = vistrails.core.modules.module_registry.get_module_registry()
reg.add_module(M, package=identifiers.identifier,
package_version=identifiers.version)
def to_vt_type(s):
# add recognized types here - default is String
return '(basic:%s)' % \
{'file':'File', 'path':'Path', 'directory': 'Directory',
'flag':'Boolean', 'list':'List',
'float':'Float','integer':'Integer'
}.get(s.lower(), 'String')
# add module ports
if 'stdin' in conf:
name, type, options = conf['stdin']
optional = 'required' not in options
reg.add_input_port(M, name, to_vt_type(type), optional=optional)
if 'stdout' in conf:
name, type, options = conf['stdout']
optional = 'required' not in options
reg.add_output_port(M, name, to_vt_type(type), optional=optional)
if 'stderr' in conf:
name, type, options = conf['stderr']
optional = 'required' not in options
reg.add_output_port(M, name, to_vt_type(type), optional=optional)
if 'options' in conf and 'env_port' in conf['options']:
reg.add_input_port(M, 'env', to_vt_type('string'))
for type, name, klass, options in conf['args']:
optional = 'required' not in options
if 'input' == type.lower():
reg.add_input_port(M, name, to_vt_type(klass), optional=optional)
elif 'output' == type.lower():
reg.add_output_port(M, name, to_vt_type(klass), optional=optional)
elif 'inputoutput' == type.lower():
reg.add_input_port(M, name, to_vt_type('file'), optional=optional)
reg.add_output_port(M, name, to_vt_type('file'), optional=optional)
reg.add_output_port(M, 'return_code', to_vt_type('integer'))
cl_tools[tool_name] = M
|
IOError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/packages/CLTools/init.py/_add_tool
|
3,011 |
def assert_unordered_result(self, result, cls, *expected):
    """As assert_result, but the order of objects is not considered.

    The algorithm is very expensive but not a big deal for the small
    numbers of rows that the test suite manipulates.
    """
    class immutabledict(dict):
        # hashable-by-identity dict so specs can live in a set
        def __hash__(self):
            return id(self)

    found = util.IdentitySet(result)
    expected = set([immutabledict(e) for e in expected])
    for wrong in util.itertools_filterfalse(lambda o:
                                            isinstance(o, cls), found):
        fail('Unexpected type "%s", expected "%s"' % (
            type(wrong).__name__, cls.__name__))
    if len(found) != len(expected):
        fail('Unexpected object count "%s", expected "%s"' % (
            len(found), len(expected)))
    NOVALUE = object()

    def _compare_item(obj, spec):
        # True when obj matches every (attr, value) pair of spec;
        # tuple values recurse into nested unordered comparisons
        for key, value in spec.items():
            if isinstance(value, tuple):
                try:
                    self.assert_unordered_result(
                        getattr(obj, key), value[0], *value[1])
                except AssertionError:
                    return False
            else:
                if getattr(obj, key, NOVALUE) != value:
                    return False
        return True

    for expected_item in expected:
        for found_item in found:
            if _compare_item(found_item, expected_item):
                found.remove(found_item)
                break
        else:
            fail(
                "Expected %s instance with attributes %s not found." % (
                    cls.__name__, repr(expected_item)))
    return True
|
AssertionError
|
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/testing/assertions.py/AssertsExecutionResults.assert_unordered_result
|
3,012 |
def parse_permission_set(input):
    """Lookup a permission set name in the defined permissions.

    Requires a Flask app context. String inputs are resolved against the
    current ACL manager's registered sets; unknown names raise
    ValueError. Non-string inputs are returned unchanged.
    """
    if isinstance(input, basestring):
        try:
            return current_acl_manager.permission_sets[input]
        except KeyError:
            raise ValueError('unknown permission set %r' % input)
    return input
|
KeyError
|
dataset/ETHPy150Open mikeboers/Flask-ACL/flask_acl/permission.py/parse_permission_set
|
3,013 |
def test_authenticated_fail_kwargs(self):
    """A keyword-argument call with bad credentials must yield HTTP 401."""
    try:
        self.proxy20.jsonrpc.testAuth(
            username='osammeh', password='password', string='this is a string')
    except IOError as e:
        self.assertEquals(e.args[1], 401)
    else:
        self.assert_(False, 'Didnt return status code 401 on unauthorized access')
|
IOError
|
dataset/ETHPy150Open samuraisam/django-json-rpc/test/test.py/JSONRPCTest.test_authenticated_fail_kwargs
|
3,014 |
def test_authenticated_fail(self):
    """A positional-argument call with bad credentials must yield HTTP 401."""
    try:
        self.proxy10.jsonrpc.testAuth(
            'osammeh', 'password', 'this is a string')
    except IOError as e:
        self.assertEquals(e.args[1], 401)
    else:
        self.assert_(False, 'Didnt return status code 401 on unauthorized access')
|
IOError
|
dataset/ETHPy150Open samuraisam/django-json-rpc/test/test.py/JSONRPCTest.test_authenticated_fail
|
3,015 |
def close(self):
    """closes (terminates) the SSH tunnel"""
    if not self.is_open():
        return
    # nudge the remote shell, then drop our ends of the pipes
    self.proc.stdin.write(BYTES_LITERAL("foo\n\n\n"))
    self.proc.stdin.close()
    self.proc.stdout.close()
    self.proc.stderr.close()
    try:
        self.proc.kill()
    except AttributeError:
        # Popen.kill() unavailable (older Pythons) — fall back to os.kill
        if signal:
            os.kill(self.proc.pid, signal.SIGTERM)
    self.proc.wait()
    self.proc = None
|
AttributeError
|
dataset/ETHPy150Open sccn/SNAP/src/rpyc/utils/ssh.py/SshTunnel.close
|
3,016 |
def set_fqdn(self):
    """Derive ``self.fqdn`` from the record's label and its domain name.

    An empty label yields the bare domain name. Silently returns when the
    related domain does not exist.
    """
    try:
        if self.label == '':
            self.fqdn = self.domain.name
        else:
            self.fqdn = "{0}.{1}".format(self.label,
                                         self.domain.name)
    except ObjectDoesNotExist:
        # no related domain yet; leave fqdn unset
        return
|
ObjectDoesNotExist
|
dataset/ETHPy150Open rtucker-mozilla/mozilla_inventory/mozdns/models.py/MozdnsRecord.set_fqdn
|
3,017 |
def check_for_delegation(self):
    """
    If an object's domain is delegated it should not be able to
    be changed. Delegated domains cannot have objects created in
    them.

    Raises ValidationError when creating (no pk yet) an object in a
    delegated domain; returns silently otherwise.
    """
    try:
        if not self.domain.delegated:
            return
    except ObjectDoesNotExist:
        # no related domain: nothing to validate against
        return
    if not self.pk:  # We don't exist yet.
        raise ValidationError("No objects can be created in the {0}"
                              "domain. It is delegated."
                              .format(self.domain.name))
|
ObjectDoesNotExist
|
dataset/ETHPy150Open rtucker-mozilla/mozilla_inventory/mozdns/models.py/MozdnsRecord.check_for_delegation
|
3,018 |
def _add_choice_input(self):
    """Feed the next scripted importer choice (or the default) into the
    fake terminal IO as the keystroke the UI expects."""
    try:
        choice = self._choices.pop(0)
    except IndexError:
        # scripted choices exhausted; fall back to the default
        choice = self.default_choice
    if choice == importer.action.APPLY:
        self.io.addinput(u'A')
    elif choice == importer.action.ASIS:
        self.io.addinput(u'U')
    elif choice == importer.action.ALBUMS:
        self.io.addinput(u'G')
    elif choice == importer.action.TRACKS:
        self.io.addinput(u'T')
    elif choice == importer.action.SKIP:
        self.io.addinput(u'S')
    elif isinstance(choice, int):
        # a numeric choice picks a candidate from the match menu
        self.io.addinput(u'M')
        self.io.addinput(unicode(choice))
        self._add_choice_input()
    else:
        raise Exception(u'Unknown choice %s' % choice)
|
IndexError
|
dataset/ETHPy150Open beetbox/beets/test/test_ui_importer.py/TestTerminalImportSession._add_choice_input
|
3,019 |
def main(world_folder, start=None, stop=None):
    """Tally block/data-value counts over every region file in *world_folder*.

    Returns a sysexits-style status: 2 (ENOENT) when the folder is
    missing, 75 (EX_TEMPFAIL) when interrupted by Ctrl-C (partial results
    are still printed), and 0 on success.
    """
    if (not os.path.exists(world_folder)):
        print("No such folder as "+world_folder)
        return 2  # ENOENT
    regions = glob.glob(os.path.join(world_folder, 'region', '*.mcr'))
    # up to 16 data numbers in 256 block IDs
    block_data_totals = [[0]*16 for i in range(256)]
    try:
        for filename in regions:
            region_totals = process_region_file(filename, start, stop)
            for i, data in enumerate(region_totals):
                for j, total in enumerate(data):
                    block_data_totals[i][j] += total
    except KeyboardInterrupt:
        # user abort: report what we have so far
        print_results(block_data_totals)
        return 75  # EX_TEMPFAIL
    print_results(block_data_totals)
    return 0  # EX_OK
|
KeyboardInterrupt
|
dataset/ETHPy150Open twoolie/NBT/examples/block_analysis.py/main
|
3,020 |
def _pagination(self, collection, path, **params):
    """Yield successive result pages for *collection* from *path*.

    Follows the 'next' link in each response ('previous' when
    ``page_reverse`` is set) until a page carries no such link.
    """
    if params.get('page_reverse', False):
        linkrel = 'previous'
    else:
        linkrel = 'next'
    next = True
    while next:
        res = self.get(path, params=params)
        yield res
        next = False
        try:
            for link in res['%s_links' % collection]:
                if link['rel'] == linkrel:
                    query_str = urlparse.urlparse(link['href']).query
                    params = urlparse.parse_qs(query_str)
                    next = True
                    break
        except KeyError:
            # response has no links section: this was the last page
            break
|
KeyError
|
dataset/ETHPy150Open openstack/python-group-based-policy-client/gbpclient/v2_0/client.py/Client._pagination
|
3,021 |
def __getitem__(self, index):
    """Lazily pull items from the wrapped iterator until *index* is
    materialized in ``self._list``, then return that element.

    Raises IndexError if the iterator is exhausted before reaching
    *index*.
    """
    try:
        while index >= len(self._list):
            self._list.append(self._iter.next())
    except StopIteration:
        # iterator exhausted; fall through to the (possibly short) list
        pass
    return self._list[index]
|
StopIteration
|
dataset/ETHPy150Open gooli/termenu/examples/paged_menu.py/IteratorList.__getitem__
|
3,022 |
def _req_json_rpc(url, method, *args, **kwargs):
    """Perform one JSON RPC operation.

    Returns the RPC 'result' payload, or None on timeout, parse failure,
    missing result or authentication failure (all logged).
    """
    data = json.dumps({'method': method, 'params': args})
    try:
        res = requests.post(url, data=data, timeout=5, **kwargs)
    except requests.exceptions.Timeout:
        _LOGGER.exception("Connection to the router timed out")
        return
    if res.status_code == 200:
        try:
            result = res.json()
        except ValueError:
            # If json decoder could not parse the response
            _LOGGER.exception("Failed to parse response from luci")
            return
        try:
            return result['result']
        except KeyError:
            _LOGGER.exception("No result in response from luci")
            return
    elif res.status_code == 401:
        # Authentication error
        _LOGGER.exception(
            "Failed to authenticate, "
            "please check your username and password")
        return
    else:
        _LOGGER.error("Invalid response from luci: %s", res)
|
KeyError
|
dataset/ETHPy150Open home-assistant/home-assistant/homeassistant/components/device_tracker/luci.py/_req_json_rpc
|
3,023 |
def print_row_progress(val):
    """Print a row-progress value in cyan, falling back to plain print
    when termcolor's ``cprint`` is not defined."""
    try:
        cprint(' %s' % val, 'cyan', end=' ')
    except NameError:
        # cprint (termcolor) not available
        print(' %s' % val),
    sys.stdout.flush()
|
NameError
|
dataset/ETHPy150Open philipsoutham/py-mysql2pgsql/mysql2pgsql/lib/__init__.py/print_row_progress
|
3,024 |
def print_start_table(val):
    """Print a table-start banner in magenta, falling back to plain print
    when termcolor's ``cprint`` is not defined."""
    try:
        cprint(val, 'magenta')
    except NameError:
        # cprint (termcolor) not available
        print(val)
|
NameError
|
dataset/ETHPy150Open philipsoutham/py-mysql2pgsql/mysql2pgsql/lib/__init__.py/print_start_table
|
3,025 |
def print_table_actions(val):
    """Print a table-action message in green, falling back to plain print
    when termcolor's ``cprint`` is not defined."""
    try:
        cprint(' %s' % val, 'green')
    except NameError:
        # cprint (termcolor) not available
        print(' %s' % val)
|
NameError
|
dataset/ETHPy150Open philipsoutham/py-mysql2pgsql/mysql2pgsql/lib/__init__.py/print_table_actions
|
3,026 |
def print_red(val):
    """Print *val* in red, falling back to plain print when termcolor's
    ``cprint`` is not defined."""
    try:
        cprint(val, 'red')
    except NameError:
        # cprint (termcolor) not available
        print(val)
|
NameError
|
dataset/ETHPy150Open philipsoutham/py-mysql2pgsql/mysql2pgsql/lib/__init__.py/print_red
|
3,027 |
def test_fileno(self):
    """A live HTTP response must expose a working fileno()."""
    req = urllib2.Request("http://www.example.com")
    opener = urllib2.build_opener()
    res = opener.open(req)
    try:
        res.fileno()
    except AttributeError:
        self.fail("HTTPResponse object should return a valid fileno")
    finally:
        res.close()
|
AttributeError
|
dataset/ETHPy150Open francelabs/datafari/windows/python/Lib/test/test_urllib2net.py/OtherNetworkTests.test_fileno
|
3,028 |
def become_daemon(our_home_dir='.', out_log='/dev/null',
                  err_log='/dev/null', umask=0o022):
    "Robustly turn into a UNIX daemon, running in our_home_dir."
    # First fork
    try:
        if os.fork() > 0:
            sys.exit(0)  # kill off parent
    except OSError as e:
        sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)
    os.setsid()
    os.chdir(our_home_dir)
    os.umask(umask)
    # Second fork
    try:
        if os.fork() > 0:
            os._exit(0)
    except OSError as e:
        sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
        os._exit(1)
    si = open('/dev/null', 'r')
    so = open(out_log, 'a+', 0)
    se = open(err_log, 'a+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
    # Set custom file descriptors so that they get proper buffering.
    sys.stdout, sys.stderr = so, se
|
OSError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/utils/daemonize.py/become_daemon
|
3,029 |
def removeSelectedDateListener(self, listener):
    """Remove *listener* from whichever of the two listener lists holds it."""
    try:
        self.selectedDateListeners.remove(listener)
    except ValueError:
        # not registered as a date listener; must be a date-object listener
        self.selectedDObjListeners.remove(listener)
|
ValueError
|
dataset/ETHPy150Open pyjs/pyjs/pyjswidgets/pyjamas/ui/Calendar.py/DateSelectedHandler.removeSelectedDateListener
|
3,030 |
def onCellClicked(self, grid, row, col):
    """Fire a date-selected event for the clicked day cell and hide the
    calendar. The header row, empty cells and non-numeric cells are
    ignored."""
    if row == 0:
        return
    text = grid.getText(row, col).strip()
    if text == "":
        return
    try:
        selectedDay = int(text)
    except ValueError:
        # cell does not contain a day number
        return
    self.fireDateSelectedEvent(date(
        self.currentYear,
        self.currentMonth,
        selectedDay,
    ))
    self.setVisible(False)
|
ValueError
|
dataset/ETHPy150Open pyjs/pyjs/pyjswidgets/pyjamas/ui/Calendar.py/Calendar.onCellClicked
|
3,031 |
def getDate(self):
""" returns datetime.date object or None if empty/unparsable by current format"""
_sdate = self.tbox.getText()
try:
return datetime.strptime(_sdate, self.format).date()
except __HOLE__:
return None
|
ValueError
|
dataset/ETHPy150Open pyjs/pyjs/pyjswidgets/pyjamas/ui/Calendar.py/DateField.getDate
|
3,032 |
def onShowCalendar(self, sender):
txt = self.tbox.getText().strip()
try:
if txt:
_d = datetime.strptime(txt, self.format).date()
self.calendar.setDate(_d)
except __HOLE__: pass
p = CalendarPopup(self.calendar)
x = self.tbox.getAbsoluteLeft() + 10
y = self.tbox.getAbsoluteTop() + 10
p.setPopupPosition(x, y)
p.show()
|
ValueError
|
dataset/ETHPy150Open pyjs/pyjs/pyjswidgets/pyjamas/ui/Calendar.py/DateField.onShowCalendar
|
3,033 |
def _request(self, func, url, *args, **kwargs):
"""
Make a generic request, adding in any proxy defined by the instance.
Raises a ``requests.HTTPError`` if the response status isn't 200, and
raises a :class:`BitstampError` if the response contains a json encoded
error message.
"""
return_json = kwargs.pop('return_json', False)
url = self.api_url + url
response = func(url, *args, **kwargs)
if 'proxies' not in kwargs:
kwargs['proxies'] = self.proxydict
# Check for error, raising an exception if appropriate.
response.raise_for_status()
try:
json_response = response.json()
except __HOLE__:
json_response = None
if isinstance(json_response, dict):
error = json_response.get('error')
if error:
raise BitstampError(error)
if return_json:
if json_response is None:
raise BitstampError(
"Could not decode json for: " + response.text)
return json_response
return response
|
ValueError
|
dataset/ETHPy150Open kmadac/bitstamp-python-client/bitstamp/client.py/BaseClient._request
|
3,034 |
def __log(self, level, msg):
from . import testexecutor
try:
running_test_fixture = testexecutor.current_executor().get_property("running_test_fixture")
except __HOLE__:
pconsole.write_line("[%s] %s" % (logging.getLevelName(level), msg))
else:
running_test_fixture.logs.append({"level": logging.getLevelName(level).lower(), "message": str(msg)})
if config.get_option("verbose"):
# output to pconsole
message = "[%s] %s" % (running_test_fixture.full_name, msg)
pconsole.write_line(message)
|
AttributeError
|
dataset/ETHPy150Open KarlGong/ptest/ptest/plogger.py/PReporter.__log
|
3,035 |
def subclass_of(cls):
def check_class(class_path):
path_parts = class_path.split('.')
if len(path_parts) < 2:
msg = "requires a fully-qualified class name"
raise argparse.ArgumentTypeError(msg)
class_name = path_parts[-1]
module_name = '.'.join(path_parts[0:-1])
try:
m = importlib.import_module(module_name)
except __HOLE__:
msg = "unable to import %s" % module_name
raise argparse.ArgumentTypeError(msg)
kls = getattr(m, class_name, None)
if kls is None:
msg = "class %s not a member of module %s" % (class_name,
module_name)
raise argparse.ArgumentTypeError(msg)
if not issubclass(kls, cls):
msg = "class %s is not a subclass of %s" % (class_name,
cls.__name__)
raise argparse.ArgumentTypeError(msg)
return kls
return check_class
|
ImportError
|
dataset/ETHPy150Open zmap/ztag/ztag/classargs.py/subclass_of
|
3,036 |
@register.assignment_tag(takes_context=True)
def get_contact_fields(context):
try:
contact_vals = ContactPage.objects.values(
'name_organization', 'address_1',
'address_2', 'city', 'country', 'telephone', 'post_code'
)[0]
return contact_vals
except __HOLE__:
return {}
|
IndexError
|
dataset/ETHPy150Open chrisdev/wagtail-cookiecutter-foundation/{{cookiecutter.repo_name}}/utils/templatetags/{{cookiecutter.repo_name}}_utils.py/get_contact_fields
|
3,037 |
def draw(cursor, out=sys.stdout, paginate=True, max_fieldsize=100):
"""Render an result set as an ascii-table.
Renders an SQL result set to `out`, some file-like object.
Assumes that we can determine the current terminal height and
width via the termsize module.
Args:
cursor: An iterable of rows. Each row is a list or tuple
with index access to each cell. The cursor
has a list/tuple of headings via cursor.keys().
out: File-like object.
"""
def heading_line(sizes):
for size in sizes:
write_out('+' + '-' * (size + 2), out)
write_out('+\n', out)
def draw_headings(headings, sizes):
heading_line(sizes)
for idx, size in enumerate(sizes):
fmt = '| %%-%is ' % size
write_out((fmt % headings[idx]), out)
write_out('|\n', out)
heading_line(sizes)
cols, lines = termsize()
headings = list(cursor.keys())
heading_sizes = [len(str(x)) for x in headings]
if paginate:
cursor = isublists(cursor, lines - 4)
# else we assume cursor arrive here pre-paginated
for screenrows in cursor:
sizes = heading_sizes[:]
for row in screenrows:
if row is None:
break
for idx, value in enumerate(row):
if not isinstance(value, string_types):
value = str(value)
size = max(sizes[idx], len(value))
sizes[idx] = min(size, max_fieldsize)
draw_headings(headings, sizes)
for rw in screenrows:
if rw is None:
break # from isublists impl
for idx, size in enumerate(sizes):
fmt = '| %%-%is ' % size
if idx < len(rw):
value = rw[idx]
if not isinstance(value, string_types):
value = str(value)
if len(value) > max_fieldsize:
value = value[:max_fieldsize - 5] + '[...]'
value = value.replace('\n', '^')
value = value.replace('\r', '^').replace('\t', ' ')
value = fmt % value
try:
value = value.encode('utf-8', 'replace')
except __HOLE__:
value = fmt % '?'
write_out(value, out)
write_out('|\n', out)
if not paginate:
heading_line(sizes)
write_out('|\n', out)
out.stdin.flush()
out.stdin.close()
|
UnicodeDecodeError
|
dataset/ETHPy150Open kristianperkins/x_x/x_x/asciitable.py/draw
|
3,038 |
def DeleteBlob(self, blob_key):
"""Delete blob data from disk.
Deleting an unknown blob will not raise an error.
Args:
blob_key: Blob-key of existing blob to delete.
"""
try:
os.remove(self._FileForBlob(blob_key))
except __HOLE__, e:
if e.errno != errno.ENOENT:
raise e
|
OSError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/api/blobstore/file_blob_storage.py/FileBlobStorage.DeleteBlob
|
3,039 |
def run_query(self, query):
try:
error = None
code = compile_restricted(query, '<string>', 'exec')
safe_builtins["_write_"] = self.custom_write
safe_builtins["__import__"] = self.custom_import
safe_builtins["_getattr_"] = getattr
safe_builtins["getattr"] = getattr
safe_builtins["_setattr_"] = setattr
safe_builtins["setattr"] = setattr
safe_builtins["_getitem_"] = self.custom_get_item
safe_builtins["_getiter_"] = self.custom_get_iter
safe_builtins["_print_"] = self._custom_print
restricted_globals = dict(__builtins__=safe_builtins)
restricted_globals["get_query_result"] = self.get_query_result
restricted_globals["execute_query"] = self.execute_query
restricted_globals["add_result_column"] = self.add_result_column
restricted_globals["add_result_row"] = self.add_result_row
restricted_globals["disable_print_log"] = self._custom_print.disable
restricted_globals["enable_print_log"] = self._custom_print.enable
# Supported data types
restricted_globals["TYPE_DATETIME"] = TYPE_DATETIME
restricted_globals["TYPE_BOOLEAN"] = TYPE_BOOLEAN
restricted_globals["TYPE_INTEGER"] = TYPE_INTEGER
restricted_globals["TYPE_STRING"] = TYPE_STRING
restricted_globals["TYPE_DATE"] = TYPE_DATE
restricted_globals["TYPE_FLOAT"] = TYPE_FLOAT
restricted_globals["sorted"] = sorted
restricted_globals["reversed"] = reversed
restricted_globals["min"] = min
restricted_globals["max"] = max
# TODO: Figure out the best way to have a timeout on a script
# One option is to use ETA with Celery + timeouts on workers
# And replacement of worker process every X requests handled.
exec(code) in restricted_globals, self._script_locals
result = self._script_locals['result']
result['log'] = self._custom_print.lines
json_data = json.dumps(result)
except __HOLE__:
error = "Query cancelled by user."
json_data = None
except Exception as e:
error = str(e)
json_data = None
return json_data, error
|
KeyboardInterrupt
|
dataset/ETHPy150Open getredash/redash/redash/query_runner/python.py/Python.run_query
|
3,040 |
def __getattr__(self, name):
try:
return self[name]
except __HOLE__:
raise AttributeError(name)
|
KeyError
|
dataset/ETHPy150Open hvandenb/splunk-elasticsearch/search-elasticsearch/bin/splunklib/data.py/Record.__getattr__
|
3,041 |
def _get_included_directives(self, block):
"""Returns array with the "include" directives expanded out by
concatenating the contents of the included file to the block.
:param list block:
:rtype: list
"""
result = list(block) # Copy the list to keep self.parsed idempotent
for directive in block:
if _is_include_directive(directive):
included_files = glob.glob(
self.abs_path(directive[1]))
for incl in included_files:
try:
result.extend(self.parsed[incl])
except __HOLE__:
pass
return result
|
KeyError
|
dataset/ETHPy150Open letsencrypt/letsencrypt/certbot-nginx/certbot_nginx/parser.py/NginxParser._get_included_directives
|
3,042 |
def _parse_files(self, filepath, override=False):
"""Parse files from a glob
:param str filepath: Nginx config file path
:param bool override: Whether to parse a file that has been parsed
:returns: list of parsed tree structures
:rtype: list
"""
files = glob.glob(filepath)
trees = []
for item in files:
if item in self.parsed and not override:
continue
try:
with open(item) as _file:
parsed = nginxparser.load(_file)
self.parsed[item] = parsed
trees.append(parsed)
except __HOLE__:
logger.warn("Could not open file: %s", item)
except pyparsing.ParseException:
logger.debug("Could not parse file: %s", item)
return trees
|
IOError
|
dataset/ETHPy150Open letsencrypt/letsencrypt/certbot-nginx/certbot_nginx/parser.py/NginxParser._parse_files
|
3,043 |
def filedump(self, ext='tmp'):
"""Dumps parsed configurations into files.
:param str ext: The file extension to use for the dumped files. If
empty, this overrides the existing conf files.
"""
for filename in self.parsed:
tree = self.parsed[filename]
if ext:
filename = filename + os.path.extsep + ext
try:
logger.debug('Dumping to %s:\n%s', filename, nginxparser.dumps(tree))
with open(filename, 'w') as _file:
nginxparser.dump(tree, _file)
except __HOLE__:
logger.error("Could not open file for writing: %s", filename)
|
IOError
|
dataset/ETHPy150Open letsencrypt/letsencrypt/certbot-nginx/certbot_nginx/parser.py/NginxParser.filedump
|
3,044 |
def authority(request, authority_id):
"""
View for individual Authority entries.
"""
authority = Authority.objects.get(id=authority_id)
if not authority.public:
return HttpResponseForbidden()
# Some authority entries are deleted. These should be hidden from public
# view.
if authority.record_status == 'DL':
return Http404("No such Authority")
# If the user has been redirected from another Authority entry, this should
# be indicated in the view.
redirect_from_id = request.GET.get('redirect_from')
if redirect_from_id:
redirect_from = Authority.objects.get(pk=redirect_from_id)
else:
redirect_from = None
# There are several authority entries that redirect to other entries,
# usually because the former is a duplicate of the latter.
if authority.record_status == 'RD' and authority.redirect_to is not None:
redirect_kwargs = {'authority_id': authority.redirect_to.id}
base_url = reverse('authority', kwargs=redirect_kwargs)
redirect_url = base_url + '?redirect_from={0}'.format(authority.id)
return HttpResponseRedirect(redirect_url)
template = loader.get_template('isisdata/authority.html')
show_nr = 3
related_citations_author = ACRelation.objects.filter(authority=authority,
type_controlled__in=['AU'], citation__public=True).order_by('-citation__publication_date')[:show_nr]
related_citations_author_count = ACRelation.objects.filter(authority=authority,
type_controlled__in=['AU']).distinct('citation_id').count()
related_citations_editor = ACRelation.objects.filter(authority=authority,
type_controlled__in=['ED'], citation__public=True).order_by('-citation__publication_date')[:show_nr]
related_citations_editor_count = ACRelation.objects.filter(authority=authority,
type_controlled__in=['ED'], citation__public=True).distinct('citation_id').count()
related_citations_advisor = ACRelation.objects.filter(authority=authority,
type_controlled__in=['AD'], citation__public=True).order_by('-citation__publication_date')[:show_nr]
related_citations_advisor_count = ACRelation.objects.filter(authority=authority,
type_controlled__in=['AD'], citation__public=True).distinct('citation_id').count()
related_citations_contributor = ACRelation.objects.filter(authority=authority,
type_controlled__in=['CO'], citation__public=True).order_by('-citation__publication_date')[:show_nr]
related_citations_contributor_count = ACRelation.objects.filter(authority=authority,
type_controlled__in=['CO'], citation__public=True).distinct('citation_id').count()
related_citations_translator = ACRelation.objects.filter(authority=authority,
type_controlled__in=['TR'], citation__public=True).order_by('-citation__publication_date')[:show_nr]
related_citations_translator_count = ACRelation.objects.filter(authority=authority,
type_controlled__in=['TR'], citation__public=True).distinct('citation_id').count()
related_citations_subject = ACRelation.objects.filter(authority=authority,
type_controlled__in=['SU'], citation__public=True).order_by('-citation__publication_date')[:show_nr]
related_citations_subject_count = ACRelation.objects.filter(authority=authority,
type_controlled__in=['SU'], citation__public=True).distinct('citation_id').count()
related_citations_category = ACRelation.objects.filter(authority=authority,
type_controlled__in=['CA'], citation__public=True).order_by('-citation__publication_date')[:show_nr]
related_citations_category_count = ACRelation.objects.filter(authority=authority,
type_controlled__in=['CA'], citation__public=True).distinct('citation_id').count()
related_citations_publisher = ACRelation.objects.filter(authority=authority,
type_controlled__in=['PU'], citation__public=True).order_by('-citation__publication_date')[:show_nr]
related_citations_publisher_count = ACRelation.objects.filter(authority=authority,
type_controlled__in=['PU'], citation__public=True).distinct('citation_id').count()
related_citations_school = ACRelation.objects.filter(authority=authority,
type_controlled__in=['SC'], citation__public=True).order_by('-citation__publication_date')[:show_nr]
related_citations_school_count = ACRelation.objects.filter(authority=authority,
type_controlled__in=['SC'], citation__public=True).distinct('citation_id').count()
related_citations_institution = ACRelation.objects.filter(authority=authority,
type_controlled__in=['IN'], citation__public=True).order_by('-citation__publication_date')[:show_nr]
related_citations_institution_count = ACRelation.objects.filter(authority=authority,
type_controlled__in=['IN'], citation__public=True).distinct('citation_id').count()
related_citations_meeting = ACRelation.objects.filter(authority=authority,
type_controlled__in=['ME'], citation__public=True).order_by('-citation__publication_date')[:show_nr]
related_citations_meeting_count = ACRelation.objects.filter(authority=authority,
type_controlled__in=['ME'], citation__public=True).distinct('citation_id').count()
related_citations_periodical = ACRelation.objects.filter(authority=authority,
type_controlled__in=['PE'], citation__public=True).order_by('-citation__publication_date')[:show_nr]
related_citations_periodical_count = ACRelation.objects.filter(authority=authority,
type_controlled__in=['PE'], citation__public=True).distinct('citation_id').count()
related_citations_book_series = ACRelation.objects.filter(authority=authority,
type_controlled__in=['BS'], citation__public=True).order_by('-citation__publication_date')[:show_nr]
related_citations_book_series_count = ACRelation.objects.filter(authority=authority,
type_controlled__in=['BS'], citation__public=True).distinct('citation_id').count()
# Location of authority in REST API
api_view = reverse('authority-detail', args=[authority.id], request=request)
# Provide progression through search results, if present.
last_query = request.GET.get('last_query', None) #request.session.get('last_query', None)
query_string = request.GET.get('query_string', None)
fromsearch = request.GET.get('fromsearch', False)
if query_string:
query_string = query_string.encode('ascii','ignore')
search_key = base64.b64encode(query_string) #request.session.get('search_key', None)
else:
search_key = None
# This is the database cache.
user_cache = caches['default']
search_results = user_cache.get('search_results_authority_' + str(search_key))
# make sure we have a session key
if hasattr(request, 'session') and not request.session.session_key:
request.session.save()
request.session.modified = True
session_id = request.session.session_key
page_authority = user_cache.get(session_id + '_page_authority', None)
if search_results and fromsearch and page_authority:
search_count = search_results.count()
prev_search_result = None
if (page_authority > 1):
prev_search_result = search_results[(page_authority - 1)*20 - 1]
# if we got to the last result of the previous page we need to count down the page number
if prev_search_result == 'isisdata.authority.' + authority_id:
page_authority = page_authority - 1
user_cache.set(session_id + '_page_authority', page_authority)
search_results_page = search_results[(page_authority - 1)*20:page_authority*20 + 2]
try:
search_index = search_results_page.index('isisdata.authority.' + authority_id) + 1 # +1 for display.
if search_index == 21:
user_cache.set(session_id + '_page_authority', page_authority+1)
except (IndexError, ValueError):
search_index = None
try:
search_next = search_results_page[search_index]
except (IndexError, ValueError, TypeError):
search_next = None
try:
search_previous = search_results_page[search_index - 2]
if search_index - 2 == -1:
search_previous = prev_search_result
# !! Why are we catching all of these errors?
except (__HOLE__, ValueError, AssertionError, TypeError):
search_previous = None
if search_index:
search_current = search_index + (20* (page_authority - 1))
else:
search_current = None
else:
search_index = None
search_next = None
search_previous = None
search_current = None
search_count = None
context = RequestContext(request, {
'authority_id': authority_id,
'authority': authority,
'related_citations_author': related_citations_author,
'related_citations_author_count': related_citations_author_count,
'related_citations_editor': related_citations_editor,
'related_citations_editor_count': related_citations_editor_count,
'related_citations_advisor': related_citations_advisor,
'related_citations_advisor_count': related_citations_advisor_count,
'related_citations_contributor': related_citations_contributor,
'related_citations_contributor_count': related_citations_contributor_count,
'related_citations_translator': related_citations_translator,
'related_citations_translator_count': related_citations_translator_count,
'related_citations_subject': related_citations_subject,
'related_citations_subject_count': related_citations_subject_count,
'related_citations_category': related_citations_category,
'related_citations_category_count': related_citations_category_count,
'related_citations_publisher': related_citations_publisher,
'related_citations_publisher_count': related_citations_publisher_count,
'related_citations_school': related_citations_school,
'related_citations_school_count': related_citations_school_count,
'related_citations_institution': related_citations_institution,
'related_citations_institution_count': related_citations_institution_count,
'related_citations_meeting': related_citations_meeting,
'related_citations_meeting_count': related_citations_meeting_count,
'related_citations_periodical': related_citations_periodical,
'related_citations_periodical_count': related_citations_periodical_count,
'related_citations_book_series': related_citations_book_series,
'related_citations_book_series_count': related_citations_book_series_count,
'source_instance_id': authority_id,
'source_content_type': ContentType.objects.get(model='authority').id,
'api_view': api_view,
'redirect_from': redirect_from,
'search_results': search_results,
'search_index': search_index,
'search_next': search_next,
'search_previous': search_previous,
'search_current': search_current,
'search_count': search_count,
'fromsearch': fromsearch,
'last_query': last_query,
'query_string': query_string,
})
return HttpResponse(template.render(context))
|
IndexError
|
dataset/ETHPy150Open upconsulting/IsisCB/isiscb/isisdata/views.py/authority
|
3,045 |
def citation(request, citation_id):
"""
View for individual citation record.
"""
template = loader.get_template('isisdata/citation.html')
citation = get_object_or_404(Citation, pk=citation_id)
if not citation.public:
return HttpResponseForbidden()
# Some citations are deleted. These should be hidden from public view.
if citation.status_of_record == 'DL':
return Http404("No such Citation")
authors = citation.acrelation_set.filter(type_controlled__in=['AU', 'CO', 'ED'], citation__public=True)
subjects = citation.acrelation_set.filter(Q(type_controlled__in=['SU'], citation__public=True) & ~Q(authority__type_controlled__in=['GE', 'TI'], citation__public=True))
persons = citation.acrelation_set.filter(type_broad_controlled__in=['PR'], citation__public=True)
categories = citation.acrelation_set.filter(Q(type_controlled__in=['CA']), citation__public=True)
query_time = Q(type_controlled__in=['TI'], citation__public=True) | (Q(type_controlled__in=['SU'], citation__public=True) & Q(authority__type_controlled__in=['TI'], citation__public=True))
time_periods = citation.acrelation_set.filter(query_time)
query_places = Q(type_controlled__in=['SU'], citation__public=True) & Q(authority__type_controlled__in=['GE'], citation__public=True)
places = citation.acrelation_set.filter(query_places)
related_citations_ic = CCRelation.objects.filter(subject_id=citation_id, type_controlled='IC', object__public=True)
related_citations_inv_ic = CCRelation.objects.filter(object_id=citation_id, type_controlled='IC', subject__public=True)
related_citations_isa = CCRelation.objects.filter(subject_id=citation_id, type_controlled='ISA', object__public=True)
related_citations_inv_isa = CCRelation.objects.filter(object_id=citation_id, type_controlled='ISA', subject__public=True)
query = Q(subject_id=citation_id, type_controlled='RO', object__public=True) | Q(object_id=citation_id, type_controlled='RB', subject__public=True)
related_citations_ro = CCRelation.objects.filter(query)
related_citations_rb = CCRelation.objects.filter(subject_id=citation_id, type_controlled='RB', object__public=True)
related_citations_re = CCRelation.objects.filter(subject_id=citation_id, type_controlled='RE', object__public=True)
related_citations_inv_re = CCRelation.objects.filter(object_id=citation_id, type_controlled='RE', subject__public=True)
related_citations_as = CCRelation.objects.filter(subject_id=citation_id, type_controlled='AS', object__public=True)
properties = citation.acrelation_set.exclude(type_controlled__in=['AU', 'ED', 'CO', 'SU', 'CA'])
properties_map = defaultdict(list)
for prop in properties:
properties_map[prop.type_controlled] += [prop]
# Location of citation in REST API
api_view = reverse('citation-detail', args=[citation.id], request=request)
# Provide progression through search results, if present.
# make sure we have a session key
if hasattr(request, 'session') and not request.session.session_key:
request.session.save()
request.session.modified = True
session_id = request.session.session_key
fromsearch = request.GET.get('fromsearch', False)
#search_key = request.session.get('search_key', None)
last_query = request.GET.get('last_query', None) #request.session.get('last_query', None)
query_string = request.GET.get('query_string', None)
if query_string:
query_string = query_string.encode('ascii','ignore')
search_key = base64.b64encode(last_query)
# search_key = base64.b64encode(query_string) #request.session.get('search_key', None)
else:
search_key = None
user_cache = caches['default']
search_results = user_cache.get('search_results_citation_' + str(search_key))
page_citation = user_cache.get(session_id + '_page_citation', None) #request.session.get('page_citation', None)
if search_results and fromsearch and page_citation:
search_count = search_results.count()
prev_search_result = None
# Only display the "previous" link if we are on page 2+.
if page_citation > 1:
prev_search_result = search_results[(page_citation - 1)*20 - 1]
# If we got to the last result of the previous page we need to count
# down the page number.
if prev_search_result == 'isisdata.citation.' + citation_id:
page_citation = page_citation - 1
user_cache.set(session_id + '_page_citation', page_citation)
search_results_page = search_results[(page_citation - 1)*20:page_citation*20 + 2]
try:
search_index = search_results_page.index(citation_id) + 1 # +1 for display.
if search_index == 21:
user_cache.set(session_id + '_page_citation', page_citation+1)
except (IndexError, ValueError):
search_index = None
try:
search_next = search_results_page[search_index]
except (IndexError, ValueError, __HOLE__):
search_next = None
try:
search_previous = search_results_page[search_index - 2]
if search_index - 2 == -1:
search_previous = prev_search_result
except (IndexError, ValueError, AssertionError, TypeError):
search_previous = None
if search_index:
search_current = search_index + (20* (page_citation - 1))
else:
search_current = None
else:
search_index = None
search_next = None
search_previous = None
search_current = None
search_count = 0
#last_query = request.session.get('last_query', None)
context = RequestContext(request, {
'citation_id': citation_id,
'citation': citation,
'authors': authors,
'properties_map': properties,
'subjects': subjects,
'persons': persons,
'categories': categories,
'time_periods': time_periods,
'places': places,
'source_instance_id': citation_id,
'source_content_type': ContentType.objects.get(model='citation').id,
'related_citations_ic': related_citations_ic,
'related_citations_inv_ic': related_citations_inv_ic,
'related_citations_rb': related_citations_rb,
'related_citations_isa': related_citations_isa,
'related_citations_inv_isa': related_citations_inv_isa,
'related_citations_ro': related_citations_ro,
'related_citations_re': related_citations_re,
'related_citations_inv_re': related_citations_inv_re,
'related_citations_as': related_citations_as,
'api_view': api_view,
'search_results': search_results,
'search_index': search_index,
'search_next': search_next,
'search_previous': search_previous,
'search_current': search_current,
'search_count': search_count,
'fromsearch': fromsearch,
'last_query': last_query,
'query_string': query_string,
})
return HttpResponse(template.render(context))
|
TypeError
|
dataset/ETHPy150Open upconsulting/IsisCB/isiscb/isisdata/views.py/citation
|
3,046 |
def build_page(self):
"""
From haystacks SearchView:
Paginates the results appropriately.
In case someone does not want to use Django's built-in pagination, it
should be a simple matter to override this method to do what they would
like.
"""
try:
page_no_authority = int(self.request.GET.get('page_authority', 1))
except (TypeError, __HOLE__):
raise Http404("Not a valid number for page.")
try:
page_no_citation= int(self.request.GET.get('page_citation', 1))
except (TypeError, ValueError):
raise Http404("Not a valid number for page.")
if page_no_authority < 1:
raise Http404("Pages should be 1 or greater.")
if page_no_citation < 1:
raise Http404("Pages should be 1 or greater.")
start_offset_authority = (page_no_authority - 1) * self.results_per_page
start_offset_citation = (page_no_citation - 1) * self.results_per_page
if isinstance(self.queryset, EmptySearchQuerySet):
self.queryset[0:self.results_per_page]
paginator_authority = Paginator(self.queryset, self.results_per_page)
paginator_citation = Paginator(self.queryset, self.results_per_page)
else:
self.queryset['citation'][start_offset_citation:start_offset_citation + self.results_per_page]
self.queryset['authority'][start_offset_authority:start_offset_authority+ self.results_per_page]
paginator_authority = Paginator(self.queryset['authority'], self.results_per_page)
paginator_citation = Paginator(self.queryset['citation'], self.results_per_page)
try:
page_authority = paginator_authority.page(page_no_authority)
except InvalidPage:
try:
page_authority = paginator_authority.page(1)
except InvalidPage:
raise Http404("No such page!")
try:
page_citation = paginator_citation.page(page_no_citation)
except InvalidPage:
try:
page_citation = paginator_citation.page(1)
except InvalidPage:
raise Http404("No such page!")
return ({'authority':paginator_authority, 'citation':paginator_citation}, {'authority':page_authority, 'citation':page_citation})
|
ValueError
|
dataset/ETHPy150Open upconsulting/IsisCB/isiscb/isisdata/views.py/IsisSearchView.build_page
|
3,047 |
@vary_on_headers('Authorization')
def __call__(self, request, *args, **kwargs):
"""
NB: Sends a `Vary` header so we don't cache requests
that are different (OAuth stuff in `Authorization` header.)
"""
rm = request.method.upper()
# Django's internal mechanism doesn't pick up
# PUT request, so we trick it a little here.
if rm == "PUT":
coerce_put_post(request)
actor, anonymous = self.authenticate(request, rm)
if anonymous is CHALLENGE:
return actor()
else:
handler = actor
# Translate nested datastructs into `request.data` here.
if rm in ('POST', 'PUT'):
try:
translate_mime(request)
except MimerDataException:
return rc.BAD_REQUEST
if not hasattr(request, 'data'):
if rm == 'POST':
request.data = request.POST
else:
request.data = request.PUT
if not rm in handler.allowed_methods:
return HttpResponseNotAllowed(handler.allowed_methods)
meth = getattr(handler, self.callmap.get(rm, ''), None)
if not meth:
raise Http404
# Support emitter both through (?P<emitter_format>) and ?format=emitter.
em_format = self.determine_emitter(request, *args, **kwargs)
kwargs.pop('emitter_format', None)
# Clean up the request object a bit, since we might
# very well have `oauth_`-headers in there, and we
# don't want to pass these along to the handler.
request = self.cleanup_request(request)
try:
result = meth(request, *args, **kwargs)
except Exception, e:
result = self.error_handler(e, request, meth, em_format)
try:
emitter, ct = Emitter.get(em_format)
fields = handler.fields
if hasattr(handler, 'list_fields') and isinstance(result, (list, tuple, QuerySet)):
fields = handler.list_fields
except __HOLE__:
result = rc.BAD_REQUEST
result.content = "Invalid output format specified '%s'." % em_format
return result
status_code = 200
# If we're looking at a response object which contains non-string
# content, then assume we should use the emitter to format that
# content
if isinstance(result, HttpResponse) and not result._is_string:
status_code = result.status_code
# Note: We can't use result.content here because that method attempts
# to convert the content into a string which we don't want.
# when _is_string is False _container is the raw data
result = result._container
srl = emitter(result, typemapper, handler, fields, anonymous)
try:
"""
Decide whether or not we want a generator here,
or we just want to buffer up the entire result
before sending it to the client. Won't matter for
smaller datasets, but larger will have an impact.
"""
if self.stream: stream = srl.stream_render(request)
else: stream = srl.render(request)
if not isinstance(stream, HttpResponse):
resp = HttpResponse(stream, mimetype=ct, status=status_code)
else:
resp = stream
resp.streaming = self.stream
return resp
except HttpStatusCode, e:
return e.response
|
ValueError
|
dataset/ETHPy150Open dgraziotin/dycapo/piston/resource.py/Resource.__call__
|
3,048 |
def __enter__(self):
try:
return self.gen.next()
except __HOLE__:
raise RuntimeError("generator didn't yield")
|
StopIteration
|
dataset/ETHPy150Open LarsMichelsen/pmatic/ccu_pkg/python/lib/python2.7/contextlib.py/GeneratorContextManager.__enter__
|
3,049 |
def __exit__(self, type, value, traceback):
if type is None:
try:
self.gen.next()
except StopIteration:
return
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = type()
try:
self.gen.throw(type, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except __HOLE__, exc:
# Suppress the exception *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed
return exc is not value
except:
# only re-raise if it's *not* the exception that was
# passed to throw(), because __exit__() must not raise
# an exception unless __exit__() itself failed. But throw()
# has to raise the exception to signal propagation, so this
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
#
if sys.exc_info()[1] is not value:
raise
|
StopIteration
|
dataset/ETHPy150Open LarsMichelsen/pmatic/ccu_pkg/python/lib/python2.7/contextlib.py/GeneratorContextManager.__exit__
|
3,050 |
def show_info(service, drive_item, prefix, permission_id):
try:
print(os.path.join(prefix, drive_item['title']))
print('Would set new owner to {}.'.format(permission_id))
except __HOLE__:
print('No title for this item:')
pprint.pprint(drive_item)
|
KeyError
|
dataset/ETHPy150Open davidstrauss/google-drive-recursive-ownership/transfer.py/show_info
|
3,051 |
def __call__(self, result):
"""Update test result with the lines in the TAP file.
Provide the interface that TestCase provides to a suite or runner.
"""
result.startTest(self)
if self._line.skip:
try:
result.addSkip(None, self._line.directive.reason)
except AttributeError:
# Python 2.6 does not support skipping.
result.addSuccess(self)
return
if self._line.todo:
if self._line.ok:
try:
result.addUnexpectedSuccess(self)
except AttributeError:
# Python 2.6 does not support unexpected success.
self.addFailure(result)
else:
try:
result.addExpectedFailure(
self, (Exception, Exception(), None))
except __HOLE__:
# Python 2.6 does not support expected failures.
result.addSuccess(self)
return
if self._line.ok:
result.addSuccess(self)
else:
self.addFailure(result)
|
AttributeError
|
dataset/ETHPy150Open mblayman/tappy/tap/adapter.py/Adapter.__call__
|
3,052 |
@one_perm_required_or_403(pr_project_private_perm,
(Project, 'slug__exact', 'project_slug'), anonymous_access=True)
def resource_detail(request, project_slug, resource_slug):
"""
Return the details overview of a project resource.
"""
resource = get_object_or_404(Resource.objects.select_related(), project__slug = project_slug,
slug = resource_slug)
# We want the teams to check in which languages user is permitted to translate.
user_teams = []
if getattr(request, 'user') and request.user.is_authenticated():
user_teams = Team.objects.filter(project=resource.project).filter(
Q(coordinators=request.user)|
Q(members=request.user)).distinct()
try:
autofetch_url = resource.url_info
except __HOLE__:
autofetch_url = None
statslist_src = RLStats.objects.select_related('language', 'last_committer',
'lock','resource').by_resource(resource).filter(
language = F('resource__source_language'))
statslist = RLStats.objects.select_related('language', 'last_committer',
'lock','resource').by_resource(resource).exclude(
language = F('resource__source_language'))
tmp = []
for i in statslist_src:
tmp.append(i)
for i in statslist:
tmp.append(i)
statslist = tmp
return render_to_response("resources/resource_detail.html",
{ 'project' : resource.project,
'resource' : resource,
'autofetch_url': autofetch_url,
'languages' : Language.objects.order_by('name'),
'user_teams' : user_teams,
'statslist': statslist },
context_instance = RequestContext(request))
|
ObjectDoesNotExist
|
dataset/ETHPy150Open rvanlaar/easy-transifex/src/transifex/transifex/resources/views.py/resource_detail
|
3,053 |
def Lookup(self, user_str):
for prefix, func in self.functions:
if user_str.startswith(prefix):
i = len(prefix)
# Usually a space, but could be something else
try:
splitchar = user_str[i]
except __HOLE__:
args = () # No arguments
else:
args = user_str.split(splitchar)[1:]
return func, args
return None, ()
|
IndexError
|
dataset/ETHPy150Open cloudbuilders/radioedit/jsontemplate.py/PrefixRegistry.Lookup
|
3,054 |
def _LookUpStack(self, name):
"""Look up the stack for the given name."""
i = len(self.stack) - 1
while 1:
frame = self.stack[i]
if name == '@index':
if frame.index != -1: # -1 is undefined
return frame.index # @index is 1-based
else:
context = frame.context
if hasattr(context, 'get'): # Can't look up names in a list or atom
try:
return context[name]
except __HOLE__:
pass
i -= 1 # Next frame
if i <= -1: # Couldn't find it anywhere
return self._Undefined(name)
|
KeyError
|
dataset/ETHPy150Open cloudbuilders/radioedit/jsontemplate.py/_ScopedContext._LookUpStack
|
3,055 |
def Lookup(self, name):
"""Get the value associated with a name in the current context.
The current context could be an dictionary in a list, or a dictionary
outside a list.
Args:
name: name to lookup, e.g. 'foo' or 'foo.bar.baz'
Returns:
The value, or self.undefined_str
Raises:
UndefinedVariable if self.undefined_str is not set
"""
if name == '@':
return self.stack[-1].context
parts = name.split('.')
value = self._LookUpStack(parts[0])
# Now do simple lookups of the rest of the parts
for part in parts[1:]:
try:
value = value[part]
except (KeyError, __HOLE__): # TypeError for non-dictionaries
return self._Undefined(part)
return value
|
TypeError
|
dataset/ETHPy150Open cloudbuilders/radioedit/jsontemplate.py/_ScopedContext.Lookup
|
3,056 |
def _DoRepeatedSection(args, context, callback):
"""{repeated section foo}"""
block = args
items = context.PushSection(block.section_name)
# TODO: if 'items' is a dictionary, allow @name and @value.
if items:
if not isinstance(items, list):
raise EvaluationError('Expected a list; got %s' % type(items))
last_index = len(items) - 1
statements = block.Statements()
alt_statements = block.Statements('alternates with')
try:
i = 0
while True:
context.Next()
# Execute the statements in the block for every item in the list.
# Execute the alternate block on every iteration except the last. Each
# item could be an atom (string, integer, etc.) or a dictionary.
_Execute(statements, context, callback)
if i != last_index:
_Execute(alt_statements, context, callback)
i += 1
except __HOLE__:
pass
else:
_Execute(block.Statements('or'), context, callback)
context.Pop()
|
StopIteration
|
dataset/ETHPy150Open cloudbuilders/radioedit/jsontemplate.py/_DoRepeatedSection
|
3,057 |
def _DoSubstitute(args, context, callback):
"""Variable substitution, e.g. {foo}"""
name, formatters = args
# So we can have {.section is_new}new since {@}{.end}. Hopefully this idiom
# is OK.
if name == '@':
value = context.Lookup('@')
else:
try:
value = context.Lookup(name)
except TypeError, e:
raise EvaluationError(
'Error evaluating %r in context %r: %r' % (name, context, e))
for func, args, func_type in formatters:
try:
if func_type == ENHANCED_FUNC:
value = func(value, context, args)
else:
value = func(value)
except __HOLE__:
raise
except Exception, e:
raise EvaluationError(
'Formatting value %r with formatter %s raised exception: %r' %
(value, formatters, e), original_exception=e)
# TODO: Require a string/unicode instance here?
if value is None:
raise EvaluationError('Evaluating %r gave None value' % name)
callback(value)
|
KeyboardInterrupt
|
dataset/ETHPy150Open cloudbuilders/radioedit/jsontemplate.py/_DoSubstitute
|
3,058 |
@get('/kite/<user>/mail')
@with_valid_token
def index(user):
try:
threads_index = DatetimeCabinet("/home/kite/threads.db")
except IOError:
abort(500, "Invalid thread")
return
ret_threads = []
try:
threads = threads_index[user]["threads_index"]
except __HOLE__:
threads = []
for thread in threads:
ret_threads.append(thread)
response.content_type = "application/json"
return serialize_json(ret_threads, protection=False)
|
KeyError
|
dataset/ETHPy150Open khamidou/kite/src/back/kite/server.py/index
|
3,059 |
@get('/kite/<user>/mail/<id>')
@with_valid_token
def index(user, id):
try:
threads_index = DatetimeCabinet("/home/kite/threads.db")
thread = None
except __HOLE__:
response.status = 400
return
# FIXME: use an index for threads entries ?
for thr in threads_index[user]["threads_index"]:
if thr["id"] == id:
thread = thr
if thread == None:
abort(404, "Thread not found.")
thread["unread"] = False
threads_index.sync()
response.content_type = "application/json"
ret_json = {"messages": [],
"subject": thread["subject"],
"date": thread["date"],
"id": thread["id"]
}
mdir = read_mail("/home/kite/Maildirs/%s" % user)
for mail_id in thread["messages"]:
ret_json["messages"].append(get_email(mdir, mail_id))
return serialize_json(ret_json, protection=False)
|
IOError
|
dataset/ETHPy150Open khamidou/kite/src/back/kite/server.py/index
|
3,060 |
def _count(self, filename, idx):
overlapping_genes = set()
genes = set()
# iterate over exons
infile = IOTools.openFile(filename, "r")
it = GTF.iterator(infile)
nexons, nexons_overlapping = 0, 0
nbases, nbases_overlapping = 0, 0
for this in it:
nexons += 1
nbases += this.end - this.start
genes.add(this.gene_id)
try:
intervals = list(idx[this.contig].find(this.start, this.end))
except __HOLE__:
continue
if len(intervals) == 0:
continue
overlapping_genes.add(this.gene_id)
nexons_overlapping += 1
start, end = this.start, this.end
counts = numpy.zeros(end - start, numpy.int)
for other_start, other_end, other_value in intervals:
for x in range(max(start, other_start) - start, min(end, other_end) - start):
counts[x] += 1
nbases_overlapping += sum([1 for x in counts if x > 0])
infile.close()
return len(genes), len(overlapping_genes), nexons, nexons_overlapping, nbases, nbases_overlapping
|
KeyError
|
dataset/ETHPy150Open CGATOxford/cgat/scripts/diff_gtf.py/Counter._count
|
3,061 |
def _count(self, filename, idx):
overlapping_genes = set()
genes = set()
# iterate over exons
infile = IOTools.openFile(filename, "r")
it = GTF.iterator(infile)
for this in it:
genes.add(this.gene_id)
try:
intervals = idx[this.contig].find(this.start, this.end)
except __HOLE__:
continue
if len(intervals) == 0:
continue
overlapping_genes.add(this.gene_id)
infile.close()
return genes, overlapping_genes
|
KeyError
|
dataset/ETHPy150Open CGATOxford/cgat/scripts/diff_gtf.py/CounterGenes._count
|
3,062 |
def main(argv=None):
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("-s", "--ignore-strand", dest="ignore_strand",
action="store_true",
help="ignore strand information [default=%default].")
parser.add_option(
"-u", "--update", dest="filename_update", type="string",
help="if filename is given, previous results will be read"
"from there and only changed sets will be computed "
"[default=%default].")
parser.add_option(
"-p", "--pattern-identifier", dest="pattern_id", type="string",
help="pattern to convert a filename to an id"
"[default=%default].")
parser.add_option(
"-g", "--output-only-genes", dest="output_only_genes",
action="store_true",
help="only output gene stats (includes gene lists)"
" [default=%default].")
parser.set_defaults(
ignore_strand=False,
filename_update=None,
pattern_id="(.*).gtf",
output_only_genes=False,
)
(options, args) = E.Start(parser)
if len(args) < 2:
print USAGE
raise ValueError("at least two arguments required")
if options.filename_update:
infile = open(options.filename_update, "r")
previous_results = {}
for line in infile:
if line.startswith("#"):
continue
if line.startswith("set1"):
continue
data = line[:-1].split("\t")
set1, set2 = data[0], data[1]
if set1 not in previous_results:
previous_results[set1] = {}
if set2 not in previous_results:
previous_results[set2] = {}
previous_results[set1][set2] = "\t".join(data[2:])
rev = [(data[x + 1], data[x]) for x in range(2, len(data), 2)]
previous_results[set2][set1] = "\t".join(IOTools.flatten(rev))
else:
previous_results = {}
if options.output_only_genes:
counter = CounterGenes()
else:
counter = Counter()
options.stdout.write("set1\tset2\t%s\n" % counter.getHeader())
pattern_id = re.compile(options.pattern_id)
def getTitle(x):
try:
return pattern_id.search(x).groups()[0]
except AttributeError:
return x
ncomputed, nupdated = 0, 0
for x in range(len(args)):
title1 = getTitle(args[x])
for y in range(0, x):
title2 = getTitle(args[y])
if previous_results:
try:
prev = previous_results[title1][title2]
except __HOLE__:
pass
else:
options.stdout.write(
"%s\t%s\t%s\n" % ((title1, title2, prev)))
nupdated += 1
continue
counter.count(args[x], args[y])
options.stdout.write(
"%s\t%s\t%s\n" % ((title1, title2, str(counter))))
ncomputed += 1
E.info("nupdated=%i, ncomputed=%i" % (nupdated, ncomputed))
E.Stop()
|
KeyError
|
dataset/ETHPy150Open CGATOxford/cgat/scripts/diff_gtf.py/main
|
3,063 |
def get_proxy(agentConfig):
proxy_settings = {}
# First we read the proxy configuration from config.cfg
proxy_host = agentConfig.get('proxy_host')
if proxy_host is not None:
proxy_settings['host'] = proxy_host
try:
proxy_settings['port'] = int(agentConfig.get('proxy_port', 3128))
except __HOLE__:
log.error('Proxy port must be an Integer. Defaulting it to 3128')
proxy_settings['port'] = 3128
proxy_settings['user'] = agentConfig.get('proxy_user')
proxy_settings['password'] = agentConfig.get('proxy_password')
log.debug("Proxy Settings: %s:*****@%s:%s", proxy_settings['user'],
proxy_settings['host'], proxy_settings['port'])
return proxy_settings
# If no proxy configuration was specified in config.cfg
# We try to read it from the system settings
try:
proxy = getproxies().get('https')
if proxy is not None:
parse = urlparse(proxy)
proxy_settings['host'] = parse.hostname
proxy_settings['port'] = int(parse.port)
proxy_settings['user'] = parse.username
proxy_settings['password'] = parse.password
log.debug("Proxy Settings: %s:*****@%s:%s", proxy_settings['user'],
proxy_settings['host'], proxy_settings['port'])
return proxy_settings
except Exception, e:
log.debug("Error while trying to fetch proxy settings using urllib %s."
"Proxy is probably not set", str(e))
log.debug("No proxy configured")
return None
|
ValueError
|
dataset/ETHPy150Open serverdensity/sd-agent/utils/proxy.py/get_proxy
|
3,064 |
def trap(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except self.throws:
raise
except __HOLE__:
raise
except:
tb = _cperror.format_exc()
#print('trapped (started %s):' % self.started_response, tb)
_cherrypy.log(tb, severity=40)
if not _cherrypy.request.show_tracebacks:
tb = ""
s, h, b = _cperror.bare_error(tb)
if py3k:
# What fun.
s = s.decode('ISO-8859-1')
h = [(k.decode('ISO-8859-1'), v.decode('ISO-8859-1'))
for k, v in h]
if self.started_response:
# Empty our iterable (so future calls raise StopIteration)
self.iter_response = iter([])
else:
self.iter_response = iter(b)
try:
self.start_response(s, h, _sys.exc_info())
except:
# "The application must not trap any exceptions raised by
# start_response, if it called start_response with exc_info.
# Instead, it should allow such exceptions to propagate
# back to the server or gateway."
# But we still log and call close() to clean up ourselves.
_cherrypy.log(traceback=True, severity=40)
raise
if self.started_response:
return ntob("").join(b)
else:
return b
# WSGI-to-CP Adapter #
|
StopIteration
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/cherrypy/cherrypy/_cpwsgi.py/_TrappedResponse.trap
|
3,065 |
def run(self):
"""Create a Request object using environ."""
env = self.environ.get
local = httputil.Host('', int(env('SERVER_PORT', 80)),
env('SERVER_NAME', ''))
remote = httputil.Host(env('REMOTE_ADDR', ''),
int(env('REMOTE_PORT', -1) or -1),
env('REMOTE_HOST', ''))
scheme = env('wsgi.url_scheme')
sproto = env('ACTUAL_SERVER_PROTOCOL', "HTTP/1.1")
request, resp = self.cpapp.get_serving(local, remote, scheme, sproto)
# LOGON_USER is served by IIS, and is the name of the
# user after having been mapped to a local account.
# Both IIS and Apache set REMOTE_USER, when possible.
request.login = env('LOGON_USER') or env('REMOTE_USER') or None
request.multithread = self.environ['wsgi.multithread']
request.multiprocess = self.environ['wsgi.multiprocess']
request.wsgi_environ = self.environ
request.prev = env('cherrypy.previous_request', None)
meth = self.environ['REQUEST_METHOD']
path = httputil.urljoin(self.environ.get('SCRIPT_NAME', ''),
self.environ.get('PATH_INFO', ''))
qs = self.environ.get('QUERY_STRING', '')
if py3k:
# This isn't perfect; if the given PATH_INFO is in the wrong encoding,
# it may fail to match the appropriate config section URI. But meh.
old_enc = self.environ.get('wsgi.url_encoding', 'ISO-8859-1')
new_enc = self.cpapp.find_config(self.environ.get('PATH_INFO', ''),
"request.uri_encoding", 'utf-8')
if new_enc.lower() != old_enc.lower():
# Even though the path and qs are unicode, the WSGI server is
# required by PEP 3333 to coerce them to ISO-8859-1 masquerading
# as unicode. So we have to encode back to bytes and then decode
# again using the "correct" encoding.
try:
u_path = path.encode(old_enc).decode(new_enc)
u_qs = qs.encode(old_enc).decode(new_enc)
except (UnicodeEncodeError, __HOLE__):
# Just pass them through without transcoding and hope.
pass
else:
# Only set transcoded values if they both succeed.
path = u_path
qs = u_qs
rproto = self.environ.get('SERVER_PROTOCOL')
headers = self.translate_headers(self.environ)
rfile = self.environ['wsgi.input']
request.run(meth, path, qs, rproto, headers, rfile)
|
UnicodeDecodeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/cherrypy/cherrypy/_cpwsgi.py/AppResponse.run
|
3,066 |
def _get_client_from_pool_or_make_it(self):
try:
while True:
client = self.__pool.popleft()
if client.is_connected():
if self._is_expired_client(client):
client.disconnect()
continue
break
except __HOLE__:
client = self._make_client()
return (True, client)
return (False, client)
|
IndexError
|
dataset/ETHPy150Open thefab/tornadis/tornadis/pool.py/ClientPool._get_client_from_pool_or_make_it
|
3,067 |
def _autoclose(self):
newpool = deque()
try:
while True:
client = self.__pool.popleft()
if client.is_connected():
if self._is_expired_client(client):
client.disconnect()
else:
newpool.append(client)
except __HOLE__:
self.__pool = newpool
|
IndexError
|
dataset/ETHPy150Open thefab/tornadis/tornadis/pool.py/ClientPool._autoclose
|
3,068 |
def destroy(self):
"""Disconnects all pooled client objects."""
while True:
try:
client = self.__pool.popleft()
if isinstance(client, Client):
client.disconnect()
except __HOLE__:
break
|
IndexError
|
dataset/ETHPy150Open thefab/tornadis/tornadis/pool.py/ClientPool.destroy
|
3,069 |
def main():
"""
Print the TITLE and USAGE and then start the main loop.
"""
print(TITLE)
print(USAGE)
try:
start_process()
except __HOLE__:
print("Shutdown requested...exiting")
except EOFError:
print("\nSystem Exited during user input.")
traceback.print_exc(file=stdout)
|
KeyboardInterrupt
|
dataset/ETHPy150Open AnimeshShaw/Hash-Algorithm-Identifier/hashidentifier/HashIdentifier.py/main
|
3,070 |
@property
def report_context(self):
self.report_template_path = "patient_interactions.html"
ret = super(PatientInteractionsReport, self).report_context
self.update_app_info()
ret['view_mode'] = 'interactions'
ret['problem_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_PD_MODULE, PD1, ret['patient']['_id'])
ret['huddle_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_HUD_MODULE, HUD2, ret['patient']['_id'])
ret['cm_phone_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_CM_MODULE, CM6_PHONE, ret['patient']['_id'])
ret['cm_visits_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_CM_MODULE, CM4, ret['patient']['_id'])
ret['anti_thrombotic_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_MEDICATIONS_MODULE, PD2AM, ret['patient']['_id'])
ret['blood_pressure_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_MEDICATIONS_MODULE, PD2BPM, ret['patient']['_id'])
ret['cholesterol_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_MEDICATIONS_MODULE, PD2CHM, ret['patient']['_id'])
ret['diabetes_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_MEDICATIONS_MODULE, PD2DIABM, ret['patient']['_id'])
ret['depression_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_MEDICATIONS_MODULE, PD2DEPM, ret['patient']['_id'])
ret['smoking_cessation_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_MEDICATIONS_MODULE, PD2SCM, ret['patient']['_id'])
ret['other_meds_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_MEDICATIONS_MODULE, PD2OM, ret['patient']['_id'])
ret['interaction_table'] = []
for visit_key, visit in enumerate(VISIT_SCHEDULE):
if visit['target_date_case_property'] in ret['patient'] and \
ret['patient'][visit['target_date_case_property']]:
try:
target_date = (ret['patient'][visit['target_date_case_property']])
except __HOLE__:
target_date = _("Bad Date Format!")
else:
target_date = EMPTY_FIELD
received_date = EMPTY_FIELD
for completed in visit['completed_date']:
if completed in ret['patient']:
received_date = ret['patient'][completed]
interaction = {
'url': '',
'name': visit['visit_name'],
'target_date': target_date,
'received_date': received_date,
}
if visit['show_button']:
interaction['url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
visit['module_idx'], visit['xmlns'], ret['patient']['_id'])
ret['interaction_table'].append(interaction)
medication = []
for med_prop in MEDICATION_DETAILS:
if med_prop == 'MEDS_diabetes_prescribed':
oral = getattr(ret['patient'], 'MEDS_diabetes-oral_prescribed', None)
insulin = getattr(ret['patient'], 'MEDS_diabetes-insulin_prescribed', None)
if oral == 'yes':
to_append = oral
elif insulin == 'yes':
to_append = insulin
else:
to_append = EMPTY_FIELD
medication.append(to_append)
else:
medication.append(getattr(ret['patient'], med_prop, EMPTY_FIELD))
ret['medication_table'] = medication
user = self.request.couch_user
ret['patient_task_list_url'] = html.escape(
PatientTaskListReport.get_url(*[ret['patient']["domain"]]) +
"?patient_id=%s&task_status=%s" % (ret['patient']["_id"], "open"))
if is_cm(user):
ret['create_new_task_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_CREATE_TASK_MODULE, CM_NEW_TASK,
ret['patient']['_id'])
elif is_chw(user):
ret['create_new_task_url'] = self.get_form_url(self.chw_app_dict, self.latest_chw_build,
CHW_APP_TASK_MODULE, CM_NEW_TASK, ret['patient']['_id'])
ret['view_appointments_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_APPOINTMENTS_MODULE, AP2,
parent_id=ret['patient']['_id'])
ret['add_appointments_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_PD_MODULE, AP1,
case_id=ret['patient']['_id'])
# Risk Factor Table
rows = []
for key, val in RISK_FACTOR_CONFIG.iteritems():
data = [key]
for v in val:
case_data = ret['patient'][v] if v in ret['patient'] else ''
if key == 'Status:':
if case_data:
case_data = case_data.replace('-', ' ').title()
else:
case_data = EMPTY_FIELD
data.append(case_data)
rows.append(data)
ret['risk_factor_table'] = rows
return ret
|
TypeError
|
dataset/ETHPy150Open dimagi/commcare-hq/custom/succeed/reports/patient_interactions.py/PatientInteractionsReport.report_context
|
3,071 |
def linkify(text, callbacks=DEFAULT_CALLBACKS, skip_pre=False,
parse_email=False, tokenizer=HTMLSanitizer):
"""Convert URL-like strings in an HTML fragment to links.
linkify() converts strings that look like URLs or domain names in a
blob of text that may be an HTML fragment to links, while preserving
(a) links already in the string, (b) urls found in attributes, and
(c) email addresses.
"""
text = force_unicode(text)
if not text:
return ''
parser = html5lib.HTMLParser(tokenizer=tokenizer)
forest = parser.parseFragment(text)
_seen = set([])
def replace_nodes(tree, new_frag, node, index=0):
"""
Doesn't really replace nodes, but inserts the nodes contained in
new_frag into the treee at position index and returns the number
of nodes inserted.
If node is passed in, it is removed from the tree
"""
count = 0
new_tree = parser.parseFragment(new_frag)
# capture any non-tag text at the start of the fragment
if new_tree.text:
if index == 0:
tree.text = tree.text or ''
tree.text += new_tree.text
else:
tree[index - 1].tail = tree[index - 1].tail or ''
tree[index - 1].tail += new_tree.text
# the put in the tagged elements into the old tree
for n in new_tree:
if n.tag == ETREE_TAG('a'):
_seen.add(n)
tree.insert(index + count, n)
count += 1
# if we got a node to remove...
if node is not None:
tree.remove(node)
return count
def strip_wrapping_parentheses(fragment):
"""Strips wrapping parentheses.
Returns a tuple of the following format::
(string stripped from wrapping parentheses,
count of stripped opening parentheses,
count of stripped closing parentheses)
"""
opening_parentheses = closing_parentheses = 0
# Count consecutive opening parentheses
# at the beginning of the fragment (string).
for char in fragment:
if char == '(':
opening_parentheses += 1
else:
break
if opening_parentheses:
newer_frag = ''
# Cut the consecutive opening brackets from the fragment.
fragment = fragment[opening_parentheses:]
# Reverse the fragment for easier detection of parentheses
# inside the URL.
reverse_fragment = fragment[::-1]
skip = False
for char in reverse_fragment:
# Remove the closing parentheses if it has a matching
# opening parentheses (they are balanced).
if (char == ')' and
closing_parentheses < opening_parentheses and
not skip):
closing_parentheses += 1
continue
# Do not remove ')' from the URL itself.
elif char != ')':
skip = True
newer_frag += char
fragment = newer_frag[::-1]
return fragment, opening_parentheses, closing_parentheses
def apply_callbacks(attrs, new):
for cb in callbacks:
attrs = cb(attrs, new)
if attrs is None:
return None
return attrs
def _render_inner(node):
out = ['' if node.text is None else node.text]
for subnode in node:
out.append(_render(subnode))
if subnode.tail:
out.append(subnode.tail)
return ''.join(out)
def linkify_nodes(tree, parse_text=True):
children = len(tree)
current_child = -1
# start at -1 to process the parent first
while current_child < len(tree):
if current_child < 0:
node = tree
if parse_text and node.text:
new_txt = old_txt = node.text
if parse_email:
new_txt = re.sub(email_re, email_repl, node.text)
if new_txt and new_txt != node.text:
node.text = ''
adj = replace_nodes(tree, new_txt, None, 0)
children += adj
current_child += adj
linkify_nodes(tree, True)
continue
new_txt = re.sub(url_re, link_repl, new_txt)
if new_txt != old_txt:
node.text = ''
adj = replace_nodes(tree, new_txt, None, 0)
children += adj
current_child += adj
continue
else:
node = tree[current_child]
if parse_text and node.tail:
new_tail = old_tail = node.tail
if parse_email:
new_tail = re.sub(email_re, email_repl, new_tail)
if new_tail != node.tail:
node.tail = ''
adj = replace_nodes(tree, new_tail, None,
current_child + 1)
# Insert the new nodes made from my tail into
# the tree right after me. current_child+1
children += adj
continue
new_tail = re.sub(url_re, link_repl, new_tail)
if new_tail != old_tail:
node.tail = ''
adj = replace_nodes(tree, new_tail, None,
current_child + 1)
children += adj
if node.tag == ETREE_TAG('a') and not (node in _seen):
if not node.get('href', None) is None:
attrs = dict(node.items())
_text = attrs['_text'] = _render_inner(node)
attrs = apply_callbacks(attrs, False)
if attrs is None:
# <a> tag replaced by the text within it
adj = replace_nodes(tree, _text, node,
current_child)
current_child -= 1
# pull back current_child by 1 to scan the
# new nodes again.
else:
text = force_unicode(attrs.pop('_text'))
for attr_key, attr_val in attrs.items():
node.set(attr_key, attr_val)
for n in reversed(list(node)):
node.remove(n)
text = parser.parseFragment(text)
node.text = text.text
for n in text:
node.append(n)
_seen.add(node)
elif current_child >= 0:
if node.tag == ETREE_TAG('pre') and skip_pre:
linkify_nodes(node, False)
elif not (node in _seen):
linkify_nodes(node, True)
current_child += 1
def email_repl(match):
addr = match.group(0).replace('"', '"')
link = {
'_text': addr,
'href': 'mailto:{0!s}'.format(addr),
}
link = apply_callbacks(link, True)
if link is None:
return addr
_href = link.pop('href')
_text = link.pop('_text')
repl = '<a href="{0!s}" {1!s}>{2!s}</a>'
attr = '{0!s}="{1!s}"'
attribs = ' '.join(attr.format(k, v) for k, v in link.items())
return repl.format(_href, attribs, _text)
def link_repl(match):
url = match.group(0)
open_brackets = close_brackets = 0
if url.startswith('('):
_wrapping = strip_wrapping_parentheses(url)
url, open_brackets, close_brackets = _wrapping
end = ''
m = re.search(punct_re, url)
if m:
end = m.group(0)
url = url[0:m.start()]
if re.search(proto_re, url):
href = url
else:
href = ''.join(['http://', url])
link = {
'_text': url,
'href': href,
}
link = apply_callbacks(link, True)
if link is None:
return '(' * open_brackets + url + ')' * close_brackets
_text = link.pop('_text')
_href = link.pop('href')
repl = '{0!s}<a href="{1!s}" {2!s}>{3!s}</a>{4!s}{5!s}'
attr = '{0!s}="{1!s}"'
attribs = ' '.join(attr.format(k, v) for k, v in link.items())
return repl.format('(' * open_brackets,
_href, attribs, _text, end,
')' * close_brackets)
try:
linkify_nodes(forest)
except __HOLE__ as e:
# If we hit the max recursion depth, just return what we've got.
log.exception('Probable recursion error: {0!r}'.format(e))
return _render(forest)
|
RuntimeError
|
dataset/ETHPy150Open mozilla/bleach/bleach/__init__.py/linkify
|
3,072 |
@task_manager.require_exclusive_lock
def set_boot_device(self, task, device, persistent=False):
"""Set the boot device for the task's node.
Set the boot device to use on next reboot of the node.
:param task: a task from TaskManager.
:param device: the boot device, one of
:mod:`ironic.common.boot_devices`.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False. Ignored by this driver.
:raises: InvalidParameterValue if an invalid boot device is
specified or if any connection parameters are incorrect.
:raises: MissingParameterValue if a required parameter is missing
:raises: SSHConnectFailed if ssh failed to connect to the node.
:raises: SSHCommandFailed on an error from ssh.
:raises: NotImplementedError if the virt_type does not support
setting the boot device.
:raises: NodeNotFound if could not find a VM corresponding to any
of the provided MACs.
"""
node = task.node
driver_info = _parse_driver_info(node)
if device not in self.get_supported_boot_devices(task):
raise exception.InvalidParameterValue(_(
"Invalid boot device %s specified.") % device)
driver_info['macs'] = driver_utils.get_node_mac_addresses(task)
ssh_obj = _get_connection(node)
boot_device_map = _get_boot_device_map(driver_info['virt_type'])
try:
_set_boot_device(ssh_obj, driver_info, boot_device_map[device])
except __HOLE__:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to set boot device for node %(node)s, "
"virt_type %(vtype)s does not support this "
"operation"),
{'node': node.uuid,
'vtype': driver_info['virt_type']})
|
NotImplementedError
|
dataset/ETHPy150Open openstack/ironic/ironic/drivers/modules/ssh.py/SSHManagement.set_boot_device
|
3,073 |
def get_boot_device(self, task):
"""Get the current boot device for the task's node.
Provides the current boot device of the node. Be aware that not
all drivers support this.
:param task: a task from TaskManager.
:raises: InvalidParameterValue if any connection parameters are
incorrect.
:raises: MissingParameterValue if a required parameter is missing
:raises: SSHConnectFailed if ssh failed to connect to the node.
:raises: SSHCommandFailed on an error from ssh.
:raises: NodeNotFound if could not find a VM corresponding to any
of the provided MACs.
:returns: a dictionary containing:
:boot_device: the boot device, one of
:mod:`ironic.common.boot_devices` or None if it is unknown.
:persistent: Whether the boot device will persist to all
future boots or not, None if it is unknown.
"""
node = task.node
driver_info = _parse_driver_info(node)
driver_info['macs'] = driver_utils.get_node_mac_addresses(task)
ssh_obj = _get_connection(node)
response = {'boot_device': None, 'persistent': None}
try:
response['boot_device'] = _get_boot_device(ssh_obj, driver_info)
except __HOLE__:
LOG.warning(_LW("Failed to get boot device for node %(node)s, "
"virt_type %(vtype)s does not support this "
"operation"),
{'node': node.uuid, 'vtype': driver_info['virt_type']})
return response
|
NotImplementedError
|
dataset/ETHPy150Open openstack/ironic/ironic/drivers/modules/ssh.py/SSHManagement.get_boot_device
|
3,074 |
def mkdir_p(path):
"""Create path if it doesn't exist already"""
try:
os.makedirs(path)
except __HOLE__ as e:
if e.errno != errno.EEXIST:
raise
|
OSError
|
dataset/ETHPy150Open basak/glacier-cli/glacier.py/mkdir_p
|
3,075 |
def get_archive_list(self, vault):
def force_id(archive):
return "\t".join([
self._archive_ref(archive, force_id=True),
"%s" % archive.name
])
for archive_name, archive_iterator in (
itertools.groupby(
self._get_archive_list_objects(vault),
lambda archive: archive.name)):
# Yield self._archive_ref(..., force_id=True) if there is more than
# one archive with the same name; otherwise use force_id=False.
first_archive = next(archive_iterator)
try:
second_archive = next(archive_iterator)
except __HOLE__:
yield self._archive_ref(first_archive, force_id=False)
else:
yield force_id(first_archive)
yield force_id(second_archive)
for subsequent_archive in archive_iterator:
yield force_id(subsequent_archive)
|
StopIteration
|
dataset/ETHPy150Open basak/glacier-cli/glacier.py/Cache.get_archive_list
|
3,076 |
def job_oneline(conn, cache, vault, job):
action_letter = {'ArchiveRetrieval': 'a',
'InventoryRetrieval': 'i'}[job.action]
status_letter = {'InProgress': 'p',
'Succeeded': 'd',
'Failed': 'e'}[job.status_code]
date = job.completion_date
if not date:
date = job.creation_date
if job.action == 'ArchiveRetrieval':
try:
name = cache.get_archive_name(vault.name, 'id:' + job.archive_id)
except __HOLE__:
name = None
if name is None:
name = 'id:' + job.archive_id
elif job.action == 'InventoryRetrieval':
name = ''
return '{action_letter}/{status_letter} {date} {vault.name:10} {name}'.format(
**locals())
|
KeyError
|
dataset/ETHPy150Open basak/glacier-cli/glacier.py/job_oneline
|
3,077 |
@staticmethod
def _write_archive_retrieval_job(f, job, multipart_size):
if job.archive_size > multipart_size:
def fetch(start, end):
byte_range = start, end-1
f.write(job.get_output(byte_range).read())
whole_parts = job.archive_size // multipart_size
for first_byte in xrange(0, whole_parts * multipart_size,
multipart_size):
fetch(first_byte, first_byte + multipart_size)
remainder = job.archive_size % multipart_size
if remainder:
fetch(job.archive_size - remainder, job.archive_size)
else:
f.write(job.get_output().read())
# Make sure that the file now exactly matches the downloaded archive,
# even if the file existed before and was longer.
try:
f.truncate(job.archive_size)
except __HOLE__ as e:
# Allow ESPIPE, since the "file" couldn't have existed before in
# this case.
if e.errno != errno.ESPIPE:
raise
|
IOError
|
dataset/ETHPy150Open basak/glacier-cli/glacier.py/App._write_archive_retrieval_job
|
3,078 |
def archive_retrieve_one(self, name):
try:
archive_id = self.cache.get_archive_id(self.args.vault, name)
except __HOLE__:
raise ConsoleError('archive %r not found' % name)
vault = self.connection.get_vault(self.args.vault)
retrieval_jobs = find_retrieval_jobs(vault, archive_id)
complete_job = find_complete_job(retrieval_jobs)
if complete_job:
self._archive_retrieve_completed(self.args, complete_job, name)
elif has_pending_job(retrieval_jobs):
if self.args.wait:
complete_job = wait_until_job_completed(retrieval_jobs)
self._archive_retrieve_completed(self.args, complete_job, name)
else:
raise RetryConsoleError('job still pending for archive %r' % name)
else:
# create an archive retrieval job
job = vault.retrieve_archive(archive_id)
if self.args.wait:
wait_until_job_completed([job])
self._archive_retrieve_completed(self.args, job, name)
else:
raise RetryConsoleError('queued retrieval job for archive %r' % name)
|
KeyError
|
dataset/ETHPy150Open basak/glacier-cli/glacier.py/App.archive_retrieve_one
|
3,079 |
def archive_delete(self):
try:
archive_id = self.cache.get_archive_id(
self.args.vault, self.args.name)
except __HOLE__:
raise ConsoleError('archive %r not found' % self.args.name)
vault = self.connection.get_vault(self.args.vault)
vault.delete_archive(archive_id)
self.cache.delete_archive(self.args.vault, self.args.name)
|
KeyError
|
dataset/ETHPy150Open basak/glacier-cli/glacier.py/App.archive_delete
|
3,080 |
def archive_checkpresent(self):
try:
last_seen = self.cache.get_archive_last_seen(
self.args.vault, self.args.name)
except KeyError:
if self.args.wait:
last_seen = None
else:
if not self.args.quiet:
print(
'archive %r not found' % self.args.name,
file=sys.stderr)
return
def too_old(last_seen):
return (not last_seen or
not self.args.max_age_hours or
(last_seen <
time.time() - self.args.max_age_hours * 60 * 60))
if too_old(last_seen):
# Not recent enough
try:
self._vault_sync(vault_name=self.args.vault,
max_age_hours=self.args.max_age_hours,
fix=False,
wait=self.args.wait)
except RetryConsoleError:
pass
else:
try:
last_seen = self.cache.get_archive_last_seen(
self.args.vault, self.args.name)
except __HOLE__:
if not self.args.quiet:
print(('archive %r not found, but it may ' +
'not be in the inventory yet')
% self.args.name, file=sys.stderr)
return
if too_old(last_seen):
if not self.args.quiet:
print(('archive %r found, but has not been seen ' +
'recently enough to consider it present') %
self.args.name, file=sys.stderr)
return
print(self.args.name)
|
KeyError
|
dataset/ETHPy150Open basak/glacier-cli/glacier.py/App.archive_checkpresent
|
3,081 |
def get_context_data(self, **kwargs):
context = {}
# channel is needed everywhere
self.channel = self.channel or Channel.objects.get_homepage(
site=get_current_site(self.request)
)
if not self.channel and getattr(
settings, 'OPPS_MULTISITE_FALLBACK', None):
self.channel = Channel.objects.filter(
homepage=True, published=True)[:1].get()
context['channel'] = self.channel
if not self.long_slug:
return context
context = super(View, self).get_context_data(**kwargs)
if hasattr(self, 'articleboxes'):
context['articleboxes'] = self.articleboxes
else:
context['articleboxes'] = ContainerBox.objects.filter(
channel__long_slug=self.long_slug)
self.excluded_ids = []
for box in context['articleboxes']:
self.excluded_ids += [a.pk for a in box.ordered_containers()]
obj_filter = {}
obj_filter['site_domain'] = self.site.domain
obj_filter['date_available__lte'] = timezone.now()
obj_filter['published'] = True
filters = obj_filter
filters['channel_long_slug__in'] = self.channel_long_slug
is_paginated = self.page_kwarg in self.request.GET
if self.channel and self.channel.is_root_node() and not is_paginated:
filters['show_on_root_channel'] = True
article = Container.objects.filter(**filters)
context['posts'] = article.filter(
child_class='Post'
).exclude(pk__in=self.excluded_ids)[:self.limit]
context['albums'] = Album.objects.filter(
**filters
).exclude(pk__in=self.excluded_ids)[:self.limit]
context['channel'] = {}
context['channel']['long_slug'] = self.long_slug
if self.channel:
context['channel'] = self.channel
context['breadcrumb'] = self.get_breadcrumb()
if self.slug:
try:
context['next'] = self.get_object() \
.get_next_by_date_insert(**obj_filter)
except self.get_object().DoesNotExist:
pass
try:
context['prev'] = self.get_object() \
.get_previous_by_date_insert(**obj_filter)
except self.get_object().DoesNotExist:
pass
context['articleboxes'] = context['articleboxes'].filter(
containers__slug=self.slug)
if self.get_object().child_class == 'Mirror':
context['context'] = self.get_object().container
if self.request.META.get('HTTP_X_PJAX', False) or \
self.request.is_ajax():
context['extends_parent'] = 'base_ajax.html'
try:
# opps.field append on context
context['context'].fields = field_template_read(
context['context'].custom_fields())
except __HOLE__:
pass
return context
|
AttributeError
|
dataset/ETHPy150Open opps/opps/opps/views/generic/base.py/View.get_context_data
|
3,082 |
def get_long_slug(self):
self.long_slug = self.kwargs.get('channel__long_slug', None)
try:
if not self.long_slug:
self.long_slug = Channel.objects.get_homepage(
site=self.site).long_slug
except __HOLE__:
pass
return self.long_slug
|
AttributeError
|
dataset/ETHPy150Open opps/opps/opps/views/generic/base.py/View.get_long_slug
|
3,083 |
def __init__(self, beaver_config, logger=None):
"""Generic transport configuration
Will attach the file_config object, setup the
current hostname, and ensure we have a proper
formatter for the current transport
"""
self._beaver_config = beaver_config
self._current_host = beaver_config.get('hostname')
self._default_formatter = beaver_config.get('format', 'null')
self._formatters = {}
self._is_valid = True
self._logger = logger
self._epoch = datetime.datetime.utcfromtimestamp(0)
self._logstash_version = beaver_config.get('logstash_version')
if self._logstash_version == 0:
self._fields = {
'type': '@type',
'tags': '@tags',
'message': '@message',
'file': '@source_path',
'host': '@source_host',
'raw_json_fields': ['@message', '@source', '@source_host', '@source_path', '@tags', '@timestamp', '@type'],
}
elif self._logstash_version == 1:
self._fields = {
'type': 'type',
'tags': 'tags',
'message': 'message',
'file': 'file',
'host': 'host',
'raw_json_fields': ['message', 'host', 'file', 'tags', '@timestamp', 'type'],
}
def raw_formatter(data):
return data[self._fields.get('message')]
def rawjson_formatter(data):
try:
json_data = json.loads(data[self._fields.get('message')])
except __HOLE__:
self._logger.warning("cannot parse as rawjson: {0}".format(self._fields.get('message')))
json_data = json.loads("{}")
if json_data:
del data[self._fields.get('message')]
for field in json_data:
data[field] = json_data[field]
for field in self._fields.get('raw_json_fields'):
if field not in data:
data[field] = ''
return json.dumps(data)
def gelf_formatter(data):
message = data[self._fields.get('message')]
short_message = message.split('\n', 1)[0]
short_message = short_message[:250]
timestampDate = datetime.datetime.strptime(data['@timestamp'], "%Y-%m-%dT%H:%M:%S.%fZ")
delta = timestampDate - self._epoch
timestampSeconds = delta.days*86400+delta.seconds+delta.microseconds/1e6
gelf_data = {
'version': '1.1',
'host': data[self._fields.get('host')],
'short_message': short_message,
'full_message': message,
'timestamp': timestampSeconds,
'level': 6,
'_file': data[self._fields.get('file')],
}
return json.dumps(gelf_data) + '\0'
def string_formatter(data):
return '[{0}] [{1}] {2}'.format(data[self._fields.get('host')], data['@timestamp'], data[self._fields.get('message')])
self._formatters['json'] = json.dumps
self._formatters['msgpack'] = msgpack.packb
self._formatters['raw'] = raw_formatter
self._formatters['rawjson'] = rawjson_formatter
self._formatters['string'] = string_formatter
self._formatters['gelf'] = gelf_formatter
|
ValueError
|
dataset/ETHPy150Open python-beaver/python-beaver/beaver/transports/base_transport.py/BaseTransport.__init__
|
3,084 |
def collect_datasources(self):
"""
Identify what time series exist in both the diffed reports and download them to the diff report resources directory
:return: True/False : return status of whether the download of time series resources succeeded.
"""
report_count = 0
if self.status != 'OK':
return False
diff_datasource = sorted(set(self.reports[0].datasource) & set(self.reports[1].datasource))
if diff_datasource:
self.reports[0].datasource = diff_datasource
self.reports[1].datasource = diff_datasource
else:
self.status = 'NO_COMMON_STATS'
logger.error('No common metrics were found between the two reports')
return False
for report in self.reports:
report.label = report_count
report_count += 1
report.local_location = os.path.join(self.resource_directory, str(report.label))
try:
os.makedirs(report.local_location)
except OSError as exeption:
if exeption.errno != errno.EEXIST:
raise
if report.remote_location != 'local':
naarad.httpdownload.download_url_list(map(lambda x: report.remote_location + '/' + self.resource_path + '/' + x + '.csv', report.datasource),
report.local_location)
else:
for filename in report.datasource:
try:
shutil.copy(os.path.join(os.path.join(report.location, self.resource_path), filename + '.csv'), report.local_location)
except __HOLE__ as exeption:
continue
return True
|
IOError
|
dataset/ETHPy150Open linkedin/naarad/src/naarad/reporting/diff.py/Diff.collect_datasources
|
3,085 |
def collect_cdf_datasources(self):
"""
Identify what cdf series exist in both the diffed reports and download them to the diff report resources directory
:return: True/False : return status of whether the download of time series resources succeeded.
"""
report_count = 0
if self.status != 'OK':
return False
diff_cdf_datasource = sorted(set(self.reports[0].cdf_datasource) & set(self.reports[1].cdf_datasource))
if diff_cdf_datasource:
self.reports[0].cdf_datasource = diff_cdf_datasource
self.reports[1].cdf_datasource = diff_cdf_datasource
else:
self.status = 'NO_COMMON_STATS'
logger.error('No common metrics were found between the two reports')
return False
for report in self.reports:
report.label = report_count
report_count += 1
report.local_location = os.path.join(self.resource_directory, str(report.label))
try:
os.makedirs(report.local_location)
except OSError as exeption:
if exeption.errno != errno.EEXIST:
raise
if report.remote_location != 'local':
naarad.httpdownload.download_url_list(map(lambda x: report.remote_location + '/' + self.resource_path + '/' + x + '.csv', report.cdf_datasource),
report.local_location)
else:
for filename in report.cdf_datasource:
try:
shutil.copy(os.path.join(os.path.join(report.location, self.resource_path), filename + '.csv'), report.local_location)
except __HOLE__ as exeption:
continue
return True
|
IOError
|
dataset/ETHPy150Open linkedin/naarad/src/naarad/reporting/diff.py/Diff.collect_cdf_datasources
|
3,086 |
def collect(self):
"""
Identify what summary stats exist in both the diffed reports and download them to the diff report resources directory
:return: True/False : return status of whether the download of summary stats succeeded.
"""
report_count = 0
if self.status != 'OK':
return False
diff_stats = set(self.reports[0].stats) & set(self.reports[1].stats)
if diff_stats:
self.reports[0].stats = diff_stats
self.reports[1].stats = diff_stats
else:
self.status = 'NO_COMMON_STATS'
logger.error('No common metrics were found between the two reports')
return False
for report in self.reports:
report.label = report_count
report_count += 1
report.local_location = os.path.join(self.resource_directory, str(report.label))
try:
os.makedirs(report.local_location)
except __HOLE__ as exeption:
if exeption.errno != errno.EEXIST:
raise
if report.remote_location != 'local':
naarad.httpdownload.download_url_list(map(lambda x: report.remote_location + '/' + self.resource_path + '/' + x, report.stats), report.local_location)
else:
for filename in report.stats:
shutil.copy(os.path.join(os.path.join(report.location, self.resource_path), filename), report.local_location)
return True
|
OSError
|
dataset/ETHPy150Open linkedin/naarad/src/naarad/reporting/diff.py/Diff.collect
|
3,087 |
def check_sla(self, sla, diff_metric):
"""
Check whether the SLA has passed or failed
"""
try:
if sla.display is '%':
diff_val = float(diff_metric['percent_diff'])
else:
diff_val = float(diff_metric['absolute_diff'])
except __HOLE__:
return False
if not (sla.check_sla_passed(diff_val)):
self.sla_failures += 1
self.sla_failure_list.append(DiffSLAFailure(sla, diff_metric))
return True
|
ValueError
|
dataset/ETHPy150Open linkedin/naarad/src/naarad/reporting/diff.py/Diff.check_sla
|
3,088 |
def upgrade(self, from_version=None, to_version=__version__):
if from_version is None:
try:
from_version = self._read_md()['properties']['db-version']
except (KeyError, __HOLE__):
from_version = "0.0"
self.upgrade_steps.run(self, from_version, to_version)
md = self._read_md()
md['properties']['db-version'] = to_version
self._update_md(md)
|
TypeError
|
dataset/ETHPy150Open ottogroup/palladium/palladium/persistence.py/File.upgrade
|
3,089 |
def _evaluate_numexpr(op, op_str, a, b, raise_on_error=False, truediv=True,
reversed=False, **eval_kwargs):
result = None
if _can_use_numexpr(op, op_str, a, b, 'evaluate'):
try:
# we were originally called by a reversed op
# method
if reversed:
a, b = b, a
a_value = getattr(a, "values", a)
b_value = getattr(b, "values", b)
result = ne.evaluate('a_value %s b_value' % op_str,
local_dict={'a_value': a_value,
'b_value': b_value},
casting='safe', truediv=truediv,
**eval_kwargs)
except __HOLE__ as detail:
if 'unknown type object' in str(detail):
pass
except Exception as detail:
if raise_on_error:
raise
if _TEST_MODE:
_store_test_result(result is not None)
if result is None:
result = _evaluate_standard(op, op_str, a, b, raise_on_error)
return result
|
ValueError
|
dataset/ETHPy150Open pydata/pandas/pandas/computation/expressions.py/_evaluate_numexpr
|
3,090 |
def _where_numexpr(cond, a, b, raise_on_error=False):
result = None
if _can_use_numexpr(None, 'where', a, b, 'where'):
try:
cond_value = getattr(cond, 'values', cond)
a_value = getattr(a, 'values', a)
b_value = getattr(b, 'values', b)
result = ne.evaluate('where(cond_value, a_value, b_value)',
local_dict={'cond_value': cond_value,
'a_value': a_value,
'b_value': b_value},
casting='safe')
except __HOLE__ as detail:
if 'unknown type object' in str(detail):
pass
except Exception as detail:
if raise_on_error:
raise TypeError(str(detail))
if result is None:
result = _where_standard(cond, a, b, raise_on_error)
return result
# turn myself on
|
ValueError
|
dataset/ETHPy150Open pydata/pandas/pandas/computation/expressions.py/_where_numexpr
|
3,091 |
def _has_bool_dtype(x):
try:
return x.dtype == bool
except AttributeError:
try:
return 'bool' in x.blocks
except __HOLE__:
return isinstance(x, (bool, np.bool_))
|
AttributeError
|
dataset/ETHPy150Open pydata/pandas/pandas/computation/expressions.py/_has_bool_dtype
|
3,092 |
def start(self, extra_configs=None):
"""
Start a cluster as a subprocess.
"""
self.tmpdir = tempfile.mkdtemp()
if not extra_configs:
extra_configs = {}
def tmppath(filename):
"""Creates paths in tmpdir."""
return os.path.join(self.tmpdir, filename)
LOGGER.info("Using temporary directory: %s" % self.tmpdir)
in_conf_dir = tmppath("in-conf")
os.mkdir(in_conf_dir)
self.log_dir = tmppath("logs")
os.mkdir(self.log_dir)
f = file(os.path.join(in_conf_dir, "hadoop-metrics.properties"), "w")
try:
f.write("""
dfs.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
mapred.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
jvm.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
rpc.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
""")
finally:
f.close()
if self.superuser not in TEST_USER_GROUP_MAPPING:
TEST_USER_GROUP_MAPPING[self.superuser] = [self.superuser]
_write_static_group_mapping(TEST_USER_GROUP_MAPPING,
tmppath('ugm.properties'))
core_configs = {
'hadoop.proxyuser.%s.groups' % (self.superuser,): 'users,supergroup',
'hadoop.proxyuser.%s.hosts' % (self.superuser,): 'localhost',
'mapred.jobtracker.plugins': CLUSTER_JT_PLUGINS}
extra_configs.update(STARTUP_CONFIGS)
write_config(core_configs, tmppath('in-conf/core-site.xml'))
write_config({'mapred.jobtracker.taskScheduler': CLUSTER_TASK_SCHEDULER,
'mapred.queue.names': CLUSTER_QUEUE_NAMES},
tmppath('in-conf/mapred-site.xml'))
hadoop_policy_keys = ['client', 'client.datanode', 'datanode', 'inter.datanode', 'namenode', 'inter.tracker', 'job.submission', 'task.umbilical', 'refresh.policy', 'admin.operations']
hadoop_policy_config = {}
for policy in hadoop_policy_keys:
hadoop_policy_config['security.' + policy + '.protocol.acl'] = '*'
write_config(hadoop_policy_config, tmppath('in-conf/hadoop-policy.xml'))
details_file = file(tmppath("details.json"), "w+")
try:
args = [ os.path.join(hadoop.conf.HADOOP_MR1_HOME.get(), 'bin', 'hadoop'),
"jar",
hadoop.conf.HADOOP_TEST_JAR.get(),
"minicluster",
"-writeConfig", tmppath("config.xml"),
"-writeDetails", tmppath("details.json"),
"-datanodes", str(self.num_datanodes),
"-tasktrackers", str(self.num_tasktrackers),
"-useloopbackhosts",
"-D", "hadoop.tmp.dir=%s" % self.tmpdir,
"-D", "mapred.local.dir=%s/mapred/local" % self.tmpdir,
"-D", "mapred.system.dir=/mapred/system",
"-D", "mapred.temp.dir=/mapred/temp",
"-D", "jobclient.completion.poll.interval=100",
"-D", "jobclient.progress.monitor.poll.interval=100",
"-D", "fs.checkpoint.period=1",
# For a reason I don't fully understand, this must be 0.0.0.0 and not 'localhost'
"-D", "dfs.secondary.http.address=0.0.0.0:%d" % python_util.find_unused_port(),
# We bind the NN's thrift interface to a port we find here.
# This is suboptimal, since there's a race. Alas, if we don't
# do this here, the datanodes fail to discover the namenode's thrift
# address, and there's a race there
"-D", "dfs.thrift.address=localhost:%d" % python_util.find_unused_port(),
"-D", "jobtracker.thrift.address=localhost:%d" % python_util.find_unused_port(),
# Jobs realize they have finished faster with this timeout.
"-D", "jobclient.completion.poll.interval=50",
"-D", "hadoop.security.authorization=true",
"-D", "hadoop.policy.file=%s/hadoop-policy.xml" % in_conf_dir,
]
for key,value in extra_configs.iteritems():
args.append("-D")
args.append(key + "=" + value)
env = {}
env["HADOOP_CONF_DIR"] = in_conf_dir
env["HADOOP_OPTS"] = "-Dtest.build.data=%s" % (self.tmpdir, )
env["HADOOP_CLASSPATH"] = ':'.join([
# -- BEGIN JAVA TRIVIA --
# Add the -test- jar to the classpath to work around a subtle issue
# involving Java classloaders. In brief, hadoop's RunJar class creates
# a child classloader with the test jar on it, but the core classes
# are loaded by the system classloader. This is fine except that
# some classes in the test jar extend package-protected classes in the
# core jar. Even though the classes are in the same package name, they
# are thus loaded by different classloaders and therefore an IllegalAccessError
# prevents the MiniMRCluster from starting. Adding the test jar to the system
# classpath prevents this error since then both the MiniMRCluster and the
# core classes are loaded by the system classloader.
hadoop.conf.HADOOP_TEST_JAR.get(),
# -- END JAVA TRIVIA --
hadoop.conf.HADOOP_PLUGIN_CLASSPATH.get(),
# Due to CDH-4537, we need to add test dependencies to run minicluster
os.path.join(os.path.dirname(__file__), 'test_jars', '*'),
])
env["HADOOP_HEAPSIZE"] = "128"
env["HADOOP_HOME"] = hadoop.conf.HADOOP_MR1_HOME.get()
env["HADOOP_LOG_DIR"] = self.log_dir
env["USER"] = self.superuser
if "JAVA_HOME" in os.environ:
env["JAVA_HOME"] = os.environ["JAVA_HOME"]
# Wait for the debugger to attach
if DEBUG_HADOOP:
env["HADOOP_OPTS"] = env.get("HADOOP_OPTS", "") + " -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=9999"
if USE_STDERR:
stderr=sys.stderr
else:
stderr=file(tmppath("stderr"), "w")
LOGGER.debug("Starting minicluster: %s env: %s" % (repr(args), repr(env)))
self.clusterproc = subprocess.Popen(
args=args,
stdout=file(tmppath("stdout"), "w"),
stderr=stderr,
env=env)
details = {}
start = time.time()
# We consider the cluster started when the details file parses correct JSON.
# MiniHadoopCluster currently writes the details file last, and this depends
# on that.
while not details:
try:
details_file.seek(0)
details = json.load(details_file)
except __HOLE__:
pass
if self.clusterproc.poll() is not None or (not DEBUG_HADOOP and (time.time() - start) > MAX_CLUSTER_STARTUP_TIME):
LOGGER.debug("stdout:" + file(tmppath("stdout")).read())
if not USE_STDERR:
LOGGER.debug("stderr:" + file(tmppath("stderr")).read())
self.stop()
raise Exception("Cluster process quit or is taking too long to start. Aborting.")
finally:
details_file.close()
LOGGER.debug("Successfully started minicluster")
# Place all the details as attributes on self.
for k, v in details.iteritems():
setattr(self, k, v)
# Parse the configuration using XPath and place into self.config.
config = lxml.etree.parse(tmppath("config.xml"))
self.config = dict( (property.find("./name").text, property.find("./value").text)
for property in config.xpath("/configuration/property"))
# Write out Hadoop-style configuration directory,
# which can, in turn, be used for /bin/hadoop.
self.config_dir = tmppath("conf")
os.mkdir(self.config_dir)
hadoop.conf.HADOOP_CONF_DIR.set_for_testing(self.config_dir)
write_config(self.config, tmppath("conf/core-site.xml"),
["fs.defaultFS", "jobclient.completion.poll.interval",
"dfs.namenode.checkpoint.period", "dfs.namenode.checkpoint.dir",
'hadoop.proxyuser.'+self.superuser+'.groups', 'hadoop.proxyuser.'+self.superuser+'.hosts'])
write_config(self.config, tmppath("conf/hdfs-site.xml"), ["fs.defaultFS", "dfs.namenode.http-address", "dfs.namenode.secondary.http-address"])
# mapred.job.tracker isn't written out into self.config, so we fill
# that one out more manually.
write_config({ 'mapred.job.tracker': 'localhost:%d' % self.jobtracker_port },
tmppath("conf/mapred-site.xml"))
write_config(hadoop_policy_config, tmppath('conf/hadoop-policy.xml'))
# Once the config is written out, we can start the 2NN.
args = [hadoop.conf.HADOOP_BIN.get(),
'--config', self.config_dir,
'secondarynamenode']
LOGGER.debug("Starting 2NN at: " +
self.config['dfs.secondary.http.address'])
LOGGER.debug("2NN command: %s env: %s" % (repr(args), repr(env)))
self.secondary_proc = subprocess.Popen(
args=args,
stdout=file(tmppath("stdout.2nn"), "w"),
stderr=file(tmppath("stderr.2nn"), "w"),
env=env)
while True:
try:
response = urllib2.urlopen(urllib2.Request('http://' +
self.config['dfs.secondary.http.address']))
except urllib2.URLError:
# If we should abort startup.
if self.secondary_proc.poll() is not None or (not DEBUG_HADOOP and (time.time() - start) > MAX_CLUSTER_STARTUP_TIME):
LOGGER.debug("stdout:" + file(tmppath("stdout")).read())
if not USE_STDERR:
LOGGER.debug("stderr:" + file(tmppath("stderr")).read())
self.stop()
raise Exception("2nn process quit or is taking too long to start. Aborting.")
break
else:
time.sleep(1)
continue
# We didn't get a URLError. 2NN started successfully.
response.close()
break
LOGGER.debug("Successfully started 2NN")
|
ValueError
|
dataset/ETHPy150Open cloudera/hue/desktop/libs/hadoop/src/hadoop/mini_cluster.py/MiniHadoopCluster.start
|
3,093 |
def __getitem__(self, key):
key = key.lower()
try:
return BASE_DATA_TYPES_REVERSE[key]
except __HOLE__:
import re
m = re.search(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$', key)
if m:
return ('CharField', {'maxlength': int(m.group(1))})
raise KeyError
|
KeyError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/backends/sqlite3/introspection.py/FlexibleFieldLookupDict.__getitem__
|
3,094 |
def LeaveCluster(modify_ssh_setup):
"""Cleans up and remove the current node.
This function cleans up and prepares the current node to be removed
from the cluster.
If processing is successful, then it raises an
L{errors.QuitGanetiException} which is used as a special case to
shutdown the node daemon.
@param modify_ssh_setup: boolean
"""
_CleanDirectory(pathutils.DATA_DIR)
_CleanDirectory(pathutils.CRYPTO_KEYS_DIR)
JobQueuePurge()
if modify_ssh_setup:
try:
priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.SSH_LOGIN_USER)
ssh.RemoveAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
utils.RemoveFile(priv_key)
utils.RemoveFile(pub_key)
except errors.OpExecError:
logging.exception("Error while processing ssh files")
except __HOLE__:
logging.exception("At least one SSH file was not accessible.")
try:
utils.RemoveFile(pathutils.CONFD_HMAC_KEY)
utils.RemoveFile(pathutils.RAPI_CERT_FILE)
utils.RemoveFile(pathutils.SPICE_CERT_FILE)
utils.RemoveFile(pathutils.SPICE_CACERT_FILE)
utils.RemoveFile(pathutils.NODED_CERT_FILE)
except: # pylint: disable=W0702
logging.exception("Error while removing cluster secrets")
utils.StopDaemon(constants.CONFD)
utils.StopDaemon(constants.MOND)
utils.StopDaemon(constants.KVMD)
# Raise a custom exception (handled in ganeti-noded)
raise errors.QuitGanetiException(True, "Shutdown scheduled")
|
IOError
|
dataset/ETHPy150Open ganeti/ganeti/lib/backend.py/LeaveCluster
|
3,095 |
def VerifyNode(what, cluster_name, all_hvparams):
"""Verify the status of the local node.
Based on the input L{what} parameter, various checks are done on the
local node.
If the I{filelist} key is present, this list of
files is checksummed and the file/checksum pairs are returned.
If the I{nodelist} key is present, we check that we have
connectivity via ssh with the target nodes (and check the hostname
report).
If the I{node-net-test} key is present, we check that we have
connectivity to the given nodes via both primary IP and, if
applicable, secondary IPs.
@type what: C{dict}
@param what: a dictionary of things to check:
- filelist: list of files for which to compute checksums
- nodelist: list of nodes we should check ssh communication with
- node-net-test: list of nodes we should check node daemon port
connectivity with
- hypervisor: list with hypervisors to run the verify for
@type cluster_name: string
@param cluster_name: the cluster's name
@type all_hvparams: dict of dict of strings
@param all_hvparams: a dictionary mapping hypervisor names to hvparams
@rtype: dict
@return: a dictionary with the same keys as the input dict, and
values representing the result of the checks
"""
result = {}
my_name = netutils.Hostname.GetSysName()
vm_capable = my_name not in what.get(constants.NV_NONVMNODES, [])
_VerifyHypervisors(what, vm_capable, result, all_hvparams)
_VerifyHvparams(what, vm_capable, result)
if constants.NV_FILELIST in what:
fingerprints = utils.FingerprintFiles(map(vcluster.LocalizeVirtualPath,
what[constants.NV_FILELIST]))
result[constants.NV_FILELIST] = \
dict((vcluster.MakeVirtualPath(key), value)
for (key, value) in fingerprints.items())
if constants.NV_CLIENT_CERT in what:
result[constants.NV_CLIENT_CERT] = _VerifyClientCertificate()
if constants.NV_SSH_SETUP in what:
node_status_list, key_type = what[constants.NV_SSH_SETUP]
result[constants.NV_SSH_SETUP] = \
_VerifySshSetup(node_status_list, my_name, key_type)
if constants.NV_SSH_CLUTTER in what:
result[constants.NV_SSH_CLUTTER] = \
_VerifySshClutter(what[constants.NV_SSH_SETUP], my_name)
if constants.NV_NODELIST in what:
(nodes, bynode, mcs) = what[constants.NV_NODELIST]
# Add nodes from other groups (different for each node)
try:
nodes.extend(bynode[my_name])
except KeyError:
pass
# Use a random order
random.shuffle(nodes)
# Try to contact all nodes
val = {}
ssh_port_map = ssconf.SimpleStore().GetSshPortMap()
for node in nodes:
# We only test if master candidates can communicate to other nodes.
# We cannot test if normal nodes cannot communicate with other nodes,
# because the administrator might have installed additional SSH keys,
# over which Ganeti has no power.
if my_name in mcs:
success, message = _GetSshRunner(cluster_name). \
VerifyNodeHostname(node, ssh_port_map[node])
if not success:
val[node] = message
result[constants.NV_NODELIST] = val
if constants.NV_NODENETTEST in what:
result[constants.NV_NODENETTEST] = VerifyNodeNetTest(
my_name, what[constants.NV_NODENETTEST])
if constants.NV_MASTERIP in what:
result[constants.NV_MASTERIP] = VerifyMasterIP(
my_name, what[constants.NV_MASTERIP])
if constants.NV_USERSCRIPTS in what:
result[constants.NV_USERSCRIPTS] = \
[script for script in what[constants.NV_USERSCRIPTS]
if not utils.IsExecutable(script)]
if constants.NV_OOB_PATHS in what:
result[constants.NV_OOB_PATHS] = tmp = []
for path in what[constants.NV_OOB_PATHS]:
try:
st = os.stat(path)
except __HOLE__, err:
tmp.append("error stating out of band helper: %s" % err)
else:
if stat.S_ISREG(st.st_mode):
if stat.S_IMODE(st.st_mode) & stat.S_IXUSR:
tmp.append(None)
else:
tmp.append("out of band helper %s is not executable" % path)
else:
tmp.append("out of band helper %s is not a file" % path)
if constants.NV_LVLIST in what and vm_capable:
try:
val = GetVolumeList(utils.ListVolumeGroups().keys())
except RPCFail, err:
val = str(err)
result[constants.NV_LVLIST] = val
_VerifyInstanceList(what, vm_capable, result, all_hvparams)
if constants.NV_VGLIST in what and vm_capable:
result[constants.NV_VGLIST] = utils.ListVolumeGroups()
if constants.NV_PVLIST in what and vm_capable:
check_exclusive_pvs = constants.NV_EXCLUSIVEPVS in what
val = bdev.LogicalVolume.GetPVInfo(what[constants.NV_PVLIST],
filter_allocatable=False,
include_lvs=check_exclusive_pvs)
if check_exclusive_pvs:
result[constants.NV_EXCLUSIVEPVS] = _CheckExclusivePvs(val)
for pvi in val:
# Avoid sending useless data on the wire
pvi.lv_list = []
result[constants.NV_PVLIST] = map(objects.LvmPvInfo.ToDict, val)
if constants.NV_VERSION in what:
result[constants.NV_VERSION] = (constants.PROTOCOL_VERSION,
constants.RELEASE_VERSION)
_VerifyNodeInfo(what, vm_capable, result, all_hvparams)
if constants.NV_DRBDVERSION in what and vm_capable:
try:
drbd_version = DRBD8.GetProcInfo().GetVersionString()
except errors.BlockDeviceError, err:
logging.warning("Can't get DRBD version", exc_info=True)
drbd_version = str(err)
result[constants.NV_DRBDVERSION] = drbd_version
if constants.NV_DRBDLIST in what and vm_capable:
try:
used_minors = drbd.DRBD8.GetUsedDevs()
except errors.BlockDeviceError, err:
logging.warning("Can't get used minors list", exc_info=True)
used_minors = str(err)
result[constants.NV_DRBDLIST] = used_minors
if constants.NV_DRBDHELPER in what and vm_capable:
status = True
try:
payload = drbd.DRBD8.GetUsermodeHelper()
except errors.BlockDeviceError, err:
logging.error("Can't get DRBD usermode helper: %s", str(err))
status = False
payload = str(err)
result[constants.NV_DRBDHELPER] = (status, payload)
if constants.NV_NODESETUP in what:
result[constants.NV_NODESETUP] = tmpr = []
if not os.path.isdir("/sys/block") or not os.path.isdir("/sys/class/net"):
tmpr.append("The sysfs filesytem doesn't seem to be mounted"
" under /sys, missing required directories /sys/block"
" and /sys/class/net")
if (not os.path.isdir("/proc/sys") or
not os.path.isfile("/proc/sysrq-trigger")):
tmpr.append("The procfs filesystem doesn't seem to be mounted"
" under /proc, missing required directory /proc/sys and"
" the file /proc/sysrq-trigger")
if constants.NV_TIME in what:
result[constants.NV_TIME] = utils.SplitTime(time.time())
if constants.NV_OSLIST in what and vm_capable:
result[constants.NV_OSLIST] = DiagnoseOS()
if constants.NV_BRIDGES in what and vm_capable:
result[constants.NV_BRIDGES] = [bridge
for bridge in what[constants.NV_BRIDGES]
if not utils.BridgeExists(bridge)]
if what.get(constants.NV_ACCEPTED_STORAGE_PATHS) == my_name:
result[constants.NV_ACCEPTED_STORAGE_PATHS] = \
filestorage.ComputeWrongFileStoragePaths()
if what.get(constants.NV_FILE_STORAGE_PATH):
pathresult = filestorage.CheckFileStoragePath(
what[constants.NV_FILE_STORAGE_PATH])
if pathresult:
result[constants.NV_FILE_STORAGE_PATH] = pathresult
if what.get(constants.NV_SHARED_FILE_STORAGE_PATH):
pathresult = filestorage.CheckFileStoragePath(
what[constants.NV_SHARED_FILE_STORAGE_PATH])
if pathresult:
result[constants.NV_SHARED_FILE_STORAGE_PATH] = pathresult
return result
|
OSError
|
dataset/ETHPy150Open ganeti/ganeti/lib/backend.py/VerifyNode
|
3,096 |
def _SymlinkBlockDev(instance_name, device_path, idx=None, uuid=None):
"""Set up symlinks to a instance's block device.
This is an auxiliary function run when an instance is start (on the primary
node) or when an instance is migrated (on the target node).
@param instance_name: the name of the target instance
@param device_path: path of the physical block device, on the node
@param idx: the disk index
@param uuid: the disk uuid
@return: absolute path to the disk's symlink
"""
# In case we have only a userspace access URI, device_path is None
if not device_path:
return None
link_name = _GetBlockDevSymlinkPath(instance_name, idx, uuid)
try:
os.symlink(device_path, link_name)
except __HOLE__, err:
if err.errno == errno.EEXIST:
if (not os.path.islink(link_name) or
os.readlink(link_name) != device_path):
os.remove(link_name)
os.symlink(device_path, link_name)
else:
raise
return link_name
|
OSError
|
dataset/ETHPy150Open ganeti/ganeti/lib/backend.py/_SymlinkBlockDev
|
3,097 |
def _RemoveBlockDevLinks(instance_name, disks):
"""Remove the block device symlinks belonging to the given instance.
"""
def _remove_symlink(link_name):
if os.path.islink(link_name):
try:
os.remove(link_name)
except __HOLE__:
logging.exception("Can't remove symlink '%s'", link_name)
for idx, disk in enumerate(disks):
link_name = _GetBlockDevSymlinkPath(instance_name, uuid=disk.uuid)
_remove_symlink(link_name)
# Remove also the deprecated symlink (if any)
link_name = _GetBlockDevSymlinkPath(instance_name, idx=idx)
_remove_symlink(link_name)
|
OSError
|
dataset/ETHPy150Open ganeti/ganeti/lib/backend.py/_RemoveBlockDevLinks
|
3,098 |
def _GatherAndLinkBlockDevs(instance):
"""Set up an instance's block device(s).
This is run on the primary node at instance startup. The block
devices must be already assembled.
@type instance: L{objects.Instance}
@param instance: the instance whose disks we should assemble
@rtype: list
@return: list of (disk_object, link_name, drive_uri)
"""
block_devices = []
for idx, disk in enumerate(instance.disks_info):
device = _RecursiveFindBD(disk)
if device is None:
raise errors.BlockDeviceError("Block device '%s' is not set up." %
str(disk))
device.Open()
try:
# Create both index-based and uuid-based symlinks
# for backwards compatibility
_SymlinkBlockDev(instance.name, device.dev_path, idx=idx)
link_name = _SymlinkBlockDev(instance.name, device.dev_path,
uuid=disk.uuid)
except __HOLE__, e:
raise errors.BlockDeviceError("Cannot create block device symlink: %s" %
e.strerror)
uri = _CalculateDeviceURI(instance, disk, device)
block_devices.append((disk, link_name, uri))
return block_devices
|
OSError
|
dataset/ETHPy150Open ganeti/ganeti/lib/backend.py/_GatherAndLinkBlockDevs
|
3,099 |
def BlockdevAssemble(disk, instance, as_primary, idx):
"""Activate a block device for an instance.
This is a wrapper over _RecursiveAssembleBD.
@rtype: str or boolean
@return: a tuple with the C{/dev/...} path and the created symlink
for primary nodes, and (C{True}, C{True}) for secondary nodes
"""
try:
result = _RecursiveAssembleBD(disk, instance.name, as_primary)
if isinstance(result, BlockDev):
# pylint: disable=E1103
dev_path = result.dev_path
link_name = None
uri = None
if as_primary:
# Create both index-based and uuid-based symlinks
# for backwards compatibility
_SymlinkBlockDev(instance.name, dev_path, idx=idx)
link_name = _SymlinkBlockDev(instance.name, dev_path, uuid=disk.uuid)
uri = _CalculateDeviceURI(instance, disk, result)
elif result:
return result, result
else:
_Fail("Unexpected result from _RecursiveAssembleBD")
except errors.BlockDeviceError, err:
_Fail("Error while assembling disk: %s", err, exc=True)
except __HOLE__, err:
_Fail("Error while symlinking disk: %s", err, exc=True)
return dev_path, link_name, uri
|
OSError
|
dataset/ETHPy150Open ganeti/ganeti/lib/backend.py/BlockdevAssemble
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.