Unnamed: 0 (int64, 0 to 10k) | function (string, lengths 79 to 138k) | label (string, 20 classes) | info (string, lengths 42 to 261) |
---|---|---|---|
4,400 |
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = []
results2 = []
results3 = []
barrier2 = self.barriertype(self.N)
def f():
try:
i = self.barrier.wait()
if i == self.N//2:
raise RuntimeError
self.barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except __HOLE__:
self.barrier.abort()
pass
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == self.N//2:
self.barrier.reset()
barrier2.wait()
self.barrier.wait()
results3.append(True)
self.run_threads(f)
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
|
RuntimeError
|
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/lock_tests.py/BarrierTests.test_abort_and_reset
|
4,401 |
def web_method_wrapper(func):
@functools.wraps(func)
@gen.coroutine
def wrapper(self, *args, **kwargs):
self.init_rsp_json()
try:
yield func(self, *args, **kwargs)
yield self.finished()
except BaseError as e:
self.rsp_json['code'] = e.e_code
self.rsp_json['msg'] = e.e_msg
self.api_response(self.rsp_json)
gen_logger.error(e, exc_info=options.traceback_4_baseerror)
yield self.on_error(e)
except __HOLE__ as e:
raise e
except Exception as e:
self.api_response(self.rsp_json)
gen_logger.error(e, exc_info=options.traceback_4_exception)
yield self.on_error(e)
return wrapper
|
StopIteration
|
dataset/ETHPy150Open bufferx/twork/twork/web/action_wrapper.py/web_method_wrapper
|
4,402 |
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except __HOLE__:
return self.__missing__(key)
|
KeyError
|
dataset/ETHPy150Open cournape/Bento/bento/compat/_collections.py/defaultdict.__getitem__
|
4,403 |
def move_to_group(self, item, target_group, quantity):
try:
target_item = target_group.items.get(
product=item.product, product_name=item.product_name,
product_sku=item.product_sku)
except __HOLE__:
target_group.items.create(
delivery_group=target_group, product=item.product,
product_name=item.product_name, product_sku=item.product_sku,
quantity=quantity, unit_price_net=item.unit_price_net,
stock=item.stock,
unit_price_gross=item.unit_price_gross)
else:
target_item.quantity += quantity
target_item.save()
item.quantity -= quantity
self.remove_empty_groups(item)
|
ObjectDoesNotExist
|
dataset/ETHPy150Open mirumee/saleor/saleor/order/models.py/OrderedItemManager.move_to_group
|
4,404 |
def load_server_key():
try:
key_path = settings.PQAUTH_SERVER_KEY
except AttributeError:
msg = "You must set settings.PQAUTH_SERVER_KEY"
raise ImproperlyConfigured(msg)
key_password = None
try:
key_password = settings.PQAUTH_SERVER_KEY_PASSWORD
except __HOLE__:
pass
return load_key_file(key_path, key_password)
|
AttributeError
|
dataset/ETHPy150Open teddziuba/pqauth/python/pqauth/pqauth_django_server/keys.py/load_server_key
|
4,405 |
def _get_parent_modeladmin(self):
# HACK: accessing private field.
try:
parentadmin = self.admin_site._registry[self.parent_model]
except __HOLE__:
raise ImproperlyConfigured("Model admin for '{0}' not found in admin_site!".format(self.parent_model.__name__))
# Do some "type" checking to developers are aided in inheriting their parent ModelAdmin screens with the proper classes.
assert isinstance(parentadmin, PlaceholderEditorBaseMixin), "The '{0}' class can only be used in admin screens which implement a PlaceholderEditor mixin class.".format(self.__class__.__name__)
return parentadmin
|
KeyError
|
dataset/ETHPy150Open edoburu/django-fluent-contents/fluent_contents/admin/placeholdereditor.py/PlaceholderEditorInline._get_parent_modeladmin
|
4,406 |
def _get_url_format(opts):
try:
return opts.app_label, opts.model_name # Django 1.7 format
except __HOLE__:
return opts.app_label, opts.module_name
|
AttributeError
|
dataset/ETHPy150Open edoburu/django-fluent-contents/fluent_contents/admin/placeholdereditor.py/_get_url_format
|
4,407 |
def extract_filters(term, opts=None):
"""
Pulls all the filtering options out of the term and returns a cleaned term
and a dictionary of filter names and filter values. Term filters override
filters found in opts.
"""
opts = opts or {}
filters = {}
params = {}
# Type filters.
term, addon_type = extract_from_query(term, 'type', '\w+')
addon_type = addon_type or opts.get('addon_type')
if addon_type:
try:
atype = int(addon_type)
if atype in amo.ADDON_SEARCH_TYPES:
filters['type'] = atype
except __HOLE__:
# `addon_type` is not a digit.
# Try to find it in `ADDON_SEARCH_SLUGS`.
atype = amo.ADDON_SEARCH_SLUGS.get(addon_type.lower())
if atype:
filters['type'] = atype
# Platform filters.
term, platform = extract_from_query(term, 'platform', '\w+')
params['platform'] = platform or opts.get('platform')
# Version filters.
term, version = extract_from_query(term, 'version', '[0-9.]+')
params['version'] = version or opts.get('version')
# Tag filters.
term, tag = extract_from_query(term, 'tag', '\w+')
if tag:
tag = Tag.objects.filter(tag_text=tag).values_list('tag_text',
flat=True)
if tag:
filters['tags__in'] = list(tag)
return (term, filters, params)
|
ValueError
|
dataset/ETHPy150Open mozilla/addons-server/src/olympia/legacy_api/utils.py/extract_filters
|
4,408 |
def parse_docstring(api_doc, docindex):
"""
Process the given C{APIDoc}'s docstring. In particular, populate
the C{APIDoc}'s C{descr} and C{summary} attributes, and add any
information provided by fields in the docstring.
@param docindex: A DocIndex, used to find the containing
module (to look up the docformat); and to find any
user docfields defined by containing objects.
"""
if api_doc.metadata is not UNKNOWN:
if not (isinstance(api_doc, RoutineDoc)
and api_doc.canonical_name[-1] == '__init__'):
log.debug("%s's docstring processed twice" % api_doc.canonical_name)
return
initialize_api_doc(api_doc)
# If there's no docstring, then there's nothing more to do.
if (api_doc.docstring in (None, UNKNOWN)):
return
# Remove leading indentation from the docstring.
api_doc.docstring = unindent_docstring(api_doc.docstring)
# Extract a signature from the docstring, if it has one. This
# overrides any signature we got via introspection/parsing.
if isinstance(api_doc, RoutineDoc):
parse_function_signature(api_doc)
# Parse the docstring. Any errors encountered are stored as
# `ParseError` objects in the errors list.
docformat = get_docformat(api_doc, docindex)
parse_errors = []
parsed_docstring = markup.parse(api_doc.docstring, docformat,
parse_errors)
# Divide the docstring into a description and a list of
# fields.
descr, fields = parsed_docstring.split_fields(parse_errors)
api_doc.descr = descr
field_warnings = []
# Handle the constructor fields that have been defined in the class
# docstring. This code assumes that a class docstring is parsed before
# the same class __init__ docstring.
if isinstance(api_doc, ClassDoc):
# Parse ahead the __init__ docstring for this class
initvar = api_doc.variables.get('__init__')
if initvar and initvar.value not in (None, UNKNOWN):
init_api_doc = initvar.value
parse_docstring(init_api_doc, docindex)
parse_function_signature(init_api_doc, api_doc)
init_fields = split_init_fields(fields, field_warnings)
# Process fields
for field in init_fields:
try:
process_field(init_api_doc, docindex, field.tag(),
field.arg(), field.body())
except ValueError, e: field_warnings.append(str(e))
# Process fields
for field in fields:
try:
process_field(api_doc, docindex, field.tag(),
field.arg(), field.body())
except __HOLE__, e: field_warnings.append(str(e))
# Check to make sure that all type parameters correspond to
# some documented parameter.
check_type_fields(api_doc, field_warnings)
# Check for special variables (e.g., __version__)
if isinstance(api_doc, NamespaceDoc):
for field in STANDARD_FIELDS + user_docfields(api_doc, docindex):
add_metadata_from_var(api_doc, field)
# Extract a summary
if api_doc.summary is None and api_doc.descr is not None:
api_doc.summary, api_doc.other_docs = api_doc.descr.summary()
# If the summary is empty, but the return field is not, then use
# the return field to generate a summary description.
if (isinstance(api_doc, RoutineDoc) and api_doc.summary is None and
api_doc.return_descr is not None):
s, o = api_doc.return_descr.summary()
api_doc.summary = RETURN_PDS + s
api_doc.other_docs = o
# [XX] Make sure we don't have types/param descrs for unknown
# vars/params?
# Report any errors that occurred
report_errors(api_doc, docindex, parse_errors, field_warnings)
|
ValueError
|
dataset/ETHPy150Open ardekantur/pyglet/tools/epydoc/epydoc/docstringparser.py/parse_docstring
|
4,409 |
def add_metadata_from_var(api_doc, field):
if not field.multivalue:
for (f,a,d) in api_doc.metadata:
if field == f:
return # We already have a value for this metadata.
for varname in field.varnames:
# Check if api_doc has a variable w/ the given name.
if varname not in api_doc.variables: continue
var_doc = api_doc.variables[varname]
if var_doc.value is UNKNOWN: continue
val_doc = var_doc.value
value = []
# Try extracting the value from the pyval.
ok_types = (basestring, int, float, bool, type(None))
if val_doc.pyval is not UNKNOWN:
if isinstance(val_doc.pyval, ok_types):
value = [val_doc.pyval]
elif field.multivalue:
if isinstance(val_doc.pyval, (tuple, list)):
for elt in val_doc.pyval:
if not isinstance(elt, ok_types): break
else:
value = list(val_doc.pyval)
# Try extracting the value from the parse tree.
elif val_doc.toktree is not UNKNOWN:
try: value = [epydoc.docparser.parse_string(val_doc.toktree)]
except __HOLE__: raise
except: pass
if field.multivalue and not value:
try: value = epydoc.docparser.parse_string_list(val_doc.toktree)
except KeyboardInterrupt: raise
except: raise
# Add any values that we found.
for elt in value:
if isinstance(elt, str):
elt = decode_with_backslashreplace(elt)
else:
elt = unicode(elt)
elt = epytext.ParsedEpytextDocstring(
epytext.parse_as_para(elt))
# Add in the metadata and remove from the variables
api_doc.metadata.append( (field, varname, elt) )
if var_doc.docstring in (None, UNKNOWN):
del api_doc.variables[varname]
|
KeyboardInterrupt
|
dataset/ETHPy150Open ardekantur/pyglet/tools/epydoc/epydoc/docstringparser.py/add_metadata_from_var
|
4,410 |
def process_deffield_field(api_doc, docindex, tag, arg, descr):
"""Define a new custom field."""
_check(api_doc, tag, arg, expect_arg=True)
if api_doc.extra_docstring_fields is UNKNOWN:
api_doc.extra_docstring_fields = []
try:
docstring_field = _descr_to_docstring_field(arg, descr)
docstring_field.varnames.append("__%s__" % arg)
api_doc.extra_docstring_fields.append(docstring_field)
except __HOLE__, e:
raise ValueError('Bad %s: %s' % (tag, e))
|
ValueError
|
dataset/ETHPy150Open ardekantur/pyglet/tools/epydoc/epydoc/docstringparser.py/process_deffield_field
|
4,411 |
def calendar_view(request, year=datetime.now().year, month=datetime.now().month, channel_slug=None, page=1):
"""
Shows a grid (similar in appearance to a physical calendar) of podcasts for either a specific podcast channel or no
channel at all. Based on the GridOne layout originally developed for Calendars.
"""
site = Site.objects.get_current()
try:
page = int(page)
if year:
year = int(year)
if month:
month = int(month)
except __HOLE__:
raise Http404
channel = None
channel_list = Channel.current.filter(section__publication__site=site).order_by('title')
episodes = Episode.objects.filter(channel__section__publication__site=site)
if channel_slug:
channel = get_object_or_404(Channel, slug=channel_slug)
episodes = episodes.filter(channel=channel)
month_formatted = pycal.monthcalendar(year, month)
month_minus = month - 1
month_plus = month + 1
month_name = pycal.month_name[month]
weekday_header = pycal.weekheader(3).strip().split(" ")
year_minus = year - 1
year_plus = year + 1
today = datetime.now().day
this_month = datetime.now().month
this_year = datetime.now().year
episode_list = episodes.filter(pub_date__year=year).filter(pub_date__month=month)
page_name = "This is a test of the calendaring system."
page = {
'channel': channel,
'channel_list': channel_list,
'episode_list': episode_list,
'month': month,
'month_formatted': month_formatted,
'month_minus': month_minus,
'month_name': month_name,
'month_plus': month_plus,
'page_name': page_name,
'site': site,
'this_month': this_month,
'this_year': this_year,
'today': today,
'weekday_header': weekday_header,
'year': year,
'year_minus': year_minus,
'year_plus': year_plus,
}
return render_to_response('podcasts/calendar.html', page, context_instance=RequestContext(request))
|
ValueError
|
dataset/ETHPy150Open albatrossandco/brubeck_cms/brubeck/podcasts/views.py/calendar_view
|
4,412 |
def calendar_day_view(request, year=datetime.now().year, month=datetime.now().month, day=datetime.now().day, channel_slug=None, page=1):
"""
Shows a grid (similar in appearance to a physical calendar) of podcasts for either a specific podcast channel or no
channel at all. Based on the GridOne layout originally developed for Calendars.
"""
site = Site.objects.get_current()
try:
page = int(page)
if year:
year = int(year)
if month:
month = int(month)
except __HOLE__:
raise Http404
channel = None
channel_list = Channel.current.filter(section__publication__site=site).order_by('title')
episodes = Episode.objects.filter(channel__section__publication__site=site)
if channel_slug:
channel = get_object_or_404(Channel, slug=channel_slug)
episodes = episodes.filter(channel=channel)
month_formatted = pycal.monthcalendar(year, month)
month_minus = month - 1
month_plus = month + 1
month_name = pycal.month_name[month]
weekday_header = pycal.weekheader(3).strip().split(" ")
year_minus = year - 1
year_plus = year + 1
today = datetime.now().day
this_month = datetime.now().month
this_year = datetime.now().year
episode_list = episodes.filter(pub_date__year=year).filter(pub_date__month=month).filter(pub_date__day=day)
page_name = "This is a test of the calendaring system."
page = {
'channel': channel,
'channel_list': channel_list,
'day': day,
'episode_list': episode_list,
'month': month,
'month_formatted': month_formatted,
'month_minus': month_minus,
'month_name': month_name,
'month_plus': month_plus,
'page_name': page_name,
'site': site,
'this_month': this_month,
'this_year': this_year,
'today': today,
'weekday_header': weekday_header,
'year': year,
'year_minus': year_minus,
'year_plus': year_plus,
}
return render_to_response('podcasts/calendarday.html', page, context_instance=RequestContext(request))
|
ValueError
|
dataset/ETHPy150Open albatrossandco/brubeck_cms/brubeck/podcasts/views.py/calendar_day_view
|
4,413 |
def main():
"""Main function for specchio
Example: specchio test/ user@host:test/
:return: None
"""
init_logger()
_popen_str = os.popen("whereis ssh").read().strip()
if _popen_str == "" or _popen_str == "ssh:":
return logger.error("Specchio need `ssh`, "
"but there is no `ssh` in the system")
_popen_str = os.popen("whereis rsync").read().strip()
if _popen_str == "" or _popen_str == "rsync:":
return logger.error("Specchio need `rsync`, "
"but there is no `rsync` in the system")
if len(sys.argv) >= 3:
src_path = sys.argv[-2].strip()
dst_ssh, dst_path = sys.argv[-1].strip().split(":")
option_valid = all((option in GENERAL_OPTIONS)
for option in sys.argv[1:-2])
if option_valid:
logger.info("Initialize Specchio")
is_init_remote = "--init-remote" in sys.argv[1:-2]
event_handler = SpecchioEventHandler(
src_path=src_path, dst_ssh=dst_ssh, dst_path=dst_path,
is_init_remote=is_init_remote
)
observer = Observer()
observer.schedule(event_handler, src_path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except __HOLE__:
observer.stop()
observer.join()
logger.info("Specchio stopped, have a nice day :)")
else:
print MANUAL
else:
print MANUAL
|
KeyboardInterrupt
|
dataset/ETHPy150Open brickgao/specchio/specchio/main.py/main
|
4,414 |
def makeExcludesDict(self):
"""
@return: A C{dict} that maps each option name appearing in
self.mutuallyExclusive to a list of those option names that
is it mutually exclusive with (can't appear on the cmd line with)
"""
#create a mapping of long option name -> single character name
longToShort = {}
for optList in itertools.chain(self.optParams, self.optFlags):
try:
if optList[1] != None:
longToShort[optList[0]] = optList[1]
except __HOLE__:
pass
excludes = {}
for lst in self.mutuallyExclusive:
for i, longname in enumerate(lst):
tmp = []
tmp.extend(lst[:i])
tmp.extend(lst[i+1:])
for name in tmp[:]:
if name in longToShort:
tmp.append(longToShort[name])
if longname in excludes:
excludes[longname].extend(tmp)
else:
excludes[longname] = tmp
return excludes
|
IndexError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/python/zshcomp.py/ArgumentsGenerator.makeExcludesDict
|
4,415 |
def getDescription(self, longname):
"""
Return the description to be used for this argument
@return: C{str}
"""
#check if we have an alternate descr for this arg, and if so use it
if longname in self.altArgDescr:
return self.altArgDescr[longname]
#otherwise we have to get it from the optFlags or optParams
try:
descr = self.optFlags_d[longname][1]
except __HOLE__:
try:
descr = self.optParams_d[longname][2]
except KeyError:
descr = None
if descr is not None:
return descr
# lets try to get it from the opt_foo method doc string if there is one
longMangled = longname.replace('-', '_') # this is what t.p.usage does
obj = getattr(self.options, 'opt_%s' % longMangled, None)
if obj:
descr = descrFromDoc(obj)
if descr is not None:
return descr
return longname # we really ought to have a good description to use
|
KeyError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/python/zshcomp.py/ArgumentsGenerator.getDescription
|
4,416 |
def getShortOption(self, longname):
"""
Return the short option letter or None
@return: C{str} or C{None}
"""
optList = self.optAll_d[longname]
try:
return optList[0] or None
except __HOLE__:
pass
|
IndexError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/python/zshcomp.py/ArgumentsGenerator.getShortOption
|
4,417 |
def descrFromDoc(obj):
"""
Generate an appropriate description from docstring of the given object
"""
if obj.__doc__ is None:
return None
lines = obj.__doc__.split("\n")
descr = None
try:
if lines[0] != "" and not lines[0].isspace():
descr = lines[0].lstrip()
# skip first line if it's blank
elif lines[1] != "" and not lines[1].isspace():
descr = lines[1].lstrip()
except __HOLE__:
pass
return descr
|
IndexError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/python/zshcomp.py/descrFromDoc
|
4,418 |
def firstLine(s):
"""
Return the first line of the given string
"""
try:
i = s.index('\n')
return s[:i]
except __HOLE__:
return s
|
ValueError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/python/zshcomp.py/firstLine
|
4,419 |
def readlines(self, sizehint=-1):
self._close_check()
lines = []
try:
while True:
line = self.readline(sizehint)
if not line:
break
lines.append(line)
if sizehint >= 0:
sizehint -= len(line)
if sizehint <= 0:
break
except __HOLE__:
pass
return lines
|
StopIteration
|
dataset/ETHPy150Open openstack/storlets/Engine/swift/storlet_gateway/storlet_docker_gateway.py/StorletGatewayDocker.IterLike.readlines
|
4,420 |
@classmethod
def validate_dependency_registration(cls, params, name):
mandatory = ['Dependency-Version']
StorletGatewayDocker._check_mandatory_params(params, mandatory)
perm = params.get('Dependency-Permissions')
if perm is not None:
try:
perm_int = int(perm, 8)
except __HOLE__:
raise ValueError('Dependency permission is incorrect')
if (perm_int & int('600', 8)) != int('600', 8):
raise ValueError('The owner should have rw permission')
|
ValueError
|
dataset/ETHPy150Open openstack/storlets/Engine/swift/storlet_gateway/storlet_docker_gateway.py/StorletGatewayDocker.validate_dependency_registration
|
4,421 |
def update_wrapper(new_fn, fn):
"""
Copy as much of the function detail from fn to new_fn
as we can.
"""
try:
new_fn.__name__ = fn.__name__
new_fn.__dict__.update(fn.__dict__)
new_fn.__doc__ = fn.__doc__
new_fn.__module__ = fn.__module__
except __HOLE__:
pass # python2.3 ignore read-only attributes
|
TypeError
|
dataset/ETHPy150Open AnyMesh/anyMesh-Python/example/urwid/widget.py/update_wrapper
|
4,422 |
def parse_date_delta(value):
"""
like parse_date, but also handle delta seconds
"""
if not value:
return None
try:
value = int(value)
except __HOLE__:
return parse_date(value)
else:
return _now() + timedelta(seconds=value)
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/webob-1.1.1/webob/datetime_utils.py/parse_date_delta
|
4,423 |
def read(self, amt=None):
try:
result = next(self.resp_iter)
return result
except __HOLE__:
return ''
|
StopIteration
|
dataset/ETHPy150Open openstack/swift/test/unit/proxy/test_sysmeta.py/FakeServerConnection.read
|
4,424 |
def _data_range_changed(self, value):
try:
self.lut.set_range(value[0], value[1])
except TypeError:
self.lut.set_range((value[0], value[1]))
except __HOLE__:
self.lut.range = value
self.scalar_bar.modified()
self.render()
|
AttributeError
|
dataset/ETHPy150Open enthought/mayavi/mayavi/core/lut_manager.py/LUTManager._data_range_changed
|
4,425 |
def load_lut_from_file(self, file_name):
lut_list = []
if len(file_name) > 0:
try:
f = open(file_name, 'r')
except IOError:
msg = "Cannot open Lookup Table file: %s\n"%file_name
error(msg)
else:
f.close()
try:
lut_list = parse_lut_file(file_name)
except __HOLE__ as err_msg:
msg = "Sorry could not parse LUT file: %s\n"%file_name
msg += err_msg
error(msg)
else:
if self.reverse_lut:
lut_list.reverse()
self.lut = set_lut(self.lut, lut_list)
self.render()
|
IOError
|
dataset/ETHPy150Open enthought/mayavi/mayavi/core/lut_manager.py/LUTManager.load_lut_from_file
|
4,426 |
def write(dictionary, args, output_file_path):
# result to be returned
result = None
# get absolute path
output_file_path_absolute = os.path.abspath(output_file_path)
# sort by headword, optionally ignoring case
dictionary.sort(by_headword=True, ignore_case=args.sort_ignore_case)
# create groups
special_group, group_keys, group_dict = dictionary.group(
prefix_function_path=args.group_by_prefix_function,
prefix_length=int(args.group_by_prefix_length),
merge_min_size=int(args.group_by_prefix_merge_min_size),
merge_across_first=args.group_by_prefix_merge_across_first
)
all_group_keys = group_keys
if special_group is not None:
all_group_keys += [u"SPECIAL"]
# create mobi object
mobi = DictionaryEbook(ebook_format=DictionaryEbook.MOBI, args=args)
# add groups
for key in all_group_keys:
if key == u"SPECIAL":
group_entries = special_group
else:
group_entries = group_dict[key]
mobi.add_group(key, group_entries)
# create output file
print_debug("Writing to file '%s'..." % (output_file_path_absolute), args.debug)
mobi.write(output_file_path_absolute, compress=False)
result = [output_file_path]
print_debug("Writing to file '%s'... done" % (output_file_path_absolute), args.debug)
# run kindlegen
tmp_path = mobi.get_tmp_path()
if args.mobi_no_kindlegen:
print_info("Not running kindlegen, the raw files are located in '%s'" % tmp_path)
result = [tmp_path]
else:
try:
print_debug("Creating .mobi file with kindlegen...", args.debug)
kindlegen_path = KINDLEGEN
opf_file_path_absolute = os.path.join(tmp_path, "OEBPS", "content.opf")
mobi_file_path_relative = u"content.mobi"
mobi_file_path_absolute = os.path.join(tmp_path, "OEBPS", mobi_file_path_relative)
if args.kindlegen_path is None:
print_info(" Running '%s' from $PATH" % KINDLEGEN)
else:
kindlegen_path = args.kindlegen_path
print_info(" Running '%s' from '%s'" % (KINDLEGEN, kindlegen_path))
proc = subprocess.Popen(
[kindlegen_path, opf_file_path_absolute, "-o", mobi_file_path_relative],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE
)
output = proc.communicate()
if args.debug:
output_unicode = (output[0]).decode("utf-8")
print_debug(output_unicode, args.debug)
copy_file(mobi_file_path_absolute, output_file_path_absolute)
result = [output_file_path]
print_debug("Creating .mobi file with kindlegen... done", args.debug)
except __HOLE__ as exc:
print_error(" Unable to run '%s' as '%s'" % (KINDLEGEN, kindlegen_path))
print_error(" Please make sure '%s':" % KINDLEGEN)
print_error(" 1. is available on your $PATH or")
print_error(" 2. specify its path with --kindlegen-path")
# delete tmp directory
tmp_path = mobi.get_tmp_path()
if args.keep:
print_info("Not deleting temp dir '%s'" % (tmp_path))
else:
mobi.delete()
print_debug("Deleted temp dir '%s'" % (tmp_path), args.debug)
return result
|
OSError
|
dataset/ETHPy150Open pettarin/penelope/penelope/format_mobi.py/write
|
4,427 |
def read(self, istream):
super(Attribute, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
# Read the name of the attribute
self.attribute_name = Attribute.AttributeName()
self.attribute_name.read(tstream)
# Read the attribute index if it is next
if self.is_tag_next(Tags.ATTRIBUTE_INDEX, tstream):
self.attribute_index = Attribute.AttributeIndex()
self.attribute_index.read(tstream)
# Lookup the attribute class that belongs to the attribute name
name = self.attribute_name.value
enum_name = name.replace('.', '_').replace(' ', '_').upper()
enum_type = None
try:
enum_type = AttributeType[enum_name]
except __HOLE__:
# Likely custom attribute, pass raw name string as attribute type
enum_type = name
value = self.value_factory.create_attribute_value(enum_type, None)
self.attribute_value = value
self.attribute_value.tag = Tags.ATTRIBUTE_VALUE
self.attribute_value.read(tstream)
self.is_oversized(tstream)
|
KeyError
|
dataset/ETHPy150Open OpenKMIP/PyKMIP/kmip/core/objects.py/Attribute.read
|
4,428 |
def InsertVideoEntry(self, video_entry, filename_or_handle,
youtube_username='default',
content_type='video/quicktime'):
"""Upload a new video to YouTube using the direct upload mechanism.
Needs authentication.
Args:
video_entry: The YouTubeVideoEntry to upload.
filename_or_handle: A file-like object or file name where the video
will be read from.
youtube_username: An optional string representing the username into whose
account this video is to be uploaded to. Defaults to the currently
authenticated user.
content_type: An optional string representing internet media type
(a.k.a. mime type) of the media object. Currently the YouTube API
supports these types:
o video/mpeg
o video/quicktime
o video/x-msvideo
o video/mp4
o video/x-flv
Returns:
The newly created YouTubeVideoEntry if successful.
Raises:
AssertionError: video_entry must be a gdata.youtube.VideoEntry instance.
YouTubeError: An error occurred trying to read the video file provided.
gdata.service.RequestError: An error occurred trying to upload the video
to the API server.
"""
# We need to perform a series of checks on the video_entry and on the
# file that we plan to upload, such as checking whether we have a valid
# video_entry and that the file is the correct type and readable, prior
# to performing the actual POST request.
try:
assert(isinstance(video_entry, gdata.youtube.YouTubeVideoEntry))
except __HOLE__:
raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT,
'body':'`video_entry` must be a gdata.youtube.VideoEntry instance',
'reason':'Found %s, not VideoEntry' % type(video_entry)
})
#majtype, mintype = content_type.split('/')
#
#try:
# assert(mintype in YOUTUBE_SUPPORTED_UPLOAD_TYPES)
#except (ValueError, AssertionError):
# raise YouTubeError({'status':YOUTUBE_INVALID_CONTENT_TYPE,
# 'body':'This is not a valid content type: %s' % content_type,
# 'reason':'Accepted content types: %s' %
# ['video/%s' % (t) for t in YOUTUBE_SUPPORTED_UPLOAD_TYPES]})
if (isinstance(filename_or_handle, (str, unicode))
and os.path.exists(filename_or_handle)):
mediasource = gdata.MediaSource()
mediasource.setFile(filename_or_handle, content_type)
elif hasattr(filename_or_handle, 'read'):
import StringIO
if hasattr(filename_or_handle, 'seek'):
filename_or_handle.seek(0)
file_handle = filename_or_handle
name = 'video'
if hasattr(filename_or_handle, 'name'):
name = filename_or_handle.name
mediasource = gdata.MediaSource(file_handle, content_type,
content_length=file_handle.len, file_name=name)
else:
raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT, 'body':
'`filename_or_handle` must be a path name or a file-like object',
'reason': ('Found %s, not path name or object '
'with a .read() method' % type(filename_or_handle))})
upload_uri = '%s/%s/%s' % (YOUTUBE_UPLOAD_URI, youtube_username,
'uploads')
self.additional_headers['Slug'] = mediasource.file_name
# Using a nested try statement to retain Python 2.4 compatibility
try:
try:
return self.Post(video_entry, uri=upload_uri, media_source=mediasource,
converter=gdata.youtube.YouTubeVideoEntryFromString)
except gdata.service.RequestError, e:
raise YouTubeError(e.args[0])
finally:
del(self.additional_headers['Slug'])
|
AssertionError
|
dataset/ETHPy150Open acil-bwh/SlicerCIP/Scripted/attic/PicasaSnap/gdata/youtube/service.py/YouTubeService.InsertVideoEntry
|
4,429 |
def calcdesc(self, descnames=None):
"""Calculate descriptor values.
Optional parameter:
descnames -- a list of names of descriptors
If descnames is not specified, all available descriptors are
calculated. See the descs variable for a list of available
descriptors.
"""
descnames = descnames or descs
ans = {}
for descname in descnames:
try:
desc = _descDict[descname]
except __HOLE__:
raise ValueError, "%s is not a recognised RDKit descriptor type" % descname
ans[descname] = desc(self.Mol)
return ans
|
KeyError
|
dataset/ETHPy150Open oddt/oddt/oddt/toolkits/rdk.py/Molecule.calcdesc
|
4,430 |
def try_again(f):
def _(self, *a, **kw):
for i in range(3):
try:
return f(self, *a, **kw)
except __HOLE__ as e:
self.close()
logger.warning("mfs master connection: %s", e)
time.sleep(2**i * 0.1)
else:
raise
return _
|
IOError
|
dataset/ETHPy150Open douban/dpark/dpark/moosefs/master.py/try_again
|
4,431 |
def recv_thread(self):
while True:
with self.lock:
if not self.is_ready:
time.sleep(0.01)
continue
try:
r = self.recv_cmd()
self.reply.put(r)
except __HOLE__ as e:
self.reply.put(e)
|
IOError
|
dataset/ETHPy150Open douban/dpark/dpark/moosefs/master.py/MasterConn.recv_thread
|
4,432 |
def py_import(module_name, dependency_dictionary, store_in_config=False):
"""Tries to import a python module, installing if necessary.
If the import doesn't succeed, we guess which system we are running on and
install the corresponding package from the dictionary. We then run the
import again.
If the installation fails, we won't try to install that same module again
for the session.
"""
try:
result = _vanilla_import(module_name)
return result
except ImportError:
if not getattr(get_vistrails_configuration(), 'installBundles'):
raise
if module_name in _previously_failed_pkgs:
raise PyImportException("Import of Python module '%s' failed again, "
"not triggering installation" % module_name)
if store_in_config:
ignored_packages_list = getattr(get_vistrails_configuration(),
'bundleDeclinedList',
None)
if ignored_packages_list:
ignored_packages = set(ignored_packages_list.split(';'))
else:
ignored_packages = set()
if module_name in ignored_packages:
raise PyImportException("Import of Python module '%s' failed "
"again, installation disabled by "
"configuration" % module_name)
debug.warning("Import of python module '%s' failed. "
"Will try to install bundle." % module_name)
success = vistrails.core.bundles.installbundle.install(
dependency_dictionary)
if store_in_config:
if bool(success):
ignored_packages.discard(module_name)
else:
ignored_packages.add(module_name)
setattr(get_vistrails_configuration(),
'bundleDeclinedList',
';'.join(sorted(ignored_packages)))
setattr(get_vistrails_persistent_configuration(),
'bundleDeclinedList',
';'.join(sorted(ignored_packages)))
if not success:
_previously_failed_pkgs.add(module_name)
raise PyImportException("Installation of Python module '%s' failed." %
module_name)
try:
result = _vanilla_import(module_name)
return result
except __HOLE__, e:
_previously_failed_pkgs.add(module_name)
raise PyImportBug("Installation of package '%s' succeeded, but import "
"still fails." % module_name)
|
ImportError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/bundles/pyimport.py/py_import
|
4,433 |
def _process(commands, callback=None, stdin=None, settings=None, working_dir=None, wait_for_completion=None, **kwargs):
'''Process one or more OS commands.'''
if wait_for_completion is None:
wait_for_completion = False
# We're expecting a list of commands, so if we only have one, convert
# it to a list:
#
if isinstance(commands, str):
commands = [commands]
results = []
# Windows needs STARTF_USESHOWWINDOW in order to start the process with a
# hidden window.
#
# See:
#
# http://stackoverflow.com/questions/1016384/cross-platform-subprocess-with-hidden-window
#
startupinfo = None
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
# Now we can execute each command:
#
for command in commands:
# See if there are any interactive shell settings that we could use:
#
bash_env = None
if settings is not None and settings.has('shell_configuration_file'):
bash_env = settings.get('shell_configuration_file')
else:
bash_env = os.getenv('ENV')
if bash_env is not None:
command = '. {}; {}'.format(bash_env, command)
# Work out whether the executable is being overridden in the
# configuration settings or an environment variable:
#
# NOTE: We don't need to check COMSPEC on Windows since this
# is already done inside Popen().
#
executable = None
if settings is not None and settings.has('shell-file-name'):
executable = settings.get('shell-file-name')
else:
executable = os.getenv('SHELL')
try:
proc = subprocess.Popen(command,
executable=executable,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
cwd=working_dir,
startupinfo=startupinfo)
if stdin is not None:
proc.stdin.write(stdin.encode('utf-8'))
proc.stdin.close();
# We're going to keep polling the command and either:
#
# 1. we get None to tell us that the command is still running, or;
# 2. we get a return code to indicate that the command has finished.
#
return_code = None
while return_code is None:
return_code = proc.poll()
# If there's no error then see what we got from the command:
#
if return_code is None or return_code == 0:
# Process whatever output we can get:
#
output = True
while output:
output = proc.stdout.readline().decode().replace('\r\n', '\n')
# If the caller wants everything in one go, or
# there is no callback function, then batch up
# the output. Otherwise pass it back to the
# caller as it becomes available:
#
if wait_for_completion is True or callback is None:
results += output
else:
SH.main_thread(callback, output, **kwargs)
except subprocess.CalledProcessError as e:
SH.main_thread(callback, e.returncode)
except __HOLE__ as e:
if e.errno == 2:
sublime.message_dialog('Command not found\n\nCommand is: %s' % command)
else:
raise e
# Concatenate all of the results and return the value. If we've been
# using the callback then just make one last call with 'None' to indicate
# that we're finished:
#
result = ''.join(results)
if callback is None:
return result
if wait_for_completion is True:
SH.main_thread(callback, result, **kwargs)
SH.main_thread(callback, None, **kwargs)
|
OSError
|
dataset/ETHPy150Open markbirbeck/sublime-text-shell-command/OsShell.py/_process
|
4,434 |
def cli_commands(obj, namef, clizer):
cmds = util.OrderedDict()
cmd_by_name = {}
try:
names = util.dict_from_names(obj).items()
except __HOLE__:
raise ValueError("Cannot guess name for anonymous objects "
"(lists, dicts, etc)")
for key, val in names:
if not key:
continue
names = tuple(namef(name) for name in util.maybe_iter(key))
cli = clizer.get_cli(val)
cmds[names] = cli
for name in names:
cmd_by_name[name] = cli
return cmds, cmd_by_name
|
AttributeError
|
dataset/ETHPy150Open epsy/clize/clize/runner.py/cli_commands
|
4,435 |
@classmethod
def get_cli(cls, obj, **kwargs):
"""Makes an attempt to discover a command-line interface for the
given object.
.. _cli-object:
The process used is as follows:
1. If the object has a ``cli`` attribute, it is used with no further
transformation.
2. If the object is callable, `.Clize` or whichever object this
class method is used from is used to build a CLI. ``**kwargs`` are
forwarded to its initializer.
3. If the object is iterable, `.SubcommandDispatcher` is used on
the object, and its `cli <.SubcommandDispatcher.cli>` method
is used.
Most notably, `clize.run` uses this class method in order to interpret
the given object(s).
"""
try:
cli = obj.cli
except AttributeError:
if callable(obj):
cli = cls(obj, **kwargs)
else:
try:
iter(obj)
except __HOLE__:
raise TypeError("Don't know how to build a cli for "
+ repr(obj))
cli = SubcommandDispatcher(obj, **kwargs).cli
return cli
|
TypeError
|
dataset/ETHPy150Open epsy/clize/clize/runner.py/Clize.get_cli
|
4,436 |
def __get__(self, obj, owner=None):
try:
func = self.func.__get__(obj, owner)
except __HOLE__:
func = self.func
if func is self.func:
return self
params = self.parameters()
params['owner'] = obj
return type(self)(func, **params)
|
AttributeError
|
dataset/ETHPy150Open epsy/clize/clize/runner.py/Clize.__get__
|
4,437 |
@Clize(helper_class=_dispatcher_helper)
@annotate(name=parameters.pass_name,
command=(lowercase, parser.Parameter.LAST_OPTION))
def cli(self, name, command, *args):
try:
func = self.cmds_by_name[command]
except __HOLE__:
raise errors.ArgumentError('Unknown command "{0}"'.format(command))
return func('{0} {1}'.format(name, command), *args)
|
KeyError
|
dataset/ETHPy150Open epsy/clize/clize/runner.py/SubcommandDispatcher.cli
|
4,438 |
def fix_argv(argv, path, main):
"""Properly display ``python -m`` invocations"""
if not path[0]:
try:
name = main_module_name(main)
except __HOLE__:
pass
else:
argv = argv[:]
argv[0] = '{0} -m {1}'.format(
get_executable(sys.executable, 'python'), name)
else:
name = get_executable(argv[0], argv[0])
argv = argv[:]
argv[0] = name
return argv
|
AttributeError
|
dataset/ETHPy150Open epsy/clize/clize/runner.py/fix_argv
|
4,439 |
def get_executable(path, default):
if not path:
return default
if path.endswith('.py'):
return path
basename = os.path.basename(path)
try:
which = shutil.which
except __HOLE__:
which = None
else:
if which(basename) == path:
return basename
rel = os.path.relpath(path)
if rel.startswith('../'):
if which is None and os.path.isabs(path):
return basename
return path
return rel
|
AttributeError
|
dataset/ETHPy150Open epsy/clize/clize/runner.py/get_executable
|
4,440 |
def parse_opts(buf):
"""Parse TCP option buffer into a list of (option, data) tuples."""
opts = []
while buf:
o = ord(buf[0])
if o > TCP_OPT_NOP:
try:
l = ord(buf[1])
d, buf = buf[2:l], buf[l:]
except __HOLE__:
#print 'bad option', repr(str(buf))
opts.append(None) # XXX
break
else:
d, buf = '', buf[1:]
opts.append((o,d))
return opts
|
ValueError
|
dataset/ETHPy150Open dragondjf/QMarkdowner/dpkt/tcp.py/parse_opts
|
4,441 |
def test_execfile_traceback(self):
globals = {}
try:
execfile(self.basename1, globals)
except __HOLE__:
tb = sys.exc_info()[2]
self.assertEqual(tb.tb_next.tb_frame.f_code.co_filename,
self.basename1)
|
NotImplementedError
|
dataset/ETHPy150Open babble/babble/include/jython/Lib/test/test_chdir.py/ExecfileTracebackTestCase.test_execfile_traceback
|
4,442 |
def tearDown(self):
try:
super(ImportJarTestCase, self).tearDown()
except __HOLE__:
# XXX: Windows raises an error here when deleting the jar
# because SyspathArchive holds onto its file handle (and you
# can't delete a file in use on Windows). We may not want to
# change this
self.assert_(WINDOWS)
if 'ChdirJyTest' in sys.modules:
del sys.modules['ChdirJyTest']
|
OSError
|
dataset/ETHPy150Open babble/babble/include/jython/Lib/test/test_chdir.py/ImportJarTestCase.tearDown
|
4,443 |
def __init__(self, **kwargs):
self.kwargs = kwargs
if CONF.fatal_exception_format_errors:
assert isinstance(self.msg_fmt, six.text_type)
try:
self.message = self.msg_fmt % kwargs
except __HOLE__:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'),
extra=dict(
private=dict(
msg=self.msg_fmt,
args=kwargs
)
)
)
if CONF.fatal_exception_format_errors:
raise
|
KeyError
|
dataset/ETHPy150Open openstack/solum/solum/common/exception.py/SolumException.__init__
|
4,444 |
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.model._meta
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
# Skip all proxy to the root proxied model
proxied_model = get_proxied_model(opts)
if start_alias:
seen = {None: start_alias}
for field, model in opts.get_fields_with_model():
if start_alias:
try:
alias = seen[model]
except __HOLE__:
if model is proxied_model:
alias = start_alias
else:
link_field = opts.get_ancestor_link(model)
alias = self.join((start_alias, model._meta.db_table,
link_field.column, model._meta.pk.column))
seen[model] = alias
else:
# If we're starting from the base model of the queryset, the
# aliases will have already been set up in pre_sql_setup(), so
# we can save time here.
alias = self.included_inherited_models[model]
table = self.alias_map[alias][TABLE_NAME]
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field.column))
aliases.add(alias)
continue
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(field.column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(field.column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
|
KeyError
|
dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/db/models/sql/query.py/BaseQuery.get_default_columns
|
4,445 |
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns and
ordering must be done first.
"""
result = []
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
first = True
for alias in self.tables:
if not self.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, lhs_col, col, nullable = self.alias_map[alias]
except __HOLE__:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = (alias != name and ' %s' % alias or '')
if join_type and not first:
result.append('%s %s%s ON (%s.%s = %s.%s)'
% (join_type, qn(name), alias_str, qn(lhs),
qn2(lhs_col), qn(alias), qn2(col)))
else:
connector = not first and ', ' or ''
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.extra_tables:
alias, unused = self.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# calls increments the refcount, so an alias refcount of one means
# this is the only reference.
if alias not in self.alias_map or self.alias_refcount[alias] == 1:
connector = not first and ', ' or ''
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, []
|
KeyError
|
dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/db/models/sql/query.py/BaseQuery.get_from_clause
|
4,446 |
def setup_joins(self, names, opts, alias, dupe_multis, allow_many=True,
allow_explicit_fk=False, can_reuse=None, negate=False,
process_extras=True):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are joining to), 'alias' is the alias for the
table we are joining to. If dupe_multis is True, any many-to-many or
many-to-one joins will always create a new alias (necessary for
disjunctive filters). If can_reuse is not None, it's a list of aliases
that can be reused in these joins (nothing else can be reused in this
case). Finally, 'negate' is used in the same sense as for add_filter()
-- it indicates an exclude() filter, or something similar. It is only
passed in here so that it can be passed to a field's extra_filter() for
customised behaviour.
Returns the final field involved in the join, the target database
column (used for any 'where' constraint), the final 'opts' value and the
list of tables joined.
"""
joins = [alias]
last = [0]
dupe_set = set()
exclusions = set()
extra_filters = []
for pos, name in enumerate(names):
try:
exclusions.add(int_alias)
except __HOLE__:
pass
exclusions.add(alias)
last.append(len(joins))
if name == 'pk':
name = opts.pk.name
try:
field, model, direct, m2m = opts.get_field_by_name(name)
except FieldDoesNotExist:
for f in opts.fields:
if allow_explicit_fk and name == f.attname:
# XXX: A hack to allow foo_id to work in values() for
# backwards compatibility purposes. If we dropped that
# feature, this could be removed.
field, model, direct, m2m = opts.get_field_by_name(f.name)
break
else:
names = opts.get_all_field_names() + self.aggregate_select.keys()
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
if not allow_many and (m2m or not direct):
for alias in joins:
self.unref_alias(alias)
raise MultiJoin(pos + 1)
if model:
# The field lives on a base class of the current model.
# Skip the chain of proxy to the concrete proxied model
proxied_model = get_proxied_model(opts)
for int_model in opts.get_base_chain(model):
if int_model is proxied_model:
opts = int_model._meta
else:
lhs_col = opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
exclusions.update(self.dupe_avoidance.get(
(id(opts), lhs_col), ()))
dupe_set.add((opts, lhs_col))
opts = int_model._meta
alias = self.join((alias, opts.db_table, lhs_col,
opts.pk.column), exclusions=exclusions)
joins.append(alias)
exclusions.add(alias)
for (dupe_opts, dupe_col) in dupe_set:
self.update_dupe_avoidance(dupe_opts, dupe_col,
alias)
cached_data = opts._join_cache.get(name)
orig_opts = opts
dupe_col = direct and field.column or field.field.column
dedupe = dupe_col in opts.duplicate_targets
if dupe_set or dedupe:
if dedupe:
dupe_set.add((opts, dupe_col))
exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col),
()))
if process_extras and hasattr(field, 'extra_filters'):
extra_filters.extend(field.extra_filters(names, pos, negate))
if direct:
if m2m:
# Many-to-many field defined on the current model.
if cached_data:
(table1, from_col1, to_col1, table2, from_col2,
to_col2, opts, target) = cached_data
else:
table1 = field.m2m_db_table()
from_col1 = opts.pk.column
to_col1 = field.m2m_column_name()
opts = field.rel.to._meta
table2 = opts.db_table
from_col2 = field.m2m_reverse_name()
to_col2 = opts.pk.column
target = opts.pk
orig_opts._join_cache[name] = (table1, from_col1,
to_col1, table2, from_col2, to_col2, opts,
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
if int_alias == table2 and from_col2 == to_col2:
joins.append(int_alias)
alias = int_alias
else:
alias = self.join(
(int_alias, table2, from_col2, to_col2),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.extend([int_alias, alias])
elif field.rel:
# One-to-one or many-to-one field
if cached_data:
(table, from_col, to_col, opts, target) = cached_data
else:
opts = field.rel.to._meta
target = field.rel.get_related_field()
table = opts.db_table
from_col = field.column
to_col = target.column
orig_opts._join_cache[name] = (table, from_col, to_col,
opts, target)
alias = self.join((alias, table, from_col, to_col),
exclusions=exclusions, nullable=field.null)
joins.append(alias)
else:
# Non-relation fields.
target = field
break
else:
orig_field = field
field = field.field
if m2m:
# Many-to-many field defined on the target model.
if cached_data:
(table1, from_col1, to_col1, table2, from_col2,
to_col2, opts, target) = cached_data
else:
table1 = field.m2m_db_table()
from_col1 = opts.pk.column
to_col1 = field.m2m_reverse_name()
opts = orig_field.opts
table2 = opts.db_table
from_col2 = field.m2m_column_name()
to_col2 = opts.pk.column
target = opts.pk
orig_opts._join_cache[name] = (table1, from_col1,
to_col1, table2, from_col2, to_col2, opts,
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
alias = self.join((int_alias, table2, from_col2, to_col2),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.extend([int_alias, alias])
else:
# One-to-many field (ForeignKey defined on the target model)
if cached_data:
(table, from_col, to_col, opts, target) = cached_data
else:
local_field = opts.get_field_by_name(
field.rel.field_name)[0]
opts = orig_field.opts
table = opts.db_table
from_col = local_field.column
to_col = field.column
target = opts.pk
orig_opts._join_cache[name] = (table, from_col, to_col,
opts, target)
alias = self.join((alias, table, from_col, to_col),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.append(alias)
for (dupe_opts, dupe_col) in dupe_set:
try:
self.update_dupe_avoidance(dupe_opts, dupe_col, int_alias)
except NameError:
self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
if pos != len(names) - 1:
if pos == len(names) - 2:
raise FieldError("Join on field %r not permitted. Did you misspell %r for the lookup type?" % (name, names[pos + 1]))
else:
raise FieldError("Join on field %r not permitted." % name)
return field, target, opts, joins, last, extra_filters
|
NameError
|
dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/db/models/sql/query.py/BaseQuery.setup_joins
|
4,447 |
def update_dupe_avoidance(self, opts, col, alias):
"""
For a column that is one of multiple pointing to the same table, update
the internal data structures to note that this alias shouldn't be used
for those other columns.
"""
ident = id(opts)
for name in opts.duplicate_targets[col]:
try:
self.dupe_avoidance[ident, name].add(alias)
except __HOLE__:
self.dupe_avoidance[ident, name] = set([alias])
|
KeyError
|
dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/db/models/sql/query.py/BaseQuery.update_dupe_avoidance
|
4,448 |
def get(self, name, **kwargs):
try:
cls = self.providers[name]
except __HOLE__:
raise InvalidProvider(name)
return cls(**kwargs)
|
KeyError
|
dataset/ETHPy150Open getsentry/freight/freight/providers/manager.py/ProviderManager.get
|
4,449 |
def execute_directives(self):
global __KV_INCLUDES__
for ln, cmd in self.directives:
cmd = cmd.strip()
if __debug__:
trace('Parser: got directive <%s>' % cmd)
if cmd[:5] == 'kivy ':
version = cmd[5:].strip()
if len(version.split('.')) == 2:
version += '.0'
require(version)
elif cmd[:4] == 'set ':
try:
name, value = cmd[4:].strip().split(' ', 1)
except:
Logger.exception('')
raise ParserException(self, ln, 'Invalid directive syntax')
try:
value = eval(value, global_idmap)
except:
Logger.exception('')
raise ParserException(self, ln, 'Invalid value')
global_idmap[name] = value
elif cmd[:8] == 'include ':
ref = cmd[8:].strip()
force_load = False
if ref[:6] == 'force ':
ref = ref[6:].strip()
force_load = True
if ref[-3:] != '.kv':
Logger.warn('WARNING: {0} does not have a valid Kivy'
'Language extension (.kv)'.format(ref))
break
if ref in __KV_INCLUDES__:
if not os.path.isfile(resource_find(ref) or ref):
raise ParserException(self, ln,
'Invalid or unknown file: {0}'.format(ref))
if not force_load:
Logger.warn('WARNING: {0} has already been included!'
.format(ref))
break
else:
Logger.debug('Reloading {0} because include was forced.'
.format(ref))
kivy.lang.builder.Builder.unload_file(ref)
kivy.lang.builder.Builder.load_file(ref)
continue
Logger.debug('Including file: {0}'.format(0))
__KV_INCLUDES__.append(ref)
kivy.lang.builder.Builder.load_file(ref)
elif cmd[:7] == 'import ':
package = cmd[7:].strip()
l = package.split()
if len(l) != 2:
raise ParserException(self, ln, 'Invalid import syntax')
alias, package = l
try:
if package not in sys.modules:
try:
mod = __import__(package)
except ImportError:
mod = __import__('.'.join(package.split('.')[:-1]))
# resolve the whole thing
for part in package.split('.')[1:]:
mod = getattr(mod, part)
else:
mod = sys.modules[package]
global_idmap[alias] = mod
except __HOLE__:
Logger.exception('')
raise ParserException(self, ln,
'Unable to import package %r' %
package)
else:
raise ParserException(self, ln, 'Unknown directive')
|
ImportError
|
dataset/ETHPy150Open kivy/kivy/kivy/lang/parser.py/Parser.execute_directives
|
4,450 |
@app.get('/dossier/v1/feature-collection/<cid>/search/<engine_name>')
@app.post('/dossier/v1/feature-collection/<cid>/search/<engine_name>')
def v1_search(request, response, visid_to_dbid, config,
search_engines, filters, cid, engine_name):
'''Search feature collections.
The route for this endpoint is:
``/dossier/v1/<content_id>/search/<search_engine_name>``.
``content_id`` can be any *profile* content identifier. (This
restriction may be lifted at some point.) Namely, it must start
with ``p|``.
``engine_name`` corresponds to the search strategy to
use. The list of available search engines can be retrieved with the
:func:`v1_search_engines` endpoint.
This endpoint returns a JSON payload which is an object with a
single key, ``results``. ``results`` is a list of objects, where
the objects each have ``content_id`` and ``fc`` attributes.
``content_id`` is the unique identifier for the result returned,
and ``fc`` is a JSON serialization of a feature collection.
There are also two query parameters:
* **limit** limits the number of results to the number given.
* **filter** sets the filtering function. The default
filter function, ``already_labeled``, will filter out any
feature collections that have already been labeled with the
query ``content_id``.
'''
db_cid = visid_to_dbid(cid)
try:
search_engine = search_engines[engine_name]
except __HOLE__ as e:
bottle.abort(404, 'Search engine "%s" does not exist.' % e.message)
query = request.query if request.method == 'GET' else request.forms
search_engine = (config.create(search_engine)
.set_query_id(db_cid)
.set_query_params(query))
for name, filter in filters.items():
search_engine.add_filter(name, config.create(filter))
return search_engine.respond(response)
|
KeyError
|
dataset/ETHPy150Open dossier/dossier.web/dossier/web/routes.py/v1_search
|
4,451 |
@app.get('/dossier/v1/folder/<fid>/subfolder', json=True)
def v1_subfolder_list(request, response, kvlclient, fid):
'''Retrieves a list of subfolders in a folder for the current user.
The route for this endpoint is:
``GET /dossier/v1/folder/<fid>/subfolder``.
(Temporarily, the "current user" can be set via the
``annotator_id`` query parameter.)
The payload returned is a list of subfolder identifiers.
'''
fid = urllib.unquote(fid)
try:
return sorted(imap(attrgetter('name'),
ifilter(lambda it: it.is_folder(),
new_folders(kvlclient, request).list(fid))))
except __HOLE__:
response.status = 404
return []
|
KeyError
|
dataset/ETHPy150Open dossier/dossier.web/dossier/web/routes.py/v1_subfolder_list
|
4,452 |
@app.get('/dossier/v1/folder/<fid>/subfolder/<sfid>', json=True)
def v1_subtopic_list(request, response, kvlclient, fid, sfid):
'''Retrieves a list of items in a subfolder.
The route for this endpoint is:
``GET /dossier/v1/folder/<fid>/subfolder/<sfid>``.
(Temporarily, the "current user" can be set via the
``annotator_id`` query parameter.)
The payload returned is a list of two element arrays. The first
element in the array is the item's content id and the second
element is the item's subtopic id.
'''
path = urllib.unquote(fid) + '/' + urllib.unquote(sfid)
try:
items = []
for it in new_folders(kvlclient, request).list(path):
if '@' in it.name:
items.append(it.name.split('@'))
else:
items.append((it.name, None))
return items
except __HOLE__:
response.status = 404
return []
|
KeyError
|
dataset/ETHPy150Open dossier/dossier.web/dossier/web/routes.py/v1_subtopic_list
|
4,453 |
def str_to_max_int(s, maximum):
try:
return min(maximum, int(s))
except (__HOLE__, TypeError):
return maximum
|
ValueError
|
dataset/ETHPy150Open dossier/dossier.web/dossier/web/routes.py/str_to_max_int
|
4,454 |
def new_folders(kvlclient, request):
try:
config = yakonfig.get_global_config('dossier.folders')
# For old configs.
if 'prefix' in config:
config['namespace'] = config.pop('prefix')
except __HOLE__:
config = {}
if 'annotator_id' in request.query:
config['owner'] = request.query['annotator_id']
return Folders(kvlclient, **config)
|
KeyError
|
dataset/ETHPy150Open dossier/dossier.web/dossier/web/routes.py/new_folders
|
4,455 |
def init(
dist='dist',
minver=None,
maxver=None,
use_markdown_readme=True,
use_stdeb=False,
use_distribute=False,
):
"""Imports and returns a setup function.
If use_markdown_readme is set,
then README.md is added to setuptools READMES list.
If use_stdeb is set on a Debian based system,
then module stdeb is imported.
Stdeb supports building deb packages on Debian based systems.
The package should only be installed on the same system version
it was built on, though. See http://github.com/astraw/stdeb.
If use_distribute is set, then distribute_setup.py is imported.
"""
if not minver == maxver == None:
import sys
if not minver <= sys.version < (maxver or 'Any'):
sys.stderr.write(
'%s: requires python version in <%s, %s), not %s\n' % (
sys.argv[0], minver or 'any', maxver or 'any', sys.version.split()[0]))
sys.exit(1)
if use_distribute:
from distribute_setup import use_setuptools
use_setuptools(to_dir=dist)
from setuptools import setup
else:
try:
from setuptools import setup
except __HOLE__:
from distutils.core import setup
if use_markdown_readme:
try:
import setuptools.command.sdist
setuptools.command.sdist.READMES = tuple(list(getattr(setuptools.command.sdist, 'READMES', ()))
+ ['README.md'])
except ImportError:
pass
if use_stdeb:
import platform
if 'debian' in platform.dist():
try:
import stdeb
except ImportError:
pass
return setup
|
ImportError
|
dataset/ETHPy150Open salsita/flask-raml/setup.py/init
|
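A minimal sketch of the import-with-fallback pattern used by init() above; the package metadata below is made up for illustration.
try:
    from setuptools import setup  # preferred when setuptools is available
except ImportError:
    from distutils.core import setup  # stdlib fallback

# Hypothetical metadata, only to show how the returned setup() is typically called.
setup(
    name='example-package',
    version='0.1.0',
    py_modules=['example'],
)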
4,456 |
def read_array(filename):
"""Reads an array from a file.
The first line must be a header with labels and units in a particular
format.
"""
unused_, filetype = os.path.splitext(filename)
fo = open(filename, "rU")
try:
if filetype == ".txt":
header = map(eval, fo.readline().split("\t"))
a = numpy.fromfile(fo, dtype="f8", sep="\n\t")
elif filetype == ".csv":
header = map(str.strip, fo.readline().split(","))
a = numpy.fromiter(_ReadCSV(fo), numpy.float64)
elif filetype == ".dat": # gnuplot style data
line1 = fo.readline()[2:].split("\t")
try:
header = map(eval, line1)
except (__HOLE__, SyntaxError): # assume no header line
header = line1
fo.seek(0)
a = numpy.fromfile(fo, dtype="f8", sep="\n")
else:
raise ValueError(
"read_array: Invalid file type. need .txt, .csv, or .dat (got %r)." %
filetype)
finally:
fo.close()
# Data may have SCPI NAN or INF values in it. Convert to numpy
# equivalents.
a = numpy.fromiter(itertools.imap(check_value, a), numpy.float64)
a.shape = (-1, len(header))
return header, a
|
ValueError
|
dataset/ETHPy150Open kdart/pycopia/QA/pycopia/dataset.py/read_array
|
4,457 |
def flush(self):
"""Flushes the stream"""
try:
stream = self.get_wsgierrors()
except __HOLE__:
pass
else:
if stream:
stream.flush()
|
TypeError
|
dataset/ETHPy150Open Pylons/pylons/pylons/log.py/WSGIErrorsHandler.flush
|
4,458 |
def emit(self, record):
"""Emit a record"""
try:
stream = self.get_wsgierrors()
except TypeError:
pass
else:
if not stream:
return
try:
msg = self.format(record)
fs = "%s\n"
if not hasattr(types, "UnicodeType"): # if no unicode support
stream.write(fs % msg)
else:
try:
stream.write(fs % msg)
except UnicodeError:
stream.write(fs % msg.encode("UTF-8"))
self.flush()
except (__HOLE__, SystemExit):
raise
except:
self.handleError(record)
|
KeyboardInterrupt
|
dataset/ETHPy150Open Pylons/pylons/pylons/log.py/WSGIErrorsHandler.emit
|
4,459 |
def make_unauthorized_request(self, request_url):
response = urllib.urlopen(request_url)
try:
json_response = json.load(response)
except __HOLE__, e:
logging.error('Invalid response: %s (%d)' % (e,
response.getcode()))
return None
return json_response
|
ValueError
|
dataset/ETHPy150Open dasevilla/tumblr-python/tumblr/__init__.py/TumblrClient.make_unauthorized_request
|
4,460 |
def make_oauth_request(self, request_url, method='GET', body=None):
if not self.consumer or not self.token:
logging.error('Missing OAuth credentials')
return None
oauth_client = oauth2.Client(self.consumer, self.token)
if body:
response, content = oauth_client.request(request_url, method,
body)
else:
response, content = oauth_client.request(request_url, method)
try:
json_response = json.loads(content)
except __HOLE__, e:
logging.error('Invalid response: %s (%s)' % (e,
response['status']))
return None
return json_response
|
ValueError
|
dataset/ETHPy150Open dasevilla/tumblr-python/tumblr/__init__.py/TumblrClient.make_oauth_request
|
4,461 |
def get_shared_cache_folder():
"""
Look in the registry for the configured cache folder.
If there is no entry, then we create one.
:return:
"""
_winreg.aReg = _winreg.ConnectRegistry(None, _winreg.HKEY_CURRENT_USER)
try:
key = _winreg.OpenKey(_winreg.aReg, r"SOFTWARE\CCP\EVEONLINE")
path, _ = _winreg.QueryValueEx(key, "CACHEFOLDER")
except __HOLE__:
return None
return path
|
OSError
|
dataset/ETHPy150Open ccpgames/rescache/paths_win.py/get_shared_cache_folder
|
4,462 |
def set_shared_cache_folder(folder_path):
if not os.path.isdir(folder_path):
try:
os.makedirs(folder_path)
except __HOLE__:
raise ValueError("Could not create directory {}".format(folder_path))
folder_path = os.path.normpath(folder_path) + os.sep
key_eveonline = _winreg.CreateKey(_winreg.aReg, r"SOFTWARE\CCP\EVEONLINE")
_winreg.SetValueEx(key_eveonline, "CACHEFOLDER", 0, _winreg.REG_SZ, folder_path)
key_eveprobe = _winreg.CreateKey(_winreg.aReg, r"SOFTWARE\CCP\EVEPROBE")
_winreg.SetValueEx(key_eveprobe, "CACHEFOLDER", 0, _winreg.REG_SZ, folder_path)
|
OSError
|
dataset/ETHPy150Open ccpgames/rescache/paths_win.py/set_shared_cache_folder
|
4,463 |
def setMode (key, newval):
try:
if (type(newval) is not int):
newval = int(newval)
except __HOLE__, e:
raise UnknownApprovalMode(newval)
if (newval < 0) or (newval > 2):
raise UnknownApprovalMode(newval)
if newval == RULES:
item = ConfigDB.getConfigItemByKey("geni.openflow.analysis-engine")
if not item.getValue():
item.write(True)
return newval
|
ValueError
|
dataset/ETHPy150Open fp7-ofelia/ocf/ofam/src/src/foam/geni/approval.py/setMode
|
4,464 |
@ensure_template_arg
def get_variables_from_template(template):
variable_nodes = [n for n in template.nodelist if isinstance(n, DefineNode)]
variables = SortedDict()
for node in variable_nodes:
if node.variable_name in variables:
raise TemplateSyntaxError('%s defined multiple times - %s' % (
node.variable_name,
_get_template_name_from_node(node)
))
try:
variables[node.variable_name] = {
'type': get_type(node.variable_type),
'optional': node.optional,
'initial_field_values': node.initial_field_values,
}
except __HOLE__:
raise TemplateSyntaxError('%s type not registered - %s' % (
node.variable_type,
_get_template_name_from_node(node)
))
return variables
|
KeyError
|
dataset/ETHPy150Open KristianOellegaard/cmsplugin-text-ng/cmsplugin_text_ng/utils.py/get_variables_from_template
|
4,465 |
def test_already_exists_rollback_file(self):
# Test the rollback feature by attempting to create two files, the
# second of which already exists.
# Test that the first file gets deleted on the rollback.
tempdir = tempfile.mkdtemp()
try:
file_one = os.path.join(tempdir, 'one.txt')
file_two = os.path.join(tempdir, 'two.txt')
# Create the file_two to cause the error
open(file_two, 'w').close()
afc = util.AtomicFileCreator()
afc.create_file('file', file_one, 'abc')
try:
afc.create_file('file', file_two, 'def')
except __HOLE__:
assert os.path.exists(file_one)
assert not os.path.isdir(file_one)
afc._rollback()
# Make sure rollback deletes it
assert not os.path.exists(file_one)
# file_two was created before, and it should still exist
assert os.path.exists(file_two)
else:
# No IOError, fail the test
assert False, "Expected IOError not raised"
finally:
shutil.rmtree(tempdir)
|
IOError
|
dataset/ETHPy150Open zerovm/zerovm-cli/zpmlib/tests/test_util.py/TestAtomicFileCreator.test_already_exists_rollback_file
|
4,466 |
def test_already_exists_rollback_dir(self):
# Test the rollback feature by attempting to create a dir and a file,
# the second of which already exists.
# Test that the dir gets deleted on the rollback.
tempdir = tempfile.mkdtemp()
try:
dir_one = os.path.join(tempdir, 'dir_one')
file_two = os.path.join(tempdir, 'two.txt')
# Create the file_two to cause the error
open(file_two, 'w').close()
afc = util.AtomicFileCreator()
afc.create_file('dir', dir_one, None)
try:
afc.create_file('file', file_two, 'def')
except __HOLE__:
assert os.path.exists(dir_one)
assert os.path.isdir(dir_one)
afc._rollback()
# Make sure rollback deletes it
assert not os.path.exists(dir_one)
# file_two was created before, and it should still exist
assert os.path.exists(file_two)
else:
# No IOError, fail the test
assert False, "Expected IOError not raised"
finally:
shutil.rmtree(tempdir)
|
IOError
|
dataset/ETHPy150Open zerovm/zerovm-cli/zpmlib/tests/test_util.py/TestAtomicFileCreator.test_already_exists_rollback_dir
|
4,467 |
def test_context_manager_failure_case(self):
tempdir = tempfile.mkdtemp()
try:
dir_one = os.path.join(tempdir, 'dir_one')
# This tests creating files inside created dirs:
file_one = os.path.join(dir_one, 'one.txt')
file_two = os.path.join(tempdir, 'two.txt')
file_three = os.path.join(tempdir, 'three.txt')
# file_two will be pre-existing
open(file_two, 'w').close()
try:
with util.AtomicFileCreator() as afc:
afc.create_file('dir', dir_one, None)
afc.create_file('file', file_one, 'abc')
afc.create_file('file', file_two, 'def')
afc.create_file('file', file_three, 'ghi')
except __HOLE__:
assert afc._files_created == [
('dir', dir_one, None),
('file', file_one, 'abc'),
]
# Test that everything was rolled back, except for the
# pre-existing file.
assert not os.path.exists(dir_one)
assert not os.path.exists(file_one)
assert os.path.exists(file_two)
assert not os.path.exists(file_three)
else:
# No IOError, fail the test
assert False, "Expected IOError not raised"
finally:
shutil.rmtree(tempdir)
|
IOError
|
dataset/ETHPy150Open zerovm/zerovm-cli/zpmlib/tests/test_util.py/TestAtomicFileCreator.test_context_manager_failure_case
|
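Judging from the tests above, zpmlib's AtomicFileCreator appears to be usable as a context manager roughly as sketched below; the paths are hypothetical and the rollback-on-IOError behaviour is inferred from the test expectations, not from the implementation itself.
import os
import shutil
import tempfile
from zpmlib import util  # import path assumed to mirror the tests

workdir = tempfile.mkdtemp()
try:
    with util.AtomicFileCreator() as afc:
        afc.create_file('dir', os.path.join(workdir, 'build'), None)
        afc.create_file('file', os.path.join(workdir, 'build', 'config.txt'), 'key=value')
    # If any create_file() call raises IOError, everything created inside the
    # with-block is expected to be rolled back.
finally:
    shutil.rmtree(workdir)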
4,468 |
def transform_csv(self):
self.log(" Transforming source CSV")
grouped = {}
form2field = {
# F460
'A-1': 'itemized_monetary_contributions',
'A-2': 'unitemized_monetary_contributions',
'A-3': 'total_monetary_contributions',
'F460-4': 'non_monetary_contributions',
'F460-5': 'total_contributions',
'E-1': 'itemized_expenditures',
'E-2': 'unitemized_expenditures',
'E-4': 'total_expenditures',
'F460-16': 'ending_cash_balance',
'F460-19': 'outstanding_debts',
# F450
'F450-7': 'total_monetary_contributions',
'F450-8': 'non_monetary_contributions',
'F450-10': 'total_contributions',
'F450-1': 'itemized_expenditures',
'F450-2': 'unitemized_expenditures',
'E-6': 'total_expenditures',
}
self.log(" Regrouping")
for r in csv.DictReader(open(self.source_csv, 'rb')):
uid = "%s-%s" % (r['FILING_ID'], r['AMEND_ID'])
formkey = "%s-%s" % (r['FORM_TYPE'], r['LINE_ITEM'])
try:
field = form2field[formkey]
except KeyError:
continue
try:
grouped[uid][field] = self.safeamt(r['AMOUNT_A'])
except __HOLE__:
grouped[uid] = SortedDict((
("itemized_monetary_contributions", "\N"),
("unitemized_monetary_contributions", "\N"),
("total_monetary_contributions", "\N"),
("non_monetary_contributions", "\N"),
("total_contributions", "\N"),
("itemized_expenditures", "\N"),
("unitemized_expenditures", "\N"),
("total_expenditures", "\N"),
("ending_cash_balance", "\N"),
("outstanding_debts", "\N")
))
grouped[uid][field] = self.safeamt(r['AMOUNT_A'])
self.log(" Writing to filesystem")
out = csv.writer(open(self.target_csv, "wb"))
outheaders = (
"filing_id_raw",
"amend_id",
"itemized_monetary_contributions",
"unitemized_monetary_contributions",
"total_monetary_contributions",
"non_monetary_contributions",
"total_contributions",
"itemized_expenditures",
"unitemized_expenditures",
"total_expenditures",
"ending_cash_balance",
"outstanding_debts"
)
out.writerow(outheaders)
for uid, data in grouped.items():
outrow = uid.split("-") + data.values()
out.writerow(outrow)
|
KeyError
|
dataset/ETHPy150Open california-civic-data-coalition/django-calaccess-campaign-browser/calaccess_campaign_browser/management/commands/loadcalaccesscampaignsummaries.py/Command.transform_csv
|
4,469 |
def tearDown(self):
try:
self.cnx.close()
self.removefile()
except __HOLE__:
pass
except sqlite.ProgrammingError:
pass
|
AttributeError
|
dataset/ETHPy150Open sassoftware/conary/conary/pysqlite3/test/api_tests.py/moduleTestCases.tearDown
|
4,470 |
def CheckCursorIterator(self):
self.cur.execute("create table test (id, name)")
self.cur.executemany("insert into test (id) values (?)",
[(1,), (2,), (3,)])
self.cur.execute("select id from test")
if sys.version_info[:2] >= (2,2):
counter = 0
for row in self.cur:
if counter == 0:
self.failUnlessEqual(row.id, 1,
"row.id should have been 1, was %i" % row.id)
elif counter == 1:
self.failUnlessEqual(row.id, 2,
"row.id should have been 2, was %i" % row.id)
elif counter == 2:
self.failUnlessEqual(row.id, 3,
"row.id should have been 3, was %i" % row.id)
else:
self.fail("Iterated over too many rows.")
counter += 1
else:
# Python 2.1
counter = 0
try:
while 1:
row = self.cur.next()
if counter == 0:
self.failUnlessEqual(row.id, 1,
"row.id should have been 1, was %i" % row.id)
elif counter == 1:
self.failUnlessEqual(row.id, 2,
"row.id should have been 2, was %i" % row.id)
elif counter == 2:
self.failUnlessEqual(row.id, 3,
"row.id should have been 3, was %i" % row.id)
else:
self.fail("Iterated over too many rows.")
counter += 1
except __HOLE__:
pass
self.failUnlessEqual(counter, 3,
"Should have iterated over 3 items, was: %i" % counter)
|
IndexError
|
dataset/ETHPy150Open sassoftware/conary/conary/pysqlite3/test/api_tests.py/moduleTestCases.CheckCursorIterator
|
4,471 |
def echo(self, request, *args, **kwargs):
response = HttpResponse("echo")
try:
response.data = request.data
except __HOLE__:
response.data = request.DATA
return response
|
AttributeError
|
dataset/ETHPy150Open kevin-brown/drf-json-api/tests/views.py/EchoMixin.echo
|
4,472 |
def get_all_addrs(self, name):
'''
Returns all the addresses which the logical name would resolve to,
or raises NameNotFound if there is no known address for the given name.
'''
ctx = context.get_context()
address_groups = self.address_groups or ctx.address_groups
try:
address_list = list(address_groups[name])
except __HOLE__:
err_str = "no address found for name {0}".format(name)
if ctx.stage_ip is None:
err_str += " (no stage communication configured; did you forget?)"
raise NameNotFound(err_str)
return address_list
|
KeyError
|
dataset/ETHPy150Open paypal/support/support/connection_mgr.py/ConnectionManager.get_all_addrs
|
4,473 |
def _edit(self, filename, line=None):
""" Opens a Python script for editing.
Parameters:
-----------
filename : str
A path to a local system file.
line : int, optional
A line of interest in the file.
"""
if self.custom_edit:
self.custom_edit_requested.emit(filename, line)
elif not self.editor:
self._append_plain_text('No default editor available.\n'
'Specify a GUI text editor in the `IPythonWidget.editor` '
'configurable to enable the %edit magic')
else:
try:
filename = '"%s"' % filename
if line and self.editor_line:
command = self.editor_line.format(filename=filename,
line=line)
else:
try:
command = self.editor.format()
except __HOLE__:
command = self.editor.format(filename=filename)
else:
command += ' ' + filename
except KeyError:
self._append_plain_text('Invalid editor command.\n')
else:
try:
Popen(command, shell=True)
except OSError:
msg = 'Opening editor with command "%s" failed.\n'
self._append_plain_text(msg % command)
|
KeyError
|
dataset/ETHPy150Open ipython/ipython-py3k/IPython/frontend/qt/console/ipython_widget.py/IPythonWidget._edit
|
4,474 |
def _read(self):
try:
profile_f = open(self.fname)
except __HOLE__:
return
for lineno, line in enumerate(profile_f):
line = line.strip()
if not line or line.startswith("#"):
continue
test_key, platform_key, counts = line.split()
per_fn = self.data[test_key]
per_platform = per_fn[platform_key]
c = [int(count) for count in counts.split(",")]
per_platform['counts'] = c
per_platform['lineno'] = lineno + 1
per_platform['current_count'] = 0
profile_f.close()
|
IOError
|
dataset/ETHPy150Open zzzeek/sqlalchemy/lib/sqlalchemy/testing/profiling.py/ProfileStatsFile._read
|
4,475 |
def find_pdh_counter_localized_name(english_name, machine_name = None):
if not counter_english_map:
import win32api, win32con
counter_reg_value = win32api.RegQueryValueEx(win32con.HKEY_PERFORMANCE_DATA,
"Counter 009")
counter_list = counter_reg_value[0]
for i in range(0, len(counter_list) - 1, 2):
try:
counter_id = int(counter_list[i])
except __HOLE__:
continue
counter_english_map[counter_list[i+1].lower()] = counter_id
return win32pdh.LookupPerfNameByIndex(machine_name, counter_english_map[english_name.lower()])
|
ValueError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/win32/lib/win32pdhutil.py/find_pdh_counter_localized_name
|
4,476 |
def FindPerformanceAttributesByName(instanceName, object = None,
counter = None,
format = win32pdh.PDH_FMT_LONG,
machine = None, bRefresh=0):
"""Find peformance attributes by (case insensitive) instance name.
Given a process name, return a list with the requested attributes.
Most useful for returning a tuple of PIDs given a process name.
"""
if object is None: object = find_pdh_counter_localized_name("Process", machine)
if counter is None: counter = find_pdh_counter_localized_name("ID Process", machine)
if bRefresh: # PDH docs say this is how you do a refresh.
win32pdh.EnumObjects(None, machine, 0, 1)
instanceName = string.lower(instanceName)
items, instances = win32pdh.EnumObjectItems(None,None,object, -1)
# Track multiple instances.
instance_dict = {}
for instance in instances:
try:
instance_dict[instance] = instance_dict[instance] + 1
except __HOLE__:
instance_dict[instance] = 0
ret = []
for instance, max_instances in instance_dict.items():
for inum in xrange(max_instances+1):
if string.lower(instance) == instanceName:
ret.append(GetPerformanceAttributes(object, counter,
instance, inum, format,
machine))
return ret
|
KeyError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/win32/lib/win32pdhutil.py/FindPerformanceAttributesByName
|
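For context, the helper above is typically called as shown below (Windows only); the process name is an arbitrary example.
import win32pdhutil

# One entry is returned per running instance whose name matches, case-insensitively.
pids = win32pdhutil.FindPerformanceAttributesByName("python")
print(pids)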
4,477 |
def ShowAllProcesses():
object = find_pdh_counter_localized_name("Process")
items, instances = win32pdh.EnumObjectItems(None,None,object,
win32pdh.PERF_DETAIL_WIZARD)
# Need to track multiple instances of the same name.
instance_dict = {}
for instance in instances:
try:
instance_dict[instance] = instance_dict[instance] + 1
except __HOLE__:
instance_dict[instance] = 0
# Bit of a hack to get useful info.
items = [find_pdh_counter_localized_name("ID Process")] + items[:5]
print "Process Name", string.join(items,",")
for instance, max_instances in instance_dict.items():
for inum in xrange(max_instances+1):
hq = win32pdh.OpenQuery()
hcs = []
for item in items:
path = win32pdh.MakeCounterPath( (None,object,instance,
None, inum, item) )
hcs.append(win32pdh.AddCounter(hq, path))
win32pdh.CollectQueryData(hq)
# as per http://support.microsoft.com/default.aspx?scid=kb;EN-US;q262938, some "%" based
# counters need two collections
time.sleep(0.01)
win32pdh.CollectQueryData(hq)
print "%-15s\t" % (instance[:15]),
for hc in hcs:
type, val = win32pdh.GetFormattedCounterValue(hc, win32pdh.PDH_FMT_LONG)
print "%5d" % (val),
win32pdh.RemoveCounter(hc)
print
win32pdh.CloseQuery(hq)
|
KeyError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/win32/lib/win32pdhutil.py/ShowAllProcesses
|
4,478 |
def loadHooks(self):
target = self.target
config = self.target.config
lib = self.target.library
platform = self.target.project.platform
# Collect hooks in various locations
for fileName in config.get("hooks", []):
Log.notice("Parsing hooks from '%s'." % fileName)
source = self.readSource(fileName)
functions = Parser.parseSource(source).functions.values()
if not functions:
Log.warn("No hooks found.")
for f in functions:
Log.debug("%s %s(%s)" % (f.type, f.name, ", ".join(["%s %s" % (t.type, p) for p, t in f.parameters.items()])))
for function in functions:
if not function.body:
Log.warn("Hook function '%s' has no body." % function.name)
continue
if function.name.startswith("@"):
lib.hooks[function.name] = function.body
continue
else:
try:
name, hookName = function.name.split(".", 1)
if not name in lib.functions:
target.fail("Function '%s' referred by hook function '%s' does not exist." % (name, function.name))
if not hookName.startswith("@") and not hookName in lib.functions[name].parameters:
target.fail("Parameter '%s' referred by hook function '%s' does not exist." % (hookName, function.name))
lib.functions[name].hooks[hookName] = function.body
except __HOLE__:
target.fail("Hook function name '%s' is not valid." % function.name)
|
ValueError
|
dataset/ETHPy150Open skyostil/tracy/src/generator/Target.py/ParserTool.loadHooks
|
4,479 |
def prepare(self):
# Shorthand for various objects
config = self.config
lib = self.library
# Parse the sources
for fileName in config.get("apiheaders", []):
Log.notice("Parsing functions from '%s'." % fileName)
source = self.parserTool.readSource(fileName)
newLib = Parser.parseSource(source)
for f in newLib.functions.values():
f.headerName = fileName
for f in newLib.functions.values():
Log.debug("%s %s(%s)" % (f.type, f.name, ", ".join(["%s %s" % (t.type, p) for p, t in f.parameters.items()])))
if not newLib.functions:
Log.warn("No new functions found.")
else:
Log.notice("%d functions found." % len(newLib.functions))
lib.merge(newLib)
# Load the hooks
self.parserTool.loadHooks()
def parseBool(s):
return bool(int(s))
# Read the typemap
for typeDecl, mapping in self.config.types.items():
attrs = self.config.types[typeDecl].attrs
name, type = Parser.parseVariableDeclaration(typeDecl + " dummy")
assert name == "dummy"
# If this is a class mapping, create the class if it doesn't already exist
if mapping == "object":
if not mapping in self.library.classes:
cls = Library.Class(type)
if "namespace" in attrs:
cls.namespacePath = attrs["namespace"].split(".")
self.library.classes[type] = cls
# Patch the default decoration hint into all matching types
if "decorationhint" in attrs:
for function in self.library.functions.values():
for t in [p.type for p in function.parameters.values()] + [function.type]:
if t == type:
t.decorationHint = attrs["decorationhint"]
self.library.typeMap[type] = str(mapping)
# Patch in some function-specific attributes
for function in config.functions.keys():
if not function in lib.functions:
self.fail("Attributes specified for non-existent function '%s'." % function)
attrs = config.functions[function].attrs
if "terminator" in attrs:
lib.functions[function].isTerminator = parseBool(attrs["terminator"])
if "generate" in attrs:
lib.functions[function].generate = parseBool(attrs["generate"])
if "runtimestate" in attrs:
lib.functions[function].runtimeStateTracking = parseBool(attrs["runtimestate"])
if "framemarker" in attrs:
lib.functions[function].isFrameMarker = parseBool(attrs["framemarker"])
if "staticlinkage" in attrs:
lib.functions[function].staticLinkage = parseBool(attrs["staticlinkage"])
if "rendercall" in attrs:
lib.functions[function].isRenderCall = parseBool(attrs["rendercall"])
if "passthrough" in attrs:
lib.functions[function].passthrough = parseBool(attrs["passthrough"])
if not isinstance(config.functions[function], Config.Group):
self.fail("Syntax error: State map definition for function '%s' is missing braces." % function)
# Argument to state mapping
reservedNames = ["@return", "@modify", "@set", "@get", "@copy"]
funcAttrs = attrs
for arg, parameter in config.functions[function].items():
# Check that this is a valid parameter
if not arg in reservedNames and not arg in lib.functions[function].parameters:
self.fail("State mapping for nonexistent parameter '%s' of function '%s' specified." % (arg, function))
if arg in ["@copy"] and parseBool(funcAttrs.get("runtimestate", "0")):
Log.warn("Function %s state relation %s not implemented for runtime state tracking." % (function, arg))
# Read the parameter-specific attributes
attrs = config.functions[function][arg].attrs
if "decoration" in attrs:
lib.functions[function].parameters[arg].decoration = attrs["decoration"]
if "decorationhint" in attrs:
lib.functions[function].parameters[arg].decorationHint = attrs["decorationhint"]
if "out" in attrs:
lib.functions[function].parameters[arg].isOut = parseBool(attrs["out"])
if "object_class" in attrs:
# Create a function-local type so that this parameter type is an object only for this function
if arg == "@return":
type = lib.functions[function].type
else:
type = lib.functions[function].parameters[arg].type
# Override the type's name so that it will refer to the new object class
# while still using the original C type under the hood
newType = copy.deepcopy(type)
newType.isObject = True
newType.name = attrs["object_class"]
if arg == "@return":
lib.functions[function].type = newType
else:
lib.functions[function].parameters[arg].type = newType
# Check that this class exists
classType = Library.Type(attrs["object_class"])
if not classType in self.library.classes:
self.fail("Undefined object class '%s'." % classType)
# Do we have a meta type?
if "metatype" in config.functions[function][arg]:
metaGroup = config.functions[function][arg].metatype
try:
metaType = Library.MetaType(metaGroup.attrs["class"])
except KeyError:
self.fail("Meta type for parameter '%s' does not define class." % arg)
# Is this an array parameter?
if metaType.name == "array":
metaType.values["size"] = Library.MetaValue("size", metaGroup.attrs.get("size", 1))
if "type" in metaGroup.attrs:
metaType.values["type"] = Library.MetaValue("type", metaGroup.attrs["type"])
if metaGroup.attrs["type"] == "object":
if not "object_class" in metaGroup.attrs:
self.fail("Required metatype attribute object_class missing")
metaType.values["object_class"] = Library.MetaValue("object_class", metaGroup.attrs["object_class"])
# How about an image parameter?
elif metaType.name == "image":
metaType.values["stride"] = Library.MetaValue("stride", metaGroup.attrs.get("stride", "width"))
metaType.values["height"] = Library.MetaValue("height", metaGroup.attrs.get("height", "height"))
metaType.values["components"] = Library.MetaValue("components", metaGroup.attrs.get("components", "1"))
metaType.values["type"] = Library.MetaValue("type", metaGroup.attrs.get("type", "byte"))
else:
self.fail("Unknown meta type class '%s'." % metaclass)
Log.debug("Meta type: %s.%s: %s" % (function, arg, metaType.name))
# Get the conditions for different meta values
if isinstance(metaGroup, Config.List):
for item in metaGroup:
predicate = item.attrs["condition"]
predicateValue = item.attrs["value"]
result = item.attrs["result"]
metaType.values[item].addPredicate(predicate, predicateValue, result)
Log.debug("Meta type condition: If %s is %s, then %s = %s" % (predicate, predicateValue, item, result))
elif isinstance(metaGroup, Config.Group):
Log.error("Meta type variations for parameter '%s' represented in a group instead of a list." % arg)
# Record the meta type
lib.functions[function].parameters[arg].metaType = metaType
# Is this a short-hand state mapping?
try:
path = parameter.split(".")
except __HOLE__:
# Try the expanded form of a nested attribute set
try:
path = (config.functions[function][arg].state).split(".")
except AttributeError:
path = []
# Check that we even have a state structure
if path and not "state" in config:
Log.warn("State structure not defined.")
continue
# Parse special state mapping relations
relation = None
checkPaths = []
if arg == "@copy":
relation = Library.StateRelationCopy(attrs["src"].split("."), attrs["dest"].split("."))
checkPaths = [relation.sourcePath, relation.destPath]
elif arg == "@get":
relation = Library.StateRelationGet(path)
checkPaths = [relation.path]
elif arg == "@set":
relation = Library.StateRelationSet(path)
checkPaths = [relation.path]
elif arg == "@modify":
relation = Library.StateRelationModify(path)
checkPaths = [relation.path]
# Empty mapping?
elif not "".join(path):
continue
if relation:
for path in checkPaths:
if traverseStatePath(config, path) is None:
self.fail("Relation state path '%s' for function '%s' does not exist." % (".".join(path), function))
Log.debug("State relation: %s %s" % (function, relation))
lib.functions[function].stateRelations.append(relation)
continue
Log.debug("State mapping: %s.%s -> %s" % (function, arg, ".".join(path)))
# Determine the parameter type
type = None
if arg == "@return":
type = lib.functions[function].type
else:
type = lib.functions[function].parameters[arg].type
# If this is a runtime mapping, check that the parameter is of a supported type
if lib.functions[function].runtimeStateTracking and type and \
lib.getNativeType(type) in ["float", "double"]:
self.fail("Values of type '%s' can not be saved to the runtime state tree" % type)
continue
node = traverseStatePath(config, path)
if node is None:
self.fail("State path '%s' for function '%s' does not exist." % (".".join(path), function))
# Save the mapping
if arg == "@return":
lib.functions[function].retStateRelation = Library.StateRelationSet(path)
else:
lib.functions[function].parameters[arg].stateRelation = Library.StateRelationSet(path)
|
AttributeError
|
dataset/ETHPy150Open skyostil/tracy/src/generator/Target.py/CodeTarget.prepare
|
4,480 |
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except __HOLE__:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
|
OSError
|
dataset/ETHPy150Open django/django/django/core/files/move.py/_samefile
|
4,481 |
def file_move_safe(old_file_name, new_file_name, chunk_size=1024 * 64, allow_overwrite=False):
"""
Moves a file from one location to another in the safest way possible.
First, tries ``os.rename``, which is simple but will break across filesystems.
If that fails, streams manually from one file to another in pure Python.
If the destination file exists and ``allow_overwrite`` is ``False``, this
function will throw an ``IOError``.
"""
# There's no reason to move if we don't have to.
if _samefile(old_file_name, new_file_name):
return
try:
# If the destination file exists and allow_overwrite is False then raise an IOError
if not allow_overwrite and os.access(new_file_name, os.F_OK):
raise IOError("Destination file %s exists and allow_overwrite is False" % new_file_name)
os.rename(old_file_name, new_file_name)
return
except __HOLE__:
# This will happen with os.rename if moving to another filesystem
# or when moving opened files on certain operating systems
pass
# first open the old file, so that it won't go away
with open(old_file_name, 'rb') as old_file:
# now open the new file, not forgetting allow_overwrite
fd = os.open(new_file_name, (os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0) |
(os.O_EXCL if not allow_overwrite else 0)))
try:
locks.lock(fd, locks.LOCK_EX)
current_chunk = None
while current_chunk != b'':
current_chunk = old_file.read(chunk_size)
os.write(fd, current_chunk)
finally:
locks.unlock(fd)
os.close(fd)
copystat(old_file_name, new_file_name)
try:
os.remove(old_file_name)
except OSError as e:
# Certain operating systems (Cygwin and Windows)
# fail when deleting opened files, ignore it. (For the
# systems where this happens, temporary files will be auto-deleted
# on close anyway.)
if getattr(e, 'winerror', 0) != 32 and getattr(e, 'errno', 0) != 13:
raise
|
OSError
|
dataset/ETHPy150Open django/django/django/core/files/move.py/file_move_safe
|
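A short usage sketch of the helper above; the file paths are placeholders.
from django.core.files.move import file_move_safe

# Renames in place when possible, otherwise streams the bytes and removes the source.
# Raises IOError if the destination already exists and allow_overwrite is False.
file_move_safe('/tmp/upload.partial', '/tmp/upload.dat', allow_overwrite=False)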
4,482 |
def __init__(self):
self.nmap_path = "/usr/bin/nmap"
self.lib = True
if not os.path.exists(self.nmap_path):
try:
import nmap
self.lib = False
except __HOLE__:
mess = "Please install the python-nmap module (pip install nmap)!"
raise CrowbarExceptions(mess)
except:
mess = "File: %s doesn't exists!" % self.nmap_path
raise CrowbarExceptions(mess)
|
ImportError
|
dataset/ETHPy150Open galkan/crowbar/lib/nmap.py/Nmap.__init__
|
4,483 |
def configure(args):
if args.switch:
change_default_profile(args.switch)
return
config = ConfigParser()
config.add_section('profiles')
config['profiles']['host[1]'] = input('>> MPD host [localhost]: ') or \
'localhost'
config['profiles']['port[1]'] = input('>> MPD port [6600]: ') or '6600'
config['profiles']['password[1]'] = input('>> MPD password []: ') or ''
print('\n')
config.add_section('mpdc')
print('Later, you will probably need to store and edit your collections/'
'playlists in a specific file. Please create an empty file '
'(e.g. collections.mpdc) where you want and write its path below.')
while True:
path = input('>> Full path of the collections file: ')
if os.path.isfile(path):
break
warning('Cannot find the file: ' + path)
print('\n')
config['mpdc']['collections'] = path
colors = input('>> Enable colors [Y/n]: ').lower() or 'y'
if colors == 'y':
config['mpdc']['colors'] = 'green, red, blue'
print('\n')
config['mpdc']['columns'] = 'artist, title, album'
filepath = os.path.expanduser('~/.mpdc')
try:
with open(filepath, 'w') as configfile:
config.write(configfile)
info('Writing configuration file in: ' + filepath)
except __HOLE__:
warning('Cannot write configuration file in: ' + filepath)
|
IOError
|
dataset/ETHPy150Open nhrx/mpdc/mpdc/mpdc_configure.py/configure
|
4,484 |
def change_default_profile(profile):
config = ConfigParser()
filepath = os.path.expanduser('~/.mpdc')
if not config.read(filepath):
warning('Cannot read the configuration file, run mpdc-configure')
return
config['profiles']['default'] = str(profile)
try:
with open(filepath, 'w') as configfile:
config.write(configfile)
info('Writing configuration file in: ' + filepath)
except __HOLE__:
warning('Cannot write configuration file in: ' + filepath)
# --------------------------------
# Commands parser
# --------------------------------
|
IOError
|
dataset/ETHPy150Open nhrx/mpdc/mpdc/mpdc_configure.py/change_default_profile
|
4,485 |
def retry_using_http_NTLM_auth(self, req, auth_header_field, realm, headers):
user, pw = self.passwd.find_user_password(realm, req.get_full_url())
debug_output(user, pw)
if pw is not None:
user_parts = user.split('\\', 1)
if len(user_parts) == 1:
UserName = user_parts[0]
DomainName = ''
type1_flags = ntlm.NTLM_TYPE1_FLAGS & ~ntlm.NTLM_NegotiateOemDomainSupplied
else:
DomainName = user_parts[0].upper()
UserName = user_parts[1]
type1_flags = ntlm.NTLM_TYPE1_FLAGS
# ntlm secures a socket, so we must use the same socket for the complete handshake
debug_output(hex(type1_flags))
headers = dict(req.headers)
headers.update(req.unredirected_hdrs)
auth = 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(user, type1_flags)
if req.headers.get(self.auth_header, None) == auth:
debug_output("no auth_header")
return None
headers[self.auth_header] = auth
host = req.get_host()
if not host:
raise urllib.request.URLError('no host given')
h = None
if req.get_full_url().startswith('https://'):
h = http.client.HTTPSConnection(host) # will parse host:port
else:
h = http.client.HTTPConnection(host) # will parse host:port
# we must keep the connection because NTLM authenticates the connection, not single requests
headers["Connection"] = "Keep-Alive"
headers = dict((name.title(), val) for name, val in list(headers.items()))
h.request(req.get_method(), req.get_selector(), req.data, headers)
r = h.getresponse()
r.begin()
r._safe_read(int(r.getheader('content-length')))
debug_output('data read')
try:
if r.getheader('set-cookie'):
# this is important for some web applications that store authentication-related info in cookies (it took a long time to figure out)
headers['Cookie'] = r.getheader('set-cookie')
debug_output('cookie: ', headers['Cookie'])
except __HOLE__:
debug_output('no cookie')
pass
r.fp = None # remove the reference to the socket, so that it can not be closed by the response object (we want to keep the socket open)
auth_header_value = r.getheader(auth_header_field, None)
debug_output(r.headers)
debug_output(auth_header_field, ': ', auth_header_value)
(ServerChallenge, NegotiateFlags) = ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value[5:])
debug_output('server c ', ServerChallenge, ' server flags ', hex(NegotiateFlags))
auth = 'NTLM %s' % ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge, UserName, DomainName, pw, NegotiateFlags)
headers[self.auth_header] = auth
debug_output('auth ', auth)
headers["Connection"] = "Close"
headers = dict((name.title(), val) for name, val in list(headers.items()))
try:
h.request(req.get_method(), req.get_selector(), req.data, headers)
# none of the configured handlers are triggered, for example redirect-responses are not handled!
response = h.getresponse()
debug_output('data 3 read')
def notimplemented():
raise NotImplementedError
response.readline = notimplemented
return addinfourl(response, response.msg, req.get_full_url())
except socket.error as err:
raise urllib.request.URLError(err)
else:
return None
|
TypeError
|
dataset/ETHPy150Open genzj/pybingwallpaper/src/ntlmauth/HTTPNtlmAuthHandler.py/AbstractNtlmAuthHandler.retry_using_http_NTLM_auth
|
4,486 |
def user_data(self, token, *args, **kwargs):
"""Loads user data from service"""
url = 'https://nodeapi.classlink.com/v2/my/info'
auth_header = {"Authorization": "Bearer %s" % token}
try:
return self.get_json(url, headers=auth_header)
except __HOLE__:
return None
|
ValueError
|
dataset/ETHPy150Open omab/python-social-auth/social/backends/classlink.py/ClasslinkOAuth.user_data
|
4,487 |
def __init__(self):
try:
super(HTML2PlainParser, self).__init__()
except __HOLE__:
self.reset()
self.text = '' # Used to push the results into a variable
self.links = [] # List of aggregated links
# Settings
self.ignored_elements = getattr(
settings, 'HTML2PLAINTEXT_IGNORED_ELEMENTS',
['html', 'head', 'style', 'meta', 'title', 'img']
)
self.newline_before_elements = getattr(
settings, 'HTML2PLAINTEXT_NEWLINE_BEFORE_ELEMENTS',
['br', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'div', 'p', 'li']
)
self.newline_after_elements = getattr(
settings, 'HTML2PLAINTEXT_NEWLINE_AFTER_ELEMENTS',
['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'div', 'p', 'td']
)
self.stroke_before_elements = getattr(
settings, 'HTML2PLAINTEXT_STROKE_BEFORE_ELEMENTS',
['tr']
)
self.stroke_after_elements = getattr(
settings, 'HTML2PLAINTEXT_STROKE_AFTER_ELEMENTS',
['tr']
)
self.stroke_text = getattr(settings, 'HTML2PLAINTEXT_STROKE_TEXT',
'------------------------------\n')
|
TypeError
|
dataset/ETHPy150Open bitmazk/django-libs/django_libs/utils/converter.py/HTML2PlainParser.__init__
|
4,488 |
def _filter_coverage_for_added_lines(self, diff, coverage):
"""
This function takes a diff (text based) and a map of file names to the coverage for those files and
returns an ordered list of the coverage for each "addition" line in the diff.
If we don't have coverage for a specific file, we just mark the lines in those files as unknown or 'N'.
"""
if not diff:
return None
diff_lines = diff.splitlines()
current_file = None
line_number = None
coverage_by_added_line = []
for line in diff_lines:
if line.startswith('diff'):
# We're about to start a new file.
current_file = None
line_number = None
elif current_file is None and line_number is None and (line.startswith('+++') or line.startswith('---')):
# We're starting a new file
if line.startswith('+++ b/'):
line = line.split('\t')[0]
current_file = unicode(line[6:])
elif line.startswith('@@'):
# Jump to new lines within the file
line_num_info = line.split('+')[1]
# Strip off the trailing ' @@' so that when only the line is specified
# and there is no comma, we can just parse as a number.
line_num_info = line_num_info.rstrip("@ ")
line_number = int(line_num_info.split(',')[0]) - 1
elif current_file is not None and line_number is not None:
# Iterate through the file.
if line.startswith('+'):
# Make sure we have coverage for this line. Else just tag it as unknown.
cov = 'N'
if current_file in coverage:
try:
cov = coverage[current_file][line_number]
except __HOLE__:
logger = logging.getLogger('coverage')
logger.info('Missing code coverage for line %d of file %s' % (line_number, current_file))
coverage_by_added_line.append(cov)
if not line.startswith('-'):
# Up the line count (assuming we aren't at a remove line)
line_number += 1
return coverage_by_added_line
|
IndexError
|
dataset/ETHPy150Open dropbox/changes/changes/api/source_details.py/SourceDetailsAPIView._filter_coverage_for_added_lines
|
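To make the expected inputs of the coverage filter above concrete, here is a small invented example of a unified diff plus a per-file coverage map; the values and file name are illustrative only.
# One added line ("+print(...)"), which is line 2 of the new version of app.py.
diff = (
    "diff --git a/app.py b/app.py\n"
    "--- a/app.py\n"
    "+++ b/app.py\n"
    "@@ -1,2 +1,3 @@\n"
    " import sys\n"
    "+print(sys.version)\n"
    " pass\n"
)
# The per-file list is indexed from 0, so new-file line 2 is read from index 1;
# files missing from the map fall back to 'N' (unknown coverage).
coverage = {u'app.py': ['C', 'C', 'U']}
# For this input the method would return ['C'], one entry per added line.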
4,489 |
def __getitem__(self, field):
try:
self.refresh()
return self.data.get(field)
except __HOLE__:
return None
|
KeyError
|
dataset/ETHPy150Open petrvanblokland/Xierpa3/xierpa3/toolbox/session/session.py/Session.__getitem__
|
4,490 |
def cleanUp(self):
"""
Clean up the session for objects that can't be cpickled:
1. parsed node trees with weakrefs cannot be cpickled;
python v 2.4 raises a TypeError instead of pickle.UnpickleableError, so catch this exception
>>> from xpyth.xmlparser import XMLParser
>>> parsed_xml = XMLParser().parse('<a>**c**</a>')
>>> parsed_xml
<a No parent >
>>> s = Session('1234')
>>> s.set('nodewithweakparent', parsed_xml)
<a No parent >
"""
for d in self.data:
for key, value in d.items():
try:
pickle.dumps(value, 1)
except (pickle.UnpickleableError, pickle.PicklingError, TypeError, __HOLE__): #@UndefinedVariable
del d[key]
|
KeyError
|
dataset/ETHPy150Open petrvanblokland/Xierpa3/xierpa3/toolbox/session/session.py/Session.cleanUp
|
4,491 |
def test_unicode_libraries():
try:
unicode
except __HOLE__:
py.test.skip("for python 2.x")
#
import math
lib_m = "m"
if sys.platform == 'win32':
#there is a small chance this fails on Mingw via environ $CC
import distutils.ccompiler
if distutils.ccompiler.get_default_compiler() == 'msvc':
lib_m = 'msvcrt'
ffi = FFI()
ffi.cdef(unicode("float sin(double); double cos(double);"))
lib = verify(ffi, 'test_math_sin_unicode', unicode('#include <math.h>'),
libraries=[unicode(lib_m)])
assert lib.cos(1.43) == math.cos(1.43)
|
NameError
|
dataset/ETHPy150Open johncsnyder/SwiftKitten/cffi/testing/cffi1/test_recompiler.py/test_unicode_libraries
|
4,492 |
def parseRange(text):
articles = text.split('-')
if len(articles) == 1:
try:
a = int(articles[0])
return a, a
except __HOLE__:
return None, None
elif len(articles) == 2:
try:
if len(articles[0]):
l = int(articles[0])
else:
l = None
if len(articles[1]):
h = int(articles[1])
else:
h = None
except ValueError:
return None, None
return l, h
|
ValueError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/news/nntp.py/parseRange
|
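A few illustrative calls to the article-range parser above, showing the (low, high) tuples it produces; the import assumes the twisted.news package named in the path is installed.
from twisted.news.nntp import parseRange

print(parseRange('42'))      # (42, 42)
print(parseRange('10-20'))   # (10, 20)
print(parseRange('10-'))     # (10, None)
print(parseRange('abc'))     # (None, None)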
4,493 |
def extractCode(line):
line = line.split(' ', 1)
if len(line) != 2:
return None
try:
return int(line[0]), line[1]
except __HOLE__:
return None
|
ValueError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/news/nntp.py/extractCode
|
4,494 |
def lineReceived(self, line):
if self.inputHandler is not None:
self.inputHandler(line)
else:
parts = line.strip().split()
if len(parts):
cmd, parts = parts[0].upper(), parts[1:]
if cmd in NNTPServer.COMMANDS:
func = getattr(self, 'do_%s' % cmd)
try:
func(*parts)
except __HOLE__:
self.sendLine('501 command syntax error')
log.msg("501 command syntax error")
log.msg("command was", line)
log.deferr()
except:
self.sendLine('503 program fault - command not performed')
log.msg("503 program fault")
log.msg("command was", line)
log.deferr()
else:
self.sendLine('500 command not recognized')
|
TypeError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/news/nntp.py/NNTPServer.lineReceived
|
4,495 |
def articleWork(self, article, cmd, func):
if self.currentGroup is None:
self.sendLine('412 no newsgroup has been selected')
else:
if not article:
if self.currentIndex is None:
self.sendLine('420 no current article has been selected')
else:
article = self.currentIndex
else:
if article[0] == '<':
return func(self.currentGroup, index = None, id = article)
else:
try:
article = int(article)
return func(self.currentGroup, article)
except __HOLE__:
self.sendLine('501 command syntax error')
|
ValueError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/news/nntp.py/NNTPServer.articleWork
|
4,496 |
@property
def _case_property_queries(self):
"""
Returns all current case_property queries
"""
try:
return self.es_query['query']['filtered']['query']['bool']['must']
except (__HOLE__, TypeError):
return []
|
KeyError
|
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/es/case_search.py/CaseSearchES._case_property_queries
|
4,497 |
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id: maf2psl.py 2879 2010-04-06 14:44:34Z andreas $",
usage=globals()["__doc__"])
parser.add_option("-q", "--query", dest="query", type="string",
help="sequence to use for query [default=%default].")
parser.add_option("-t", "--target", dest="target", type="string",
help="sequence to use for target [default=%default].")
parser.set_defaults(
query=None,
target=None,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
if options.query is None or options.target is None:
if len(args) != 2:
raise ValueError(
"please supply two sequence identifiers for query and target")
options.query, options.target = args
# do sth
ninput, nskipped, noutput = 0, 0, 0
reader = maf.Reader(options.stdin)
psl = Blat.Match()
for cc in threaditer(reader, (options.query, options.target)):
ninput += 1
query, target = cc
# treat identfiers like Hsap.GL000223.1
try:
data = query.src.split(".")
qs, qcontig = data[0], ".".join(data[1:])
except ValueError, msg:
raise ValueError(
"error: could not parse query %s: msg=%s" % (query.src, msg))
try:
data = target.src.split(".")
ts, tcontig = data[0], ".".join(data[1:])
except __HOLE__, msg:
raise ValueError(
"error: could not parse target %s: msg=%s" % (target.src, msg))
assert qs == options.query
assert ts == options.target
psl.mQueryId = qcontig
psl.mSbjctId = tcontig
psl.fromPair(query.start, query.src_size, query.strand, query.text.upper(),
target.start, target.src_size, target.strand, target.text.upper())
E.debug("%s\t%s\t%i\t%i\t%s\t%s" %
(qs, qcontig, query.start, query.src_size, query.strand, query.text))
E.debug("%s\t%s\t%i\t%i\t%s\t%s" %
(ts, tcontig, target.start, target.src_size, target.strand, target.text))
options.stdout.write("%s\n" % str(psl))
noutput += 1
E.info("ninput=%i, noutput=%i, nskipped=%i" % (ninput, noutput, nskipped))
# write footer and output benchmark information.
E.Stop()
|
ValueError
|
dataset/ETHPy150Open CGATOxford/cgat/scripts/maf2psl.py/main
|
4,498 |
def serve(self):
self.serverTransport.listen()
while True:
try:
client = self.serverTransport.accept()
t = threading.Thread(target = self.handle, args=(client,))
t.setDaemon(self.daemon)
t.start()
except __HOLE__:
raise
except Exception, x:
logging.exception(x)
|
KeyboardInterrupt
|
dataset/ETHPy150Open JT5D/Alfred-Popclip-Sublime/Sublime Text 2/SublimeEvernote/lib/thrift/server/TServer.py/TThreadedServer.serve
|
4,499 |
def serve(self):
def try_close(file):
try:
file.close()
except __HOLE__, e:
logging.warning(e, exc_info=True)
self.serverTransport.listen()
while True:
client = self.serverTransport.accept()
try:
pid = os.fork()
if pid: # parent
# add before collect, otherwise you race w/ waitpid
self.children.append(pid)
self.collect_children()
# Parent must close socket or the connection may not get
# closed promptly
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
try_close(itrans)
try_close(otrans)
else:
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
ecode = 0
try:
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException, tx:
pass
except Exception, e:
logging.exception(e)
ecode = 1
finally:
try_close(itrans)
try_close(otrans)
os._exit(ecode)
except TTransport.TTransportException, tx:
pass
except Exception, x:
logging.exception(x)
|
IOError
|
dataset/ETHPy150Open JT5D/Alfred-Popclip-Sublime/Sublime Text 2/SublimeEvernote/lib/thrift/server/TServer.py/TForkingServer.serve
|