Columns:
  Unnamed: 0 — int64, values 0 to 10k (row index)
  function   — string, lengths 79 to 138k (Python source with a __HOLE__ placeholder)
  label      — string, 20 classes (the exception type that fills the hole)
  info       — string, lengths 42 to 261 (provenance path)
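Each row below pairs a Python function containing a __HOLE__ placeholder with the exception class that fills the hole ("label") and a provenance path ("info"). A minimal sketch of loading and iterating such rows, assuming the dump has been saved as a CSV with the columns above (the filename and the pandas-based loading are assumptions, not part of the dataset itself):

    import pandas as pd

    # Hypothetical filename; the dump's actual storage format is not specified here.
    df = pd.read_csv("exception_prediction_rows.csv")

    # Every function cell should contain exactly one __HOLE__ placeholder,
    # and the label names the exception class that belongs in it.
    for _, row in df.head(3).iterrows():
        assert "__HOLE__" in row["function"]
        print(row["label"], "<-", row["info"])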
5,500
def _extract_biblio(self, page, id=None):
    dict_of_keylists = {
        'title': ['title'],
        'genre': ['defined_type'],
        #'authors_literal': ['authors'],
        'published_date': ['published_date']
    }
    item = self._extract_figshare_record(page, id)
    biblio_dict = provider._extract_from_data_dict(item, dict_of_keylists)
    biblio_dict["repository"] = "figshare"
    try:
        biblio_dict["year"] = int(biblio_dict["published_date"][-4:])
    except (KeyError, __HOLE__):
        pass
    if "genre" in biblio_dict:
        genre = biblio_dict["genre"].lower()
        #override
        if genre in ["figure", "poster"]:
            genre = biblio_dict["genre"]
        elif genre == "presentation":
            genre = "slides"
        elif genre == "paper":
            genre = "article"
        elif genre == "media":
            genre = "video"
        else:
            genre = "dataset"  #includes fileset
        biblio_dict["genre"] = genre
    if biblio_dict["genre"] == "article":
        biblio_dict["free_fulltext_url"] = self._get_templated_url(self.provenance_url_template, id, "provenance")
    # the authors data is messy, so just give up for now
    # if "authors_literal" in biblio_dict:
    #     surname_list = [author["last_name"] for author in biblio_dict["authors_literal"]]
    #     if surname_list:
    #         biblio_dict["authors"] = ", ".join(surname_list)
    #         del biblio_dict["authors_literal"]
    return biblio_dict
TypeError
dataset/ETHPy150Open Impactstory/total-impact-core/totalimpact/providers/figshare.py/Figshare._extract_biblio
5,501
def member_items(self, account_name, provider_url_template=None, cache_enabled=True):
    if not self.provides_members:
        raise NotImplementedError()
    self.logger.debug(u"%s getting member_items for %s" % (self.provider_name, account_name))
    if not provider_url_template:
        provider_url_template = self.member_items_url_template
    figshare_userid = self.get_figshare_userid_from_author_url(account_name)
    next_page = 1
    members = []
    while next_page:
        url = provider_url_template % (figshare_userid, next_page)
        # try to get a response from the data provider
        response = self.http_get(url, cache_enabled=cache_enabled)
        if response.status_code != 200:
            self.logger.info(u"%s status_code=%i" % (self.provider_name, response.status_code))
            if response.status_code == 404:
                raise ProviderItemNotFoundError
            elif response.status_code == 303:  #redirect
                pass
            else:
                self._get_error(response.status_code, response)
        # extract the member ids
        number_of_items_per_page = 10  #figshare default
        try:
            page = response.text
            data = provider._load_json(page)
            if data["items_found"] > next_page * number_of_items_per_page:
                next_page += 1
            else:
                next_page = None
            members += self._extract_members(page, account_name)
        except (AttributeError, __HOLE__):
            next_page = None
    return members
TypeError
dataset/ETHPy150Open Impactstory/total-impact-core/totalimpact/providers/figshare.py/Figshare.member_items
5,502
def __getstate__(self):
    d = dict(vars(self))
    for k in '_credCache', '_cacheTimestamp':
        try:
            del d[k]
        except __HOLE__:
            pass
    return d
KeyError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/cred/checkers.py/FilePasswordDB.__getstate__
5,503
def requestAvatarId(self, c):
    try:
        u, p = self.getUser(c.username)
    except __HOLE__:
        return defer.fail(error.UnauthorizedLogin())
    else:
        up = credentials.IUsernamePassword(c, None)
        if self.hash:
            if up is not None:
                h = self.hash(up.username, up.password, p)
                if h == p:
                    return defer.succeed(u)
            return defer.fail(error.UnauthorizedLogin())
        else:
            return defer.maybeDeferred(c.checkPassword, p
                ).addCallback(self._cbPasswordMatch, u)
KeyError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/cred/checkers.py/FilePasswordDB.requestAvatarId
5,504
def requestAvatarId(self, credentials):
    try:
        from twisted.cred import pamauth
    except __HOLE__:  # PyPAM is missing
        return defer.fail(error.UnauthorizedLogin())
    else:
        d = pamauth.pamAuthenticate(self.service, credentials.username,
                                    credentials.pamConversion)
        d.addCallback(lambda x: credentials.username)
        return d

# For backwards compatibility
# Allow access as the old name.
ImportError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/cred/checkers.py/PluggableAuthenticationModulesChecker.requestAvatarId
5,505
def run_subprocess(cmd, data=None):
    """
    Execute the command C{cmd} in a subprocess.

    @param cmd: The command to execute, specified as a list of string.
    @param data: A string containing data to send to the subprocess.
    @return: A tuple C{(out, err)}.
    @raise OSError: If there is any problem executing the command, or if
        its exitval is not 0.
    """
    if isinstance(cmd, basestring):
        cmd = cmd.split()

    # Under Python 2.4+, use subprocess
    try:
        from subprocess import Popen, PIPE
        pipe = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        out, err = pipe.communicate(data)
        if hasattr(pipe, 'returncode'):
            if pipe.returncode == 0:
                return out, err
            else:
                raise RunSubprocessError(cmd, out, err)
        else:
            # Assume that there was an error iff anything was written
            # to the child's stderr.
            if err == '':
                return out, err
            else:
                raise RunSubprocessError(cmd, out, err)
    except __HOLE__:
        pass

    # Under Python 2.3 or earlier, on unix, use popen2.Popen3 so we
    # can access the return value.
    import popen2
    if hasattr(popen2, 'Popen3'):
        pipe = popen2.Popen3(' '.join(cmd), True)
        to_child = pipe.tochild
        from_child = pipe.fromchild
        child_err = pipe.childerr
        if data:
            to_child.write(data)
        to_child.close()
        out = err = ''
        while pipe.poll() is None:
            out += from_child.read()
            err += child_err.read()
        out += from_child.read()
        err += child_err.read()
        if pipe.wait() == 0:
            return out, err
        else:
            raise RunSubprocessError(cmd, out, err)

    # Under Python 2.3 or earlier, on non-unix, use os.popen3
    else:
        to_child, from_child, child_err = os.popen3(' '.join(cmd), 'b')
        if data:
            try:
                to_child.write(data)
            # Guard for a broken pipe error
            except IOError, e:
                raise OSError(e)
        to_child.close()
        out = from_child.read()
        err = child_err.read()
        # Assume that there was an error iff anything was written
        # to the child's stderr.
        if err == '':
            return out, err
        else:
            raise RunSubprocessError(cmd, out, err)

######################################################################
## Terminal Control
######################################################################
ImportError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/epydoc/util.py/run_subprocess
5,506
def survey_template_represent(template_id, row=None):
    """
    Display the template name rather than the id
    """
    if row:
        return row.name
    elif not template_id:
        return current.messages["NONE"]

    table = current.s3db.survey_template
    query = (table.id == template_id)
    record = current.db(query).select(table.name,
                                      limitby=(0, 1)).first()
    try:
        return record.name
    except __HOLE__:
        return current.messages.UNKNOWN_OPT

# =============================================================================
AttributeError
dataset/ETHPy150Open sahana/eden/modules/s3db/survey.py/survey_template_represent
5,507
@staticmethod
def question_list_onaccept(form):
    """
    If a grid question is added to the list then all of the grid
    children will need to be added as well
    """
    qstntable = current.s3db.survey_question
    try:
        form_vars = form.vars
        question_id = form_vars.question_id
        template_id = form_vars.template_id
        section_id = form_vars.section_id
        posn = form_vars.posn
    except __HOLE__:
        return
    record = qstntable[question_id]
    qtype = record.type
    if qtype == "Grid":
        widget_obj = survey_question_type["Grid"]()
        widget_obj.insertChildrenToList(question_id,
                                        template_id,
                                        section_id,
                                        posn,
                                        )
    if qtype == "Location":
        widget_obj = survey_question_type["Location"]()
        widget_obj.insertChildrenToList(question_id,
                                        template_id,
                                        section_id,
                                        posn,
                                        )

# -------------------------------------------------------------------------
AttributeError
dataset/ETHPy150Open sahana/eden/modules/s3db/survey.py/S3SurveyQuestionModel.question_list_onaccept
5,508
def getLocationList(series_id):
    """
    Get a list of the LatLons for each Response in a Series
    """
    response_locations = []
    rappend = response_locations.append
    code_list = ["STD-L4", "STD-L3", "STD-L2", "STD-L1", "STD-L0"]

    table = current.s3db.survey_complete
    rows = current.db(table.series_id == series_id).select(table.id,
                                                           table.answer_list)
    for row in rows:
        lat = None
        lon = None
        name = None
        answer_list = row.answer_list.splitlines()
        answer_dict = {}
        for line in answer_list:
            (question, answer) = line.split(",", 1)
            question = question.strip('"')
            if question in code_list:
                # Store to get the name
                answer_dict[question] = answer.strip('"')
            elif question == "STD-Lat":
                try:
                    lat = float(answer.strip('"'))
                except ValueError:
                    pass
                else:
                    if lat < -90.0 or lat > 90.0:
                        lat = None
            elif question == "STD-Lon":
                try:
                    lon = float(answer.strip('"'))
                except __HOLE__:
                    pass
                else:
                    if lon < -180.0 or lon > 180.0:
                        lon = None
            else:
                # Not relevant here
                continue

        for loc_code in code_list:
            # Retrieve the name of the lowest Lx
            if loc_code in answer_dict:
                name = answer_dict[loc_code]
                break

        if lat and lon:
            from s3dal import Row
            # We have sufficient data to display on the map
            location = Row()
            location.lat = lat
            location.lon = lon
            location.name = name
            location.complete_id = row.id
            rappend(location)
        else:
            # The lat & lon were not added to the assessment so try and get one
            loc_widget = get_default_location(row.id)
            if loc_widget:
                complete_id = loc_widget.question["complete_id"]
                if "answer" not in loc_widget.question:
                    continue
                answer = loc_widget.question["answer"]
                if loc_widget != None:
                    record = loc_widget.getLocationRecord(complete_id, answer)
                    if len(record.records) == 1:
                        location = record.records[0].gis_location
                        location.complete_id = complete_id
                        rappend(location)
    return response_locations

# =============================================================================
ValueError
dataset/ETHPy150Open sahana/eden/modules/s3db/survey.py/getLocationList
5,509
@staticmethod
def translate_onaccept(form):
    """
    If the translation spreadsheet has been uploaded then it needs
    to be processed. The translation strings need to be extracted
    from the spreadsheet and inserted into the language file.
    """
    if "file" in form.vars:
        try:
            import xlrd
        except ImportError:
            print >> sys.stderr, "ERROR: xlrd & xlwt modules are needed for importing spreadsheets"
            return None

        from gluon.languages import read_dict, write_dict

        T = current.T
        request = current.request
        response = current.response

        msg_none = T("No translations exist in spreadsheet")
        upload_file = request.post_vars.file
        upload_file.file.seek(0)
        open_file = upload_file.file.read()
        lang = form.record.language
        code = form.record.code
        try:
            workbook = xlrd.open_workbook(file_contents=open_file)
        except IOError:
            msg = T("Unable to open spreadsheet")
            response.error = msg
            response.flash = None
            return
        try:
            language_sheet = workbook.sheet_by_name(lang)
        except IOError:
            msg = T("Unable to find sheet %(sheet_name)s in uploaded spreadsheet") % \
                dict(sheet_name=lang)
            response.error = msg
            response.flash = None
            return

        if language_sheet.ncols == 1:
            response.warning = msg_none
            response.flash = None
            return

        count = 0
        lang_filename = "applications/%s/uploads/survey/translations/%s.py" % \
            (request.application, code)
        try:
            strings = read_dict(lang_filename)
        except __HOLE__:
            strings = {}

        for row in xrange(1, language_sheet.nrows):
            original = language_sheet.cell_value(row, 0)
            translation = language_sheet.cell_value(row, 1)
            if (original not in strings) or translation != "":
                strings[original] = translation
                count += 1

        write_dict(lang_filename, strings)

        if count == 0:
            response.warning = msg_none
            response.flash = None
        else:
            response.flash = T("%(count_of)d translations have been imported to the %(language)s language file") % \
                dict(count_of=count, language=lang)

# =============================================================================
IOError
dataset/ETHPy150Open sahana/eden/modules/s3db/survey.py/S3SurveyTranslateModel.translate_onaccept
5,510
def apply_method(self, r, **attr):
    """
    Entry point for REST API

    @param r: the S3Request
    @param attr: controller arguments
    """
    if r.representation != "xls":
        r.error(415, current.ERROR.BAD_FORMAT)

    template_id = r.id
    template = r.record
    if not template:
        r.error(405, current.ERROR.BAD_METHOD)

    T = current.T
    try:
        import xlwt
    except ImportError:
        r.error(501, T("xlwt not installed, so cannot export as a Spreadsheet"))

    s3db = current.s3db
    db = current.db

    # Get the translate record
    table = s3db.survey_translate
    record = db(table.id == self.record_id).select(table.code,
                                                   table.language,
                                                   limitby=(0, 1)).first()
    if record is None:
        r.error(404, current.ERROR.BAD_RECORD)

    code = record.code
    lang_filename = "applications/%s/languages/%s.py" % (r.application, code)
    try:
        from gluon.languages import read_dict
        strings = read_dict(lang_filename)
    except __HOLE__:
        strings = {}

    output = StringIO()
    book = xlwt.Workbook(encoding="utf-8")
    sheet = book.add_sheet(record.language)

    question_list = s3db.survey_getAllQuestionsForTemplate(template_id)

    original = {}
    original[template.name] = True
    if template.description != "":
        original[template.description] = True

    for qstn in question_list:
        original[qstn["name"]] = True
        widget_obj = survey_question_type[qstn["type"]](question_id=qstn["qstn_id"])
        if isinstance(widget_obj, S3QuestionTypeOptionWidget):
            option_list = widget_obj.getList()
            for option in option_list:
                original[option] = True

    sections = s3db.survey_getAllSectionsForTemplate(template_id)
    for section in sections:
        original[section["name"]] = True
        section_id = section["section_id"]
        layout_rules = s3db.survey_getQstnLayoutRules(template_id, section_id)
        layout_str = str(layout_rules)
        posn = layout_str.find("heading")
        while posn != -1:
            start = posn + 11
            end = layout_str.find("}", start)
            original[layout_str[start:end]] = True
            posn = layout_str.find("heading", end)

    row = 0
    sheet.write(row, 0, u"Original")
    sheet.write(row, 1, u"Translation")
    original_list = original.keys()
    original_list.sort()
    for text in original_list:
        row += 1
        original = s3_unicode(text)
        sheet.write(row, 0, original)
        if original in strings:
            sheet.write(row, 1, s3_unicode(strings[original]))

    book.save(output)

    from gluon.contenttype import contenttype
    filename = "%s.xls" % code
    headers = current.response.headers
    headers["Content-Type"] = contenttype(".xls")
    headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
    output.seek(0)
    return output.read()

# =============================================================================
IOError
dataset/ETHPy150Open sahana/eden/modules/s3db/survey.py/survey_TranslateDownload.apply_method
5,511
def apply_method(self, r, **attr):
    """
    Entry point for REST API

    @param r: the S3Request
    @param attr: controller arguments
    """
    if r.representation != "xls":
        r.error(415, current.error.BAD_FORMAT)

    series_id = self.record_id
    if series_id is None:
        r.error(405, current.error.BAD_METHOD)

    s3db = current.s3db
    T = current.T
    try:
        import xlwt
    except __HOLE__:
        r.error(501, T("xlwt not installed, so cannot export as a Spreadsheet"))

    section_break = False
    try:
        filename = "%s_All_responses.xls" % r.record.name
    except AttributeError:
        r.error(404, T("Series not found!"))

    output = StringIO()
    book = xlwt.Workbook(encoding="utf-8")
    # Get all questions and write out as a heading
    col = 0
    complete_row = {}
    next_row = 2
    question_list = s3db.survey_getAllQuestionsForSeries(series_id)
    if len(question_list) > 256:
        section_list = s3db.survey_getAllSectionsForSeries(series_id)
        section_break = True
    if section_break:
        sheets = {}
        cols = {}
        for section in section_list:
            sheet_name = section["name"].split(" ")[0]
            if sheet_name not in sheets:
                sheets[sheet_name] = book.add_sheet(sheet_name)
                cols[sheet_name] = 0
    else:
        sheet = book.add_sheet(s3_unicode(T("Responses")))
    for qstn in question_list:
        if section_break:
            sheet_name = qstn["section"].split(" ")[0]
            sheet = sheets[sheet_name]
            col = cols[sheet_name]
        row = 0
        sheet.write(row, col, s3_unicode(qstn["code"]))
        row += 1
        widget_obj = s3db.survey_getWidgetFromQuestion(qstn["qstn_id"])
        sheet.write(row, col, s3_unicode(widget_obj.fullName()))
        # For each question get the response
        all_responses = s3db.survey_getAllAnswersForQuestionInSeries(qstn["qstn_id"],
                                                                     series_id)
        for answer in all_responses:
            value = answer["value"]
            complete_id = answer["complete_id"]
            if complete_id in complete_row:
                row = complete_row[complete_id]
            else:
                complete_row[complete_id] = next_row
                row = next_row
                next_row += 1
            sheet.write(row, col, s3_unicode(value))
        col += 1
        if section_break:
            cols[sheet_name] += 1
    sheet.panes_frozen = True
    sheet.horz_split_pos = 2
    book.save(output)
    from gluon.contenttype import contenttype
    headers = current.response.headers
    headers["Content-Type"] = contenttype(".xls")
    headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
    output.seek(0)
    return output.read()

# =============================================================================
ImportError
dataset/ETHPy150Open sahana/eden/modules/s3db/survey.py/survey_ExportResponses.apply_method
5,512
def formattedAnswer(self, data):
    """
    This will take a string and do its best to return a Date object.
    It will try the following in order:
    * Convert using the ISO format
    * look for a month in words, a 4 digit year and a day (1 or 2 digits)
    * a year and month that matches the date now and NOT a future date
    * a year that matches the current date and the previous month
    """
    rawDate = data
    date = None
    try:
        # First convert any non-numeric to a hyphen
        isoDate = ""
        addHyphen = False
        for char in rawDate:
            if char.isdigit():
                if addHyphen == True and isoDate != "":
                    isoDate += "-"
                isoDate += char
                addHyphen = False
            else:
                addHyphen = True
        # @ToDo: Use deployment_settings.get_L10n_date_format()
        date = datetime.strptime(rawDate, "%Y-%m-%d")
        return date
    except ValueError:
        try:
            for month in monthList:
                if month in rawDate:
                    search = re.search("\D\d\d\D", rawDate)
                    if search:
                        day = search.group()
                    else:
                        search = re.search("^\d\d\D", rawDate)
                        if search:
                            day = search.group()
                        else:
                            search = re.search("\D\d\d$", rawDate)
                            if search:
                                day = search.group()
                            else:
                                search = re.search("\D\d\D", rawDate)
                                if search:
                                    day = "0" + search.group()
                                else:
                                    search = re.search("^\d\D", rawDate)
                                    if search:
                                        day = "0" + search.group()
                                    else:
                                        search = re.search("\D\d$", rawDate)
                                        if search:
                                            day = "0" + search.group()
                                        else:
                                            raise ValueError
                    search = re.search("\D\d\d\d\d\D", rawDate)
                    if search:
                        year = search.group()
                    else:
                        search = re.search("^\d\d\d\d\D", rawDate)
                        if search:
                            year = search.group()
                        else:
                            search = re.search("\D\d\d\d\d$", rawDate)
                            if search:
                                year = search.group()
                            else:
                                raise ValueError
                    # @ToDo: Use deployment_settings.get_L10n_date_format()
                    testDate = "%s-%s-%s" % (day, month, year)
                    if len(month) == 3:
                        format = "%d-%b-%Y"
                    else:
                        format = "%d-%B-%Y"
                    date = datetime.strptime(testDate, format)
                    return date
        except __HOLE__:
            return date

######################################################################
# Functions not fully implemented or used
######################################################################
ValueError
dataset/ETHPy150Open sahana/eden/modules/s3db/survey.py/S3QuestionTypeDateWidget.formattedAnswer
5,513
def castRawAnswer(self, complete_id, answer):
    """
    @todo: docstring
    """
    try:
        return float(answer)
    except __HOLE__:
        return None

# -------------------------------------------------------------------------
ValueError
dataset/ETHPy150Open sahana/eden/modules/s3db/survey.py/S3NumericAnalysis.castRawAnswer
5,514
def json2py(jsonstr):
    """
    Utility function to convert a string in json to a python structure
    """
    from xml.sax.saxutils import unescape

    if not isinstance(jsonstr, str):
        return jsonstr
    try:
        jsonstr = unescape(jsonstr, {"u'": '"'})
        jsonstr = unescape(jsonstr, {"'": '"'})
        python_structure = json.loads(jsonstr)
    except __HOLE__:
        _debug("ERROR: attempting to convert %s using modules/s3db/survey/json2py.py" % (jsonstr))
        return jsonstr
    else:
        return python_structure

# -----------------------------------------------------------------------------
ValueError
dataset/ETHPy150Open sahana/eden/modules/s3db/survey.py/json2py
5,515
def eatDirective(self):
    directive = self.matchDirective()
    try:
        self.calls[directive] += 1
    except __HOLE__:
        self.calls[directive] = 1
    super(Analyzer, self).eatDirective()
KeyError
dataset/ETHPy150Open binhex/moviegrabber/lib/site-packages/Cheetah/DirectiveAnalyzer.py/Analyzer.eatDirective
5,516
def main_dir(opts):
    results = _analyze_templates(_find_templates(opts.dir, opts.suffix))
    totals = {}
    for series in results:
        if not series:
            continue
        for k, v in series.iteritems():
            try:
                totals[k] += v
            except __HOLE__:
                totals[k] = v
    return totals
KeyError
dataset/ETHPy150Open binhex/moviegrabber/lib/site-packages/Cheetah/DirectiveAnalyzer.py/main_dir
5,517
def tearDown(self):
    try:
        self.cnx.close()
        self.removefile()
    except __HOLE__:
        pass
    except sqlite.InterfaceError:
        pass
AttributeError
dataset/ETHPy150Open sassoftware/conary/conary/pysqlite3/test/transaction_tests.py/TransactionTests.tearDown
5,518
def tearDown(self):
    try:
        self.cnx.close()
        self.removefile()
    except __HOLE__:
        pass
    except sqlite.InterfaceError:
        pass
AttributeError
dataset/ETHPy150Open sassoftware/conary/conary/pysqlite3/test/transaction_tests.py/AutocommitTests.tearDown
5,519
def run():
    a = docopt.docopt(__doc__)

    vi_mode = bool(a['--vi'])
    config_dir = os.path.expanduser(a['--config-dir'] or '~/.ptpython/')

    # Create config directory.
    if not os.path.isdir(config_dir):
        os.mkdir(config_dir)

    # If IPython is not available, show message and exit here with error
    # status code.
    try:
        import IPython
    except __HOLE__:
        print('IPython not found. Please install IPython (pip install ipython).')
        sys.exit(1)
    else:
        from ptpython.ipython import embed
        from ptpython.repl import run_config, enable_deprecation_warnings

    # Add the current directory to `sys.path`.
    if sys.path[0] != '':
        sys.path.insert(0, '')

    # When a file has been given, run that, otherwise start the shell.
    if a['<arg>'] and not a['--interactive']:
        sys.argv = a['<arg>']
        six.exec_(compile(open(a['<arg>'][0], "rb").read(), a['<arg>'][0], 'exec'))
    else:
        enable_deprecation_warnings()

        # Create an empty namespace for this interactive shell. (If we don't do
        # that, all the variables from this function will become available in
        # the IPython shell.)
        user_ns = {}

        # Startup path
        startup_paths = []
        if 'PYTHONSTARTUP' in os.environ:
            startup_paths.append(os.environ['PYTHONSTARTUP'])

        # --interactive
        if a['--interactive']:
            startup_paths.append(a['--interactive'])
            sys.argv = [a['--interactive']] + a['<arg>']

        # exec scripts from startup paths
        for path in startup_paths:
            if os.path.exists(path):
                with open(path, 'r') as f:
                    code = compile(f.read(), path, 'exec')
                    six.exec_(code, user_ns, user_ns)
            else:
                print('File not found: {}\n\n'.format(path))
                sys.exit(1)

        # Apply config file
        def configure(repl):
            path = os.path.join(config_dir, 'config.py')
            if os.path.exists(path):
                run_config(repl, path)

        # Run interactive shell.
        embed(vi_mode=vi_mode,
              history_filename=os.path.join(config_dir, 'history'),
              configure=configure,
              user_ns=user_ns,
              title='IPython REPL (ptipython)')
ImportError
dataset/ETHPy150Open jonathanslenders/ptpython/ptpython/entry_points/run_ptipython.py/run
5,520
def run(self, point):
    """Execute all registered Hooks (callbacks) for the given point."""
    exc = None
    hooks = self[point]
    hooks.sort()
    for hook in hooks:
        # Some hooks are guaranteed to run even if others at
        # the same hookpoint fail. We will still log the failure,
        # but proceed on to the next hook. The only way
        # to stop all processing from one of these hooks is
        # to raise SystemExit and stop the whole server.
        if exc is None or hook.failsafe:
            try:
                hook()
            except (__HOLE__, SystemExit):
                raise
            except (cherrypy.HTTPError, cherrypy.HTTPRedirect,
                    cherrypy.InternalRedirect):
                exc = sys.exc_info()[1]
            except:
                exc = sys.exc_info()[1]
                cherrypy.log(traceback=True, severity=40)
    if exc:
        raise exc
KeyboardInterrupt
dataset/ETHPy150Open clips/pattern/pattern/server/cherrypy/cherrypy/_cprequest.py/HookMap.run
5,521
def process_query_string(self):
    """Parse the query string into Python structures. (Core)"""
    try:
        p = httputil.parse_query_string(
            self.query_string, encoding=self.query_string_encoding)
    except __HOLE__:
        raise cherrypy.HTTPError(
            404, "The given query string could not be processed. Query "
            "strings for this resource must be encoded with %r." %
            self.query_string_encoding)

    # Python 2 only: keyword arguments must be byte strings (type 'str').
    if not py3k:
        for key, value in p.items():
            if isinstance(key, unicode):
                del p[key]
                p[key.encode(self.query_string_encoding)] = value
    self.params.update(p)
UnicodeDecodeError
dataset/ETHPy150Open clips/pattern/pattern/server/cherrypy/cherrypy/_cprequest.py/Request.process_query_string
5,522
def finalize(self):
    """Transform headers (and cookies) into self.header_list. (Core)"""
    try:
        code, reason, _ = httputil.valid_status(self.status)
    except __HOLE__:
        raise cherrypy.HTTPError(500, sys.exc_info()[1].args[0])

    headers = self.headers

    self.status = "%s %s" % (code, reason)
    self.output_status = ntob(str(code), 'ascii') + \
        ntob(" ") + headers.encode(reason)

    if self.stream:
        # The upshot: wsgiserver will chunk the response if
        # you pop Content-Length (or set it explicitly to None).
        # Note that lib.static sets C-L to the file's st_size.
        if dict.get(headers, 'Content-Length') is None:
            dict.pop(headers, 'Content-Length', None)
    elif code < 200 or code in (204, 205, 304):
        # "All 1xx (informational), 204 (no content),
        # and 304 (not modified) responses MUST NOT
        # include a message-body."
        dict.pop(headers, 'Content-Length', None)
        self.body = ntob("")
    else:
        # Responses which are not streamed should have a Content-Length,
        # but allow user code to set Content-Length if desired.
        if dict.get(headers, 'Content-Length') is None:
            content = self.collapse_body()
            dict.__setitem__(headers, 'Content-Length', len(content))

    # Transform our header dict into a list of tuples.
    self.header_list = h = headers.output()

    cookie = self.cookie.output()
    if cookie:
        for line in cookie.split("\n"):
            if line.endswith("\r"):
                # Python 2.4 emits cookies joined by LF but 2.5+ by CRLF.
                line = line[:-1]
            name, value = line.split(": ", 1)
            if isinstance(name, unicodestr):
                name = name.encode("ISO-8859-1")
            if isinstance(value, unicodestr):
                value = headers.encode(value)
            h.append((name, value))
ValueError
dataset/ETHPy150Open clips/pattern/pattern/server/cherrypy/cherrypy/_cprequest.py/Response.finalize
5,523
def main():
    ip = ipapi.get()
    try:
        ip.ex("import math,cmath")
        ip.ex("import numpy")
        ip.ex("import numpy as np")
        ip.ex("from numpy import *")
    except __HOLE__:
        print("Unable to start NumPy profile, is numpy installed?")
ImportError
dataset/ETHPy150Open ipython/ipython-py3k/IPython/deathrow/ipy_profile_numpy.py/main
5,524
def islink(path):
    """Test whether a path is a symbolic link"""
    try:
        st = os.lstat(path)
    except (os.error, __HOLE__):
        return False
    return stat.S_ISLNK(st.st_mode)

# Does a path exist?
# This is false for dangling symbolic links.
AttributeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/posixpath.py/islink
5,525
def parse_relative_time(dtstr):
    # example 600 seconds is: '000000001000000R'
    try:
        year = int(dtstr[:2])
        month = int(dtstr[2:4])
        day = int(dtstr[4:6])
        hour = int(dtstr[6:8])
        minute = int(dtstr[8:10])
        second = int(dtstr[10:12])
        dsecond = int(dtstr[12:13])
        # According to spec dsecond should be set to 0
        if dsecond != 0:
            raise ValueError("SMPP v3.4 spec violation: tenths of second value is %s instead of 0" % dsecond)
    except __HOLE__, e:
        raise ValueError("Error %s : Unable to parse relative Validity Period %s" % (e, dtstr))
    return SMPPRelativeTime(year, month, day, hour, minute, second)
IndexError
dataset/ETHPy150Open jookies/jasmin/jasmin/vendor/smpp/pdu/smpp_time.py/parse_relative_time
5,526
def location(self):
    "(float, float) Return Where.Point.pos.text as a (lat,lon) tuple"
    try:
        return tuple([float(z) for z in self.Point.pos.text.split(' ')])
    except __HOLE__:
        return tuple()
AttributeError
dataset/ETHPy150Open bradfitz/addressbooker/gdata/geo/__init__.py/Where.location
5,527
def set_location(self, latlon):
    """(bool) Set Where.Point.pos.text from a (lat,lon) tuple.

    Arguments:
    lat (float): The latitude in degrees, from -90.0 to 90.0
    lon (float): The longitude in degrees, from -180.0 to 180.0

    Returns True on success.
    """
    assert(isinstance(latlon[0], float))
    assert(isinstance(latlon[1], float))
    try:
        self.Point.pos.text = "%s %s" % (latlon[0], latlon[1])
        return True
    except __HOLE__:
        return False
AttributeError
dataset/ETHPy150Open bradfitz/addressbooker/gdata/geo/__init__.py/Where.set_location
5,528
def add_image(self, image):
    """
    Add a PersistentImage-type object to this PersistentImageManager.
    This should only be called with an image that has not yet been added
    to the store. To retrieve a previously persisted image use
    image_with_id() or image_query().

    @param image TODO

    @return TODO
    """
    metadata = self.collection.find_one({"_id": image.identifier})
    if metadata:
        raise ImageFactoryException("Image %s already managed, use image_with_id() and save_image()" %
                                    (image.identifier))
    image.persistent_manager = self
    basename = self.storage_path + '/' + str(image.identifier)
    body_path = basename + BODY_EXT
    image.data = body_path
    try:
        if not os.path.isfile(body_path):
            open(body_path, 'w').close()
            self.log.debug('Created file %s' % body_path)
    except __HOLE__ as e:
        self.log.debug('Exception caught: %s' % e)
    self._save_image(image)
IOError
dataset/ETHPy150Open redhat-imaging/imagefactory/imgfac/MongoPersistentImageManager.py/MongoPersistentImageManager.add_image
5,529
def import_dotted_path(path):
    """
    Takes a dotted path to a member name in a module, and returns
    the member after importing it.
    """
    try:
        module_path, member_name = path.rsplit(".", 1)
        module = import_module(module_path)
        return getattr(module, member_name)
    except (ValueError, ImportError, __HOLE__) as e:
        raise ImportError("Could not import the name: %s: %s" % (path, e))
AttributeError
dataset/ETHPy150Open stephenmcd/mezzanine/mezzanine/utils/importing.py/import_dotted_path
5,530
def _update_reader_output(self):
    r = self.reader
    r.update()

    if r.error_code != 0:
        try:
            self.reader.i_blanking = True
        except __HOLE__:
            pass
        else:
            r.update()

    # Try reading file.
    if r.error_code != 0:
        # No output so the file might be an ASCII file.
        try:
            # Turn off IBlanking.
            r.set(i_blanking=False, binary_file=False)
        except AttributeError:
            pass
        else:
            r.update()

    # Try again this time as ascii and with blanking.
    if r.error_code != 0:
        # No output so the file might be an ASCII file.
        try:
            # Turn on IBlanking.
            r.i_blanking = True
        except AttributeError:
            pass
        else:
            r.update()

    # If there still is an error, ask the user.
    if r.error_code != 0:
        r.edit_traits(kind='livemodal')
        r.update()

    # If there still is an error, ask the user to retry.
    if r.error_code != 0:
        msg = 'Unable to read file properly. '\
              'Please check the settings of the reader '\
              'on the UI and press the "Update Reader" button '\
              'when done and try again!'
        error(msg)
        return

    # Now setup the outputs by resetting self.outputs. Changing
    # the outputs automatically fires a pipeline_changed event.
    try:
        n = r.get_output().number_of_blocks
    except AttributeError:
        # for VTK >= 4.5
        n = r.number_of_outputs
    outputs = []
    for i in range(n):
        outputs.append(r.get_output().get_block(i))
    self.outputs = outputs

    # Fire data_changed just in case the outputs are not
    # really changed. This can happen if the dataset is of
    # the same type as before.
    self.data_changed = True

    # Change our name on the tree view
    self.name = self._get_name()
AttributeError
dataset/ETHPy150Open enthought/mayavi/mayavi/sources/plot3d_reader.py/PLOT3DReader._update_reader_output
5,531
def mapper_final(self):
    # hook for test_local.LocalRunnerSetupTestCase.test_python_archive()
    try:
        import foo
        foo  # quiet pyflakes warning
    except __HOLE__:
        pass

    for dirpath, _, filenames in os.walk('.'):
        for filename in filenames:
            path = os.path.join(dirpath, filename)
            size = os.path.getsize(path)
            yield path, size
ImportError
dataset/ETHPy150Open Yelp/mrjob/tests/mr_os_walk_job.py/MROSWalkJob.mapper_final
5,532
def pibrella_exit():
    try:
        pibrella.exit()
    except AttributeError:
        print("Pibrella not initialized!")
    except __HOLE__:
        print("Pibrella not initialized!")
NameError
dataset/ETHPy150Open pimoroni/pibrella/pibrella.py/pibrella_exit
5,533
def dequeue(self):
    if self.blocking:
        try:
            return self.conn.brpop(
                self.queue_key,
                timeout=self.read_timeout)[1]
        except (ConnectionError, __HOLE__, IndexError):
            # Unfortunately, there is no way to differentiate a socket
            # timing out and a host being unreachable.
            return None
    else:
        return self.conn.rpop(self.queue_key)
TypeError
dataset/ETHPy150Open coleifer/huey/huey/storage.py/RedisStorage.dequeue
5,534
def get(self, queue_name, task_id):
    """
    Pops a specific task off the queue by identifier.

    :param queue_name: The name of the queue. Usually handled by the
        ``Gator`` instance.
    :type queue_name: string

    :param task_id: The identifier of the task.
    :type task_id: string

    :returns: The data for the task.
    :rtype: string
    """
    # This method is *very* non-thread-safe.
    cls = self.__class__
    queue = cls.queues.get(queue_name, [])

    if queue:
        try:
            offset = queue.index(task_id)
        except __HOLE__:
            return None

        queue.pop(offset)

    return cls.task_data.pop(task_id, None)
ValueError
dataset/ETHPy150Open toastdriven/alligator/alligator/backends/locmem_backend.py/Client.get
5,535
def _generate_filename_to_mtime(self):
    """Records the state of a directory.

    Returns:
      A dictionary of subdirectories and files under directory
      associated with their timestamps. the keys are absolute paths
      and values are epoch timestamps.

    Raises:
      ShutdownError: if the quit event has been fired during processing.
    """
    filename_to_mtime = {}
    num_files = 0
    for dirname, dirnames, filenames in os.walk(self._directory,
                                                followlinks=True):
        if self._quit_event.is_set():
            raise ShutdownError()
        watcher_common.skip_ignored_dirs(dirnames)
        filenames = [f for f in filenames if not watcher_common.ignore_file(f)]
        for filename in filenames + dirnames:
            if self._quit_event.is_set():
                raise ShutdownError()
            if num_files == _MAX_MONITORED_FILES:
                warnings.warn(
                    'There are too many files in your application for '
                    'changes in all of them to be monitored. You may have to '
                    'restart the development server to see some changes to your '
                    'files.')
                return filename_to_mtime
            num_files += 1
            path = os.path.join(dirname, filename)
            try:
                filename_to_mtime[path] = os.path.getmtime(path)
            except (__HOLE__, OSError):
                pass
    return filename_to_mtime
IOError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/devappserver2/mtime_file_watcher.py/MtimeFileWatcher._generate_filename_to_mtime
5,536
def render(self, context):
    try:
        user = context['user']
        count = user.received_messages.filter(read_at__isnull=True,
                                              recipient_deleted_at__isnull=True).count()
    except (__HOLE__, AttributeError):
        count = ''
    if self.varname is not None:
        context[self.varname] = count
        return ""
    else:
        return "%s" % (count)
KeyError
dataset/ETHPy150Open amarandon/smeuhsocial/apps/messages/templatetags/inbox.py/InboxOutput.render
5,537
def parse(self, p):
    p.startLengthCheck(3)
    if self.certificateType == CertificateType.x509:
        chainLength = p.get(3)
        index = 0
        certificate_list = []
        while index != chainLength:
            certBytes = p.getVarBytes(3)
            x509 = X509()
            x509.parseBinary(certBytes)
            certificate_list.append(x509)
            index += len(certBytes) + 3
        if certificate_list:
            self.certChain = X509CertChain(certificate_list)
    elif self.certificateType == CertificateType.cryptoID:
        s = bytesToString(p.getVarBytes(2))
        if s:
            try:
                import cryptoIDlib.CertChain
            except __HOLE__:
                raise SyntaxError(
                    "cryptoID cert chain received, cryptoIDlib not present")
            self.certChain = cryptoIDlib.CertChain.CertChain().parse(s)
    else:
        raise AssertionError()

    p.stopLengthCheck()
    return self
ImportError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/gdata/tlslite/messages.py/Certificate.parse
5,538
def changes(request, slug, template_name='wakawaka/changes.html', extra_context=None):
    """
    Displays the changes between two revisions.
    """
    if extra_context is None:
        extra_context = {}
    rev_a_id = request.GET.get('a', None)
    rev_b_id = request.GET.get('b', None)

    # Some stinky fingers manipulated the url
    if not rev_a_id or not rev_b_id:
        return HttpResponseBadRequest('Bad Request')

    try:
        revision_queryset = Revision.objects.all()
        wikipage_queryset = WikiPage.objects.all()
        rev_a = revision_queryset.get(pk=rev_a_id)
        rev_b = revision_queryset.get(pk=rev_b_id)
        page = wikipage_queryset.get(slug=slug)
    except __HOLE__:
        raise Http404

    if rev_a.content != rev_b.content:
        d = difflib.unified_diff(rev_b.content.splitlines(),
                                 rev_a.content.splitlines(),
                                 'Original', 'Current', lineterm='')
        difftext = '\n'.join(d)
    else:
        difftext = _(u'No changes were made between this two files.')

    template_context = {
        'page': page,
        'diff': difftext,
        'rev_a': rev_a,
        'rev_b': rev_b,
    }
    template_context.update(extra_context)
    return render_to_response(template_name, template_context,
                              RequestContext(request))

# Some useful views
ObjectDoesNotExist
dataset/ETHPy150Open bartTC/django-wakawaka/wakawaka/views.py/changes
5,539
def test_override(self):
    modname = os.path.__name__
    tests = [
        ("import os.path", "('os.path', None, -1, 'os')"),
        ("import os.path as path2", "('os.path', None, -1, 'os')"),
        ("from os.path import *", "('os.path', ('*',), -1, '%s')" % modname),
        ("from os.path import join", "('os.path', ('join',), -1, '%s')" % modname),
        ("from os.path import join as join2", "('os.path', ('join',), -1, '%s')" % modname),
        ("from os.path import join as join2, split as split2", "('os.path', ('join', 'split'), -1, '%s')" % modname),
    ]

    import sys

    # Replace __builtin__.__import__ to trace the calls
    import __builtin__
    oldimp = __builtin__.__import__
    try:
        def __import__(name, globs, locs, fromlist, level=-1):
            mod = oldimp(name, globs, locs, fromlist, level)
            globs["result"] = str((name, fromlist, level, mod.__name__))
            raise ImportError
        __builtin__.__import__ = __import__

        failed = 0
        for statement, expected in tests:
            try:
                c = compile(statement, "<unknown>", "exec")
                exec c in locals(), globals()
                raise Exception("ImportError expected.")
            except __HOLE__:
                pass
            self.assertEquals(expected, result)
    finally:
        __builtin__.__import__ = oldimp
ImportError
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/test/test_import_jy.py/OverrideBuiltinsImportTestCase.test_override
5,540
def test_invalid_argument_type(self):
    values = (0, 0, 0, 'string not int')

    try:
        self.bound.bind(values)
    except TypeError as e:
        self.assertIn('v0', str(e))
        self.assertIn('Int32Type', str(e))
        self.assertIn('str', str(e))
    else:
        self.fail('Passed invalid type but exception was not thrown')

    values = (['1', '2'], 0, 0, 0)

    try:
        self.bound.bind(values)
    except __HOLE__ as e:
        self.assertIn('rk0', str(e))
        self.assertIn('Int32Type', str(e))
        self.assertIn('list', str(e))
    else:
        self.fail('Passed invalid type but exception was not thrown')
TypeError
dataset/ETHPy150Open datastax/python-driver/tests/unit/test_parameter_binding.py/BoundStatementTestV1.test_invalid_argument_type
5,541
@register.tag(name='get_latest_annotations')
def do_get_latest_annotations(parser, token):
    """
    Use like so:
    {% get_latest_annotations as annotations [count=1] %}
    """
    as_var = None
    count = 1
    try:
        contents = token.split_contents()
        if len(contents) >= 2 and contents[1] == "as":
            as_var = contents[2]
        if len(contents) == 4:
            count = int(contents[3])
    except (ValueError, __HOLE__):
        raise template.TemplateSyntaxError, "Tag Syntax Error: %r as annotations [count=1]" % token.contents.split()[0]
    return LatestAnnotationsNode(as_var, count)
IndexError
dataset/ETHPy150Open stefanw/django-annotatetext/annotatetext/templatetags/annotatetags.py/do_get_latest_annotations
5,542
@register.tag(name='get_annotations_for')
def do_get_annotations_for(parser, token):
    """Use like so: {% get_annotations_for object_list as var_name %}
    Note: objects in object_list must be of one ContentType!
    var_name is a dict that contains the id of every object in object_list
    as key. The value is another dict with two keys: "annotations" as a list
    of annotations and "form" as an annotation form instance
    """
    as_var = None
    try:
        contents = token.split_contents()
        instance_name = contents[1]
        if len(contents) >= 4 and contents[2] == "as":
            as_var = contents[3]
    except (ValueError, __HOLE__):
        raise template.TemplateSyntaxError, "Tag Syntax Error: %r object_list as var_name" % token.contents.split()[0]
    return InsertAnnotationNode(instance_name, as_var)
IndexError
dataset/ETHPy150Open stefanw/django-annotatetext/annotatetext/templatetags/annotatetags.py/do_get_annotations_for
5,543
def get_dirs_and_files(self):
    try:
        dirs, files = self.storage.listdir(self.path)
    except __HOLE__:
        return [], []
    if self.path:
        files = [os.path.join(self.path, filename) for filename in files]
    return dirs, [BoundFile(self.storage, filename) for filename in files]
NotImplementedError
dataset/ETHPy150Open zbyte64/django-hyperadmin/hyperadmin/resources/storages/resources.py/StorageQuery.get_dirs_and_files
5,544
def item_stats(host, port):
    """Check the stats for items and connection status."""
    stats = None
    try:
        mc = memcache.Client(['%s:%s' % (host, port)])
        stats = mc.get_stats()[0][1]
    except __HOLE__:
        raise
    finally:
        return stats
IndexError
dataset/ETHPy150Open rcbops/rpc-openstack/maas/plugins/memcached_status.py/item_stats
5,545
def main(args):
    bind_ip = str(args.ip)
    port = args.port

    is_up = True
    try:
        stats = item_stats(bind_ip, port)
        current_version = stats['version']
    except (__HOLE__, IndexError):
        is_up = False
    else:
        is_up = True
        if current_version not in VERSIONS:
            status_err('This plugin has only been tested with version %s '
                       'of memcached, and you are using version %s' %
                       (VERSIONS, current_version))

    status_ok()
    metric_bool('memcache_api_local_status', is_up)
    if is_up:
        for m, u in MEMCACHE_METRICS.iteritems():
            metric('memcache_%s' % m, 'uint64', stats[m], u)
TypeError
dataset/ETHPy150Open rcbops/rpc-openstack/maas/plugins/memcached_status.py/main
5,546
def generate_thumb(storage, video_path, thumb_size=None, format='jpg', frames=100):
    histogram = []
    http_status = 200
    name = video_path
    path = storage.path(video_path)

    if not storage.exists(video_path):
        return "", '404'

    framemask = "%s%s%s%s" % (TMP_DIR,
                              name.split('/')[-1].split('.')[0] + str(time.time()),
                              '.%d.', format)

    # ffmpeg command for grabbing N number of frames
    cmd = "/usr/bin/ffmpeg -y -t 00:00:05 -i '%s' '%s'" % (path, framemask)

    # make sure that this command worked or return.
    if os.system(cmd) != 0:
        return "", '500'

    # loop through the generated images, open, and generate the image histogram.
    for i in range(1, frames + 1):
        fname = framemask % i
        if not os.path.exists(os.path.join(TMP_DIR, fname)):
            break
        image = Image.open(fname)
        # Convert to RGB if necessary
        if image.mode not in ('L', 'RGB'):
            image = image.convert('RGB')
        # The list of image histograms
        histogram.append(image.histogram())

    n = len(histogram)
    avg = []

    # Get the image average of the first image
    for c in range(len(histogram[0])):
        ac = 0.0
        for i in range(n):
            ac = ac + (float(histogram[i][c]) / n)
        avg.append(ac)

    minn = -1
    minRMSE = -1

    # Compute the mean squared error
    for i in range(1, n + 1):
        results = 0.0
        num = len(avg)
        for j in range(num):
            median_error = avg[j] - float(histogram[i - 1][j])
            results += (median_error * median_error) / num
        rmse = math.sqrt(results)
        if minn == -1 or rmse < minRMSE:
            minn = i
            minRMSE = rmse

    file_location = framemask % (minn)
    image = Image.open(file_location)

    # If you want to generate a square thumbnail
    if not thumb_size is None:
        thumb_w, thumb_h = thumb_size
        if thumb_w == thumb_h:
            # quad
            xsize, ysize = image.size
            # get minimum size
            minsize = min(xsize, ysize)
            # largest square possible in the image
            xnewsize = (xsize - minsize) / 2
            ynewsize = (ysize - minsize) / 2
            # crop it
            image2 = image.crop((xnewsize, ynewsize, xsize - xnewsize, ysize - ynewsize))
            # load is necessary after crop
            image2.load()
            # thumbnail of the cropped image (with ANTIALIAS to make it look better)
            image2.thumbnail(thumb_size, Image.ANTIALIAS)
        else:
            # not quad
            image2 = image
            image2.thumbnail(thumb_size, Image.ANTIALIAS)
    else:
        image2 = image

    io = StringIO()
    # PNG and GIF are the same, JPG is JPEG
    if format.upper() == 'JPG':
        format = 'JPEG'

    image2.save(io, format)

    # We don't know how many frames we captured: we just grabbed the first
    # 5 seconds, so keep removing until a frame file is not found.
    for i in range(1, 9999):
        fname = framemask % i
        try:
            os.unlink(fname)
        except __HOLE__:
            break

    return io.getvalue(), http_status
OSError
dataset/ETHPy150Open francescortiz/image/image/videothumbs.py/generate_thumb
5,547
@register.filter(name="getattr")
def get_attr(obj, val):
    try:
        return getattr(obj, val)
    except AttributeError:
        try:
            return obj[val]
        except (__HOLE__, TypeError):
            return None
KeyError
dataset/ETHPy150Open mgaitan/waliki/waliki/templatetags/waliki_tags.py/get_attr
5,548
def _check_system_limits():
    global _system_limits_checked, _system_limited
    if _system_limits_checked:
        if _system_limited:
            raise NotImplementedError(_system_limited)
    _system_limits_checked = True
    try:
        import os
        nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
    except (__HOLE__, ValueError):
        # sysconf not available or setting not available
        return
    if nsems_max == -1:
        # indeterminate limit, assume that limit is determined
        # by available memory only
        return
    if nsems_max >= 256:
        # minimum number of semaphores available
        # according to POSIX
        return
    _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
    raise NotImplementedError(_system_limited)
AttributeError
dataset/ETHPy150Open SickRage/SickRage/lib/concurrent/futures/process.py/_check_system_limits
5,549
def short_name(model):
    """Return a simplified name for this model. A bit brittle."""
    # for a single model, this will work
    name = model.__class__.__name__
    try:
        if hasattr(model, 'steps'):
            # pipeline
            name = '-'.join([pair[0] for pair in model.steps])
        elif hasattr(model, 'best_estimator_'):
            if hasattr(model.estimator, 'steps'):
                # gridsearchcv
                name = 'gscv_' + '-'.join([x[0] for x in model.estimator.steps])
            elif hasattr(model.estimator, 'estimators'):
                # votingclassifier
                name = 'gscv_vc_' + '-'.join([x[0] for x in model.estimator.estimators])
        elif hasattr(model, 'base_estimator_'):
            # bagging
            name = 'bag_' + short_name(model.base_estimator)
    except __HOLE__, e:
        util_logr.info('utils.short_name() couldnt generate quality name')
        # for a single model, this will work
        name = model.__class__.__name__
        util_logr.info('falling back to generic name={}'.format(name))
    return name
AttributeError
dataset/ETHPy150Open jrmontag/mnist-sklearn/utils.py/short_name
5,550
def test_f(HH, Istim, freq, stim_t0, duration=1, tmax=1000):
    """Istim is amplitude of square pulse input to neuron, having given
    duration in ms and frequency in Hz. Starts at stim_t0.
    """
    baseline_Iapp = HH.pars['Iapp']
    stim_period = 1000. / freq
    HH.set(tdata=[0, tmax])
    n = int(floor(tmax / stim_period))
    print "Testing with stimulus frequency %.3f Hz" % freq
    print "  (stimulus period is %.4f ms)" % stim_period
    print "Stimulus amplitude is %.3f" % Istim
    stim_ts = array([stim_t0 + i * stim_period for i in range(0, n + 1)])
    Istim_vardict = make_spike_signal(stim_ts, 1, tmax * 1.1, loval=0,
                                      hival=Istim, dt=0.1)
    HH.inputs = Istim_vardict
    HH._extInputsChanged = True
    traj = HH.compute('stim_test')
    pts = traj.sample()
    plt.figure(1)
    plt.clf()
    plt.plot(pts['t'], pts['v'], 'b', lw=2)
    plt.plot(pts['t'], 3 * pts['I'] - 75, 'k', lw=2)
    #plt.ylim([-100, 50])
    plt.xlabel('t')
    plt.ylabel('v')
    plt.title('Voltage trace and I(t) pulse stimulus')
    try:
        show_maps(traj, stim_ts, 0.3 * tmax)
    except __HOLE__:
        print "Not enough spikes to show a map"
    return traj, pts, stim_ts
IndexError
dataset/ETHPy150Open robclewley/compneuro/Entrain_teacher_copy.py/test_f
5,551
def _pass_to_shared(self, name, item):
    try:
        item.traj = self.v_traj
        item.name = name
        item.parent = self
    except __HOLE__:
        pass
AttributeError
dataset/ETHPy150Open SmokinCaterpillar/pypet/pypet/shareddata.py/SharedResult._pass_to_shared
5,552
@classmethod
def setupClass(cls):
    global np
    global npt
    try:
        import numpy as np
        import numpy.testing as npt
    except __HOLE__:
        raise SkipTest('NumPy not available.')
ImportError
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/algorithms/assortativity/tests/test_mixing.py/TestDegreeMixingMatrix.setupClass
5,553
@classmethod
def setupClass(cls):
    global np
    global npt
    try:
        import numpy as np
        import numpy.testing as npt
    except __HOLE__:
        raise SkipTest('NumPy not available.')
ImportError
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/algorithms/assortativity/tests/test_mixing.py/TestAttributeMixingMatrix.setupClass
5,554
def main():
    """
    Run ftfy as a command-line utility.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description="ftfy (fixes text for you), version %s" % __version__
    )
    parser.add_argument('filename', default='-', nargs='?',
                        help='The file whose Unicode is to be fixed. Defaults '
                             'to -, meaning standard input.')
    parser.add_argument('-o', '--output', type=str, default='-',
                        help='The file to output to. Defaults to -, meaning '
                             'standard output.')
    parser.add_argument('-g', '--guess', action='store_true',
                        help="Ask ftfy to guess the encoding of your input. "
                             "This is risky. Overrides -e.")
    parser.add_argument('-e', '--encoding', type=str, default='utf-8',
                        help='The encoding of the input. Defaults to UTF-8.')
    parser.add_argument('-n', '--normalization', type=str, default='NFC',
                        help='The normalization of Unicode to apply. '
                             'Defaults to NFC. Can be "none".')
    parser.add_argument('--preserve-entities', action='store_true',
                        help="Leave HTML entities as they are. The default "
                             "is to decode them, as long as no HTML tags "
                             "have appeared in the file.")

    args = parser.parse_args()

    encoding = args.encoding
    if args.guess:
        encoding = None

    if args.filename == '-':
        # Get a standard input stream made of bytes, so we can decode it as
        # whatever encoding is necessary.
        if PYTHON2:
            file = sys.stdin
        else:
            file = sys.stdin.buffer
    else:
        file = open(args.filename, 'rb')

    if args.output == '-':
        encode_output = PYTHON2
        outfile = sys.stdout
    else:
        encode_output = False
        outfile = io.open(args.output, 'w', encoding='utf-8')

    normalization = args.normalization
    if normalization.lower() == 'none':
        normalization = None

    if args.preserve_entities:
        fix_entities = False
    else:
        fix_entities = 'auto'

    try:
        for line in fix_file(file, encoding=encoding,
                             fix_entities=fix_entities,
                             normalization=normalization):
            if encode_output:
                outfile.write(line.encode('utf-8'))
            else:
                try:
                    outfile.write(line)
                except UnicodeEncodeError:
                    if sys.platform == 'win32':
                        sys.stderr.write(ENCODE_ERROR_TEXT_WINDOWS)
                    else:
                        sys.stderr.write(ENCODE_ERROR_TEXT_UNIX)
                    sys.exit(1)
    except __HOLE__ as err:
        sys.stderr.write(DECODE_ERROR_TEXT % (encoding, err))
        sys.exit(1)
UnicodeDecodeError
dataset/ETHPy150Open LuminosoInsight/python-ftfy/ftfy/cli.py/main
5,555
def take_action(self, parsed_args):
    if not parsed_args.cmd:
        action = HelpAction(None, None, default=self.app)
        action(self.app.parser, self.app.parser, None, None)
        return 1

    try:
        the_cmd = self.app.command_manager.find_command(
            parsed_args.cmd,
        )
        cmd_factory, cmd_name, search_args = the_cmd
    except __HOLE__:
        # Did not find an exact match
        cmd = parsed_args.cmd[0]
        fuzzy_matches = [k[0] for k in self.app.command_manager
                         if k[0].startswith(cmd)]
        if not fuzzy_matches:
            raise
        print('Command "%s" matches:' % cmd)
        for fm in sorted(fuzzy_matches):
            print(' %s' % fm)
        return

    self.app_args.cmd = search_args
    cmd = cmd_factory(self.app, self.app_args)
    full_name = ' '.join([self.app.NAME, cmd_name])
    cmd_parser = cmd.get_parser(full_name)
    cmd_parser.print_help(sys.stdout)
    return 0
ValueError
dataset/ETHPy150Open TurboGears/gearbox/gearbox/commands/help.py/HelpCommand.take_action
5,556
def reopen_connection(self):
    self.close_connection()

    root_delay = self.MIN_DELAY
    while True:
        try:
            self.open_connection()
            return
        except Exception:
            if self.verbose:
                log.warning('Unable to connect to Logentries')

        root_delay *= 2
        if root_delay > self.MAX_DELAY:
            root_delay = self.MAX_DELAY

        wait_for = root_delay + random.uniform(0, root_delay)

        try:
            time.sleep(wait_for)
        except __HOLE__:
            raise
KeyboardInterrupt
dataset/ETHPy150Open saltstack/salt/salt/engines/logentries.py/PlainTextSocketAppender.reopen_connection
5,557
def start(endpoint='data.logentries.com',
          port=10000,
          token=None,
          tag='salt/engines/logentries'):
    '''
    Listen to salt events and forward them to Logentries
    '''
    if __opts__.get('id').endswith('_master'):
        event_bus = salt.utils.event.get_master_event(
            __opts__,
            __opts__['sock_dir'],
            listen=True)
    else:
        event_bus = salt.utils.event.get_event(
            'minion',
            transport=__opts__['transport'],
            opts=__opts__,
            sock_dir=__opts__['sock_dir'],
            listen=True)
    log.debug('Logentries engine started')

    try:
        val = uuid.UUID(token)
    except __HOLE__:
        log.warning('Not a valid logentries token')

    appender = _get_appender(endpoint, port)
    appender.reopen_connection()

    while True:
        event = event_bus.get_event()
        if event:
            msg = '{0} {1}'.format(tag, json.dumps(event))
            appender.put(_emit(token, msg))

    appender.close_connection()
ValueError
dataset/ETHPy150Open saltstack/salt/salt/engines/logentries.py/start
5,558
def check_gil_released(self, func):
    for n_threads in (4, 12, 32):
        # Try harder each time. On an empty machine 4 threads seems
        # sufficient, but in some contexts (e.g. Travis CI) we need more.
        arr = self.run_in_threads(func, n_threads)
        distinct = set(arr)
        try:
            self.assertGreater(len(distinct), 1, distinct)
        except __HOLE__ as e:
            failure = e
        else:
            return
    raise failure
AssertionError
dataset/ETHPy150Open numba/numba/numba/tests/test_gil.py/TestGILRelease.check_gil_released
5,559
def get_meta_appversion_text(form_metadata):
    try:
        text = form_metadata['appVersion']
    except __HOLE__:
        return None

    # just make sure this is a longish string and not something like '2.0'
    if isinstance(text, (str, unicode)) and len(text) > 5:
        return text
    else:
        return None
KeyError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/receiverwrapper/util.py/get_meta_appversion_text
5,560
def stop_watching_memory():
    """Unregister memory profiling tools from IPython instance."""
    global watching_memory
    watching_memory = False
    ip = get_ipython()
    try:
        ip.events.unregister("post_run_cell", watch_memory)
    except ValueError:
        pass
    try:
        ip.events.unregister("pre_run_cell", pre_run_cell)
    except __HOLE__:
        pass
ValueError
dataset/ETHPy150Open ianozsvald/ipython_memory_usage/src/ipython_memory_usage/ipython_memory_usage_perf.py/stop_watching_memory
5,561
def main():
    opts, args = getopt.getopt(sys.argv[1:], 'i:h')
    name = None
    for o, a in opts:
        if o == '-i':
            name = a
        else:
            usage()

    pc = pcap.pcap(name)
    pc.setfilter(' '.join(args))
    decode = {
        pcap.DLT_LOOP: dpkt.loopback.Loopback,
        pcap.DLT_NULL: dpkt.loopback.Loopback,
        pcap.DLT_EN10MB: dpkt.ethernet.Ethernet
    }[pc.datalink()]
    try:
        print 'listening on %s: %s' % (pc.name, pc.filter)
        for ts, pkt in pc:
            print ts, `decode(pkt)`
    except __HOLE__:
        nrecv, ndrop, nifdrop = pc.stats()
        print '\n%d packets received by filter' % nrecv
        print '%d packets dropped by kernel' % ndrop
KeyboardInterrupt
dataset/ETHPy150Open dugsong/pypcap/testsniff.py/main
5,562
@requires_venv
def do_enable():
    """
    Uncomment any lines that start with #import in the .pth file
    """
    try:
        _lines = []
        with open(vext_pth, mode='r') as f:
            for line in f.readlines():
                if line.startswith('#') and line[1:].lstrip().startswith('import '):
                    _lines.append(line[1:].lstrip())
                else:
                    _lines.append(line)

        try:
            os.unlink('%s.tmp' % vext_pth)
        except:
            pass

        with open('%s.tmp' % vext_pth, mode='w+') as f:
            f.writelines(_lines)

        try:
            os.unlink('%s~' % vext_pth)
        except:
            pass

        os.rename(vext_pth, '%s~' % vext_pth)
        os.rename('%s.tmp' % vext_pth, vext_pth)
    except __HOLE__ as e:
        if e.errno == 2:
            # vext file doesn't exist, recreate it.
            create_pth()
IOError
dataset/ETHPy150Open stuaxo/vext/vext/cmdline/__init__.py/do_enable
5,563
@requires_venv
def do_disable():
    """
    Comment any lines that start with import in the .pth file
    """
    from vext import vext_pth
    try:
        _lines = []
        with open(vext_pth, mode='r') as f:
            for line in f.readlines():
                if not line.startswith('#') and line.startswith('import '):
                    _lines.append('# %s' % line)
                else:
                    _lines.append(line)

        try:
            os.unlink('%s.tmp' % vext_pth)
        except:
            pass

        with open('%s.tmp' % vext_pth, mode='w+') as f:
            f.writelines(_lines)

        try:
            os.unlink('%s~' % vext_pth)
        except:
            pass

        os.rename(vext_pth, '%s~' % vext_pth)
        os.rename('%s.tmp' % vext_pth, vext_pth)
    except __HOLE__ as e:
        if e.errno == 2:
            # file didn't exist == disabled
            return
IOError
dataset/ETHPy150Open stuaxo/vext/vext/cmdline/__init__.py/do_disable
5,564
def getout(*args):
    try:
        return Popen(args, stdout=PIPE).communicate()[0]
    except __HOLE__:
        return ''
OSError
dataset/ETHPy150Open kivy/kivy/kivy/input/providers/probesysfs.py/getout
5,565
def rmtree_ignore_error(path):
    """ Have to do this a lot, moving this into a function to save lines
        of code. ignore_error=True should work, but doesn't.
    """
    try:
        shutil.rmtree(path)
    except __HOLE__:
        pass
OSError
dataset/ETHPy150Open kenfar/DataGristle/scripts/tests/test_gristle_dir_merger_cmd.py/rmtree_ignore_error
5,566
def _get_raw_post_data(self):
    if not hasattr(self, '_raw_post_data'):
        if self._read_started:
            raise Exception("You cannot access raw_post_data after reading from request's data stream")
        try:
            content_length = int(self.META.get('CONTENT_LENGTH', 0))
        except (ValueError, __HOLE__):
            # If CONTENT_LENGTH was empty string or not an integer, don't
            # error out. We've also seen None passed in here (against all
            # specs, but see ticket #8259), so we handle TypeError as well.
            content_length = 0
        if content_length:
            self._raw_post_data = self.read(content_length)
        else:
            self._raw_post_data = self.read()
        self._stream = StringIO(self._raw_post_data)
    return self._raw_post_data
TypeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.3/django/http/__init__.py/HttpRequest._get_raw_post_data
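The (ValueError, TypeError) pair above is deliberate: int('') raises ValueError, while int(None) raises TypeError, and both malformed CONTENT_LENGTH cases should fall back to 0. A self-contained sketch of the guard (parse_content_length is an illustrative name, not Django's):

def parse_content_length(meta):
    try:
        return int(meta.get('CONTENT_LENGTH', 0))
    except (ValueError, TypeError):
        return 0  # empty string, garbage, or None all degrade to 0

assert parse_content_length({'CONTENT_LENGTH': '42'}) == 42
assert parse_content_length({'CONTENT_LENGTH': ''}) == 0    # ValueError
assert parse_content_length({'CONTENT_LENGTH': None}) == 0  # TypeError
assert parse_content_length({}) == 0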
5,567
def __delitem__(self, header): try: del self._headers[header.lower()] except __HOLE__: pass
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.3/django/http/__init__.py/HttpResponse.__delitem__
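Here KeyError makes deletion tolerant: removing a response header that was never set is a silent no-op. A runnable sketch of the same case-insensitive, delete-tolerant mapping (Headers is a toy class, not Django's HttpResponse):

class Headers:
    def __init__(self):
        self._headers = {}

    def __setitem__(self, name, value):
        self._headers[name.lower()] = value

    def __delitem__(self, name):
        try:
            del self._headers[name.lower()]
        except KeyError:
            pass  # absent header: behave like dict.pop(key, None)

h = Headers()
del h['X-Missing']             # silently ignored
h['Content-Type'] = 'text/plain'
del h['CONTENT-TYPE']          # case-insensitive match removes the entry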
5,568
def user_list(user=None, host=None, port=None, maintenance_db=None, password=None, runas=None, return_password=False): ''' Return a dict with information about users of a Postgres server. Set return_password to True to get password hash in the result. CLI Example: .. code-block:: bash salt '*' postgres.user_list ''' ret = {} ver = _parsed_version(user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) if ver: if ver >= distutils.version.LooseVersion('9.1'): replication_column = 'pg_roles.rolreplication' else: replication_column = 'NULL' if ver >= distutils.version.LooseVersion('9.5'): rolcatupdate_column = 'NULL' else: rolcatupdate_column = 'pg_roles.rolcatupdate' else: log.error('Could not retrieve Postgres version. Is Postgresql server running?') return False # will return empty string if return_password = False _x = lambda s: s if return_password else '' query = (''.join([ 'SELECT ' 'pg_roles.rolname as "name",' 'pg_roles.rolsuper as "superuser", ' 'pg_roles.rolinherit as "inherits privileges", ' 'pg_roles.rolcreaterole as "can create roles", ' 'pg_roles.rolcreatedb as "can create databases", ' '{0} as "can update system catalogs", ' 'pg_roles.rolcanlogin as "can login", ' '{1} as "replication", ' 'pg_roles.rolconnlimit as "connections", ' 'pg_roles.rolvaliduntil::timestamp(0) as "expiry time", ' 'pg_roles.rolconfig as "defaults variables" ' , _x(', COALESCE(pg_shadow.passwd, pg_authid.rolpassword) as "password" '), 'FROM pg_roles ' , _x('LEFT JOIN pg_authid ON pg_roles.oid = pg_authid.oid ') , _x('LEFT JOIN pg_shadow ON pg_roles.oid = pg_shadow.usesysid') ]).format(rolcatupdate_column, replication_column)) rows = psql_query(query, runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) def get_bool(rowdict, key): ''' Returns the boolean value of the key, instead of 't' and 'f' strings. ''' if rowdict[key] == 't': return True elif rowdict[key] == 'f': return False else: return None for row in rows: retrow = {} for key in ('superuser', 'inherits privileges', 'can create roles', 'can create databases', 'can update system catalogs', 'can login', 'replication', 'connections'): retrow[key] = get_bool(row, key) for date_key in ('expiry time',): try: retrow[date_key] = datetime.datetime.strptime( row['date_key'], '%Y-%m-%d %H:%M:%S') except (ValueError, __HOLE__): retrow[date_key] = None retrow['defaults variables'] = row['defaults variables'] if return_password: retrow['password'] = row['password'] ret[row['name']] = retrow # for each role, determine the inherited roles for role in six.iterkeys(ret): rdata = ret[role] groups = rdata.setdefault('groups', []) query = ( 'select rolname' ' from pg_user' ' join pg_auth_members' ' on (pg_user.usesysid=pg_auth_members.member)' ' join pg_roles ' ' on (pg_roles.oid=pg_auth_members.roleid)' ' where pg_user.usename=\'{0}\'' ).format(role) try: rows = psql_query(query, runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) for row in rows: if row['rolname'] not in groups: groups.append(row['rolname']) except Exception: # do not fail here, it is just a bonus # to try to determine groups, but the query # is not portable amongst all pg versions continue return ret
KeyError
dataset/ETHPy150Open saltstack/salt/salt/modules/postgres.py/user_list
5,569
def role_get(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None, return_password=False): ''' Return a dict with information about users of a Postgres server. Set return_password to True to get password hash in the result. CLI Example: .. code-block:: bash salt '*' postgres.role_get postgres ''' all_users = user_list(user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas, return_password=return_password) try: return all_users.get(name, None) except __HOLE__: log.error('Could not retrieve Postgres role. Is Postgres running?') return None
AttributeError
dataset/ETHPy150Open saltstack/salt/salt/modules/postgres.py/role_get
5,570
def schema_get(dbname, name, db_user=None, db_password=None, db_host=None, db_port=None): ''' Return a dict with information about schemas in a database. CLI Example: .. code-block:: bash salt '*' postgres.schema_get dbname name dbname Database name we query on name Schema name we look for db_user database username if different from config or default db_password user password if any password for a specified user db_host Database host if different from config or default db_port Database port if different from config or default ''' all_schemas = schema_list(dbname, db_user=db_user, db_host=db_host, db_port=db_port, db_password=db_password) try: return all_schemas.get(name, None) except __HOLE__: log.error('Could not retrieve Postgres schema. Is Postgres running?') return False
AttributeError
dataset/ETHPy150Open saltstack/salt/salt/modules/postgres.py/schema_get
5,571
def _get_object_owner(name, object_type, prepend='public', maintenance_db=None, user=None, host=None, port=None, password=None, runas=None): ''' Return the owner of a postgres object ''' if object_type == 'table': query = (' '.join([ 'SELECT tableowner AS name', 'FROM pg_tables', "WHERE schemaname = '{0}'", "AND tablename = '{1}'" ])).format(prepend, name) elif object_type == 'sequence': query = (' '.join([ 'SELECT rolname AS name', 'FROM pg_catalog.pg_class c', 'JOIN pg_roles r', 'ON c.relowner = r.oid', 'JOIN pg_catalog.pg_namespace n', 'ON n.oid = c.relnamespace', "WHERE relkind='S'", "AND nspname='{0}'", "AND relname = '{1}'", ])).format(prepend, name) elif object_type == 'schema': query = (' '.join([ 'SELECT rolname AS name', 'FROM pg_namespace n', 'JOIN pg_roles r', 'ON n.nspowner = r.oid', "WHERE nspname = '{0}'", ])).format(name) elif object_type == 'tablespace': query = (' '.join([ 'SELECT rolname AS name', 'FROM pg_tablespace t', 'JOIN pg_roles r', 'ON t.spcowner = r.oid', "WHERE spcname = '{0}'", ])).format(name) elif object_type == 'language': query = (' '.join([ 'SELECT rolname AS name', 'FROM pg_language l', 'JOIN pg_roles r', 'ON l.lanowner = r.oid', "WHERE lanname = '{0}'", ])).format(name) elif object_type == 'database': query = (' '.join([ 'SELECT rolname AS name', 'FROM pg_database d', 'JOIN pg_roles r', 'ON d.datdba = r.oid', "WHERE datname = '{0}'", ])).format(name) rows = psql_query( query, runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) try: ret = rows[0]['name'] except __HOLE__: ret = None return ret
IndexError
dataset/ETHPy150Open saltstack/salt/salt/modules/postgres.py/_get_object_owner
5,572
@staticmethod def _resource_deserialize(s): """Returns dict deserialization of a given JSON string.""" try: return json.loads(s) except __HOLE__: raise ResponseError('The API Response was not valid.')
ValueError
dataset/ETHPy150Open kennethreitz-archive/python-github3/github3/api.py/GithubCore._resource_deserialize
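The ValueError hole works because json.JSONDecodeError subclasses ValueError, so one except clause covers malformed JSON on both Python 2 and 3; the handler then converts it into a domain-specific error. A minimal sketch (ResponseError here is a local stand-in for the library's exception):

import json

class ResponseError(Exception):
    """Stand-in for the API wrapper's own error type."""

def deserialize(body):
    try:
        return json.loads(body)
    except ValueError:  # json.JSONDecodeError is a ValueError subclass
        raise ResponseError('The API response was not valid JSON.')

print(deserialize('{"ok": true}'))  # {'ok': True}
# deserialize('not json')           # raises ResponseError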
5,573
def main(): parser = argparse.ArgumentParser() parser.add_argument('-p', '--profile', help='The profile name to use ' 'when starting the AWS Shell.') args = parser.parse_args() indexer = completion.CompletionIndex() try: index_str = indexer.load_index(utils.AWSCLI_VERSION) index_data = json.loads(index_str) except completion.IndexLoadError: print("First run, creating autocomplete index...") from awsshell.makeindex import write_index # TODO: Using internal method, but this will eventually # be moved into the CompletionIndex class anyways. index_file = indexer._filename_for_version(utils.AWSCLI_VERSION) write_index(index_file) index_str = indexer.load_index(utils.AWSCLI_VERSION) index_data = json.loads(index_str) doc_index_file = determine_doc_index_filename() from awsshell.makeindex import write_doc_index doc_data = docs.load_lazy_doc_index(doc_index_file) # There's room for improvement here. If the docs didn't finish # generating, we regen the whole doc index. Ideally we pick up # from where we left off. try: docs.load_doc_db(doc_index_file)['__complete__'] except __HOLE__: print("Creating doc index in the background. " "It will be a few minutes before all documentation is " "available.") t = threading.Thread(target=write_doc_index, args=(doc_index_file,)) t.daemon = True t.start() model_completer = autocomplete.AWSCLIModelCompleter(index_data) completer = shellcomplete.AWSShellCompleter(model_completer) shell = app.create_aws_shell(completer, model_completer, doc_data) if args.profile: shell.profile = args.profile shell.run()
KeyError
dataset/ETHPy150Open awslabs/aws-shell/awsshell/__init__.py/main
5,574
def _detect_gis_backend(): """Determine whether or not a GIS backend is currently in use, to allow for divergent behavior elsewhere. """ # If the connection is from `django.contrib.gis`, then we know that this # is a GIS backend. if '.gis.' in connections[DEFAULT_DB_ALIAS].__module__: return True # Annoying case: If we're using a dummy backend (the most likely reason # being because, for testing, the database is mocked out), we need to # determine GIS presence or absence in such a way that will work on # the system. # # We have to approximate this; essentially, return True if geos is # installed, and False otherwise. We can determine this by trying to # import GEOSException. if '.dummy.' in connections[DEFAULT_DB_ALIAS].__module__: # pragma: no cover try: from django.contrib.gis.geos import GEOSException return True except __HOLE__: return False # Okay, there's no GIS backend in use. return False
ImportError
dataset/ETHPy150Open lukesneeringer/django-pgfields/django_pg/utils/gis.py/_detect_gis_backend
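The try/import/except ImportError shape above is the standard capability probe: the presence of an optional dependency decides behaviour. A tiny generic version (lxml is just an arbitrary example package, unrelated to GEOS):

def has_lxml():
    """Report whether an optional dependency is importable."""
    try:
        import lxml  # noqa: F401, only the import's success matters
        return True
    except ImportError:
        return False

print('lxml available:', has_lxml())  # True or False, never a crash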
5,575
def import_by_path(dotted_path, error_prefix=''): """ Import a dotted module path and return the attribute/class designated by the last name in the path. Raise ImproperlyConfigured if something goes wrong. """ try: module_path, class_name = dotted_path.rsplit('.', 1) except ValueError: raise ImproperlyConfigured("%s%s doesn't look like a module path" % ( error_prefix, dotted_path)) try: module = import_module(module_path) except __HOLE__ as e: msg = '%sError importing module %s: "%s"' % ( error_prefix, module_path, e) six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2]) try: attr = getattr(module, class_name) except AttributeError: raise ImproperlyConfigured('%sModule "%s" does not define a "%s" attribute/class' % ( error_prefix, module_path, class_name)) return attr
ImportError
dataset/ETHPy150Open danirus/django-comments-xtd/django_comments_xtd/compat.py/import_by_path
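Same exception, different intent: here an ImportError from a bad dotted path is converted into a configuration error for the caller. A compact self-contained variant (using plain RuntimeError in place of Django's ImproperlyConfigured):

from importlib import import_module

def import_by_path(dotted_path):
    """Resolve 'pkg.module.attr' to the named attribute."""
    module_path, _, attr_name = dotted_path.rpartition('.')
    try:
        module = import_module(module_path)
    except ImportError as exc:
        raise RuntimeError('cannot import %r: %s' % (module_path, exc))
    try:
        return getattr(module, attr_name)
    except AttributeError:
        raise RuntimeError('%r has no attribute %r' % (module_path, attr_name))

loads = import_by_path('json.loads')
assert loads('[1, 2]') == [1, 2]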
5,576
def _parseJoints(self, node=None): for name in self._names: try: self._joints.append(self._world.getXODERoot().namedChild(name).getODEObject()) except __HOLE__: # the given object name is not found. output warning and quit. warnings.warn("Joint with name '", name, "' not found.") sys.exit()
KeyError
dataset/ETHPy150Open pybrain/pybrain/pybrain/rl/environments/ode/actuators.py/SpecificJointActuator._parseJoints
5,577
def _parseJoints(self, node=None): for name in self._names: try: self._joints.append(self._world.getXODERoot().namedChild(name).getODEObject()) except __HOLE__: # the given object name is not found. output warning and quit. warnings.warn("Joint with name '", name, "' not found.") sys.exit()
KeyError
dataset/ETHPy150Open pybrain/pybrain/pybrain/rl/environments/ode/actuators.py/CopyJointActuator._parseJoints
5,578
def fit_predict(training_data, fitting_data, tau=1, samples_per_job=0, save_results=True, show=False): from disco.worker.pipeline.worker import Worker, Stage from disco.core import Job, result_iterator from disco.core import Disco """ training_data - training samples fitting_data - dataset to be fitted to training data. tau - controls how quickly the weight of a training sample falls off with distance of its x(i) from the query point x. samples_per_job - define a number of samples that will be processed in single mapreduce job. If 0, algorithm will calculate number of samples per job. """ try: tau = float(tau) if tau <= 0: raise Exception("Parameter tau should be >= 0.") except __HOLE__: raise Exception("Parameter tau should be numerical.") if fitting_data.params["id_index"] == -1: raise Exception("Predict data should have id_index set.") job = Job(worker=Worker(save_results=save_results)) job.pipeline = [ ("split", Stage("map", input_chain=fitting_data.params["input_chain"], init=simple_init, process=map_predict))] job.params = fitting_data.params job.run(name="lwlr_read_data", input=fitting_data.params["data_tag"]) samples = {} results = [] tau = float(2 * tau ** 2) # calculate tau once counter = 0 for test_id, x in result_iterator(job.wait(show=show)): if samples_per_job == 0: # calculate number of samples per job if len(x) <= 100: # if there is less than 100 attributes samples_per_job = 100 # 100 samples is max per on job else: # there is more than 100 attributes samples_per_job = len(x) * -25 / 900. + 53 # linear function samples[test_id] = x if counter == samples_per_job: results.append(_fit_predict(training_data, samples, tau, save_results, show)) counter = 0 samples = {} counter += 1 if len(samples) > 0: # if there is some samples left in the the dictionary results.append(_fit_predict(training_data, samples, tau, save_results, show)) # merge results of every iteration into a single tag ddfs = Disco().ddfs ddfs.tag(job.name, [[list(ddfs.blobs(tag))[0][0]] for tag in results]) return ["tag://" + job.name]
ValueError
dataset/ETHPy150Open romanorac/discomll/discomll/regression/locally_weighted_linear_regression.py/fit_predict
5,579
def write8(self, reg, value): "Writes an 8-bit value to the specified register/address" try: self.bus.write_byte_data(self.address, reg, value) if self.debug: print("I2C: Wrote 0x%02X to register 0x%02X" % (value, reg)) except __HOLE__ as err: return self.errMsg()
IOError
dataset/ETHPy150Open DexterInd/GrovePi/Software/Python/grove_barometer_sensors/barometric_sensor_bmp180/Adafruit_I2C.py/Adafruit_I2C.write8
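This record and the read*/write* helpers that follow all share one guard: on Linux, a NACK or an unplugged I2C device surfaces as IOError, which the driver reports instead of propagating. A hardware-free sketch of the pattern (FakeBus deterministically simulates the failure; smbus itself is not imported):

class I2CDevice:
    def __init__(self, bus, address):
        self.bus = bus
        self.address = address

    def write8(self, reg, value):
        try:
            self.bus.write_byte_data(self.address, reg, value)
        except IOError:
            # Bus-level failure: report it and signal the error to the caller.
            print('Error accessing 0x%02X: check wiring/address' % self.address)
            return -1

class FakeBus:
    """Deterministic stand-in for an smbus.SMBus instance."""
    def write_byte_data(self, addr, reg, value):
        raise IOError('simulated bus failure')

dev = I2CDevice(FakeBus(), 0x77)
dev.write8(0xF4, 0x2E)  # prints the error message and returns -1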
5,580
def write16(self, reg, value): "Writes a 16-bit value to the specified register/address pair" try: self.bus.write_word_data(self.address, reg, value) if self.debug: print ("I2C: Wrote 0x%02X to register pair 0x%02X,0x%02X" % (value, reg, reg+1)) except __HOLE__ as err: return self.errMsg()
IOError
dataset/ETHPy150Open DexterInd/GrovePi/Software/Python/grove_barometer_sensors/barometric_sensor_bmp180/Adafruit_I2C.py/Adafruit_I2C.write16
5,581
def writeRaw8(self, value): "Writes an 8-bit value on the bus" try: self.bus.write_byte(self.address, value) if self.debug: print("I2C: Wrote 0x%02X" % value) except __HOLE__ as err: return self.errMsg()
IOError
dataset/ETHPy150Open DexterInd/GrovePi/Software/Python/grove_barometer_sensors/barometric_sensor_bmp180/Adafruit_I2C.py/Adafruit_I2C.writeRaw8
5,582
def writeList(self, reg, list): "Writes an array of bytes using I2C format" try: if self.debug: print("I2C: Writing list to register 0x%02X:" % reg) print(list) self.bus.write_i2c_block_data(self.address, reg, list) except __HOLE__ as err: return self.errMsg()
IOError
dataset/ETHPy150Open DexterInd/GrovePi/Software/Python/grove_barometer_sensors/barometric_sensor_bmp180/Adafruit_I2C.py/Adafruit_I2C.writeList
5,583
def readList(self, reg, length): "Read a list of bytes from the I2C device" try: results = self.bus.read_i2c_block_data(self.address, reg, length) if self.debug: print ("I2C: Device 0x%02X returned the following from reg 0x%02X" % (self.address, reg)) print(results) return results except __HOLE__ as err: return self.errMsg()
IOError
dataset/ETHPy150Open DexterInd/GrovePi/Software/Python/grove_barometer_sensors/barometric_sensor_bmp180/Adafruit_I2C.py/Adafruit_I2C.readList
5,584
def readU8(self, reg): "Read an unsigned byte from the I2C device" try: result = self.bus.read_byte_data(self.address, reg) if self.debug: print("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" % (self.address, result & 0xFF, reg)) return result except __HOLE__ as err: return self.errMsg()
IOError
dataset/ETHPy150Open DexterInd/GrovePi/Software/Python/grove_barometer_sensors/barometric_sensor_bmp180/Adafruit_I2C.py/Adafruit_I2C.readU8
5,585
def readS8(self, reg): "Reads a signed byte from the I2C device" try: result = self.bus.read_byte_data(self.address, reg) if result > 127: result -= 256 if self.debug: print("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" % (self.address, result & 0xFF, reg)) return result except __HOLE__ as err: return self.errMsg()
IOError
dataset/ETHPy150Open DexterInd/GrovePi/Software/Python/grove_barometer_sensors/barometric_sensor_bmp180/Adafruit_I2C.py/Adafruit_I2C.readS8
5,586
def readU16(self, reg, little_endian=True): "Reads an unsigned 16-bit value from the I2C device" try: result = self.bus.read_word_data(self.address,reg) # Swap bytes if using big endian because read_word_data assumes little # endian on ARM (little endian) systems. if not little_endian: result = ((result << 8) & 0xFF00) + (result >> 8) if (self.debug): print("I2C: Device 0x%02X returned 0x%04X from reg 0x%02X" % (self.address, result & 0xFFFF, reg)) return result except __HOLE__ as err: return self.errMsg()
IOError
dataset/ETHPy150Open DexterInd/GrovePi/Software/Python/grove_barometer_sensors/barometric_sensor_bmp180/Adafruit_I2C.py/Adafruit_I2C.readU16
5,587
def readS16(self, reg, little_endian=True): "Reads a signed 16-bit value from the I2C device" try: result = self.readU16(reg,little_endian) if result > 32767: result -= 65536 return result except __HOLE__ as err: return self.errMsg()
IOError
dataset/ETHPy150Open DexterInd/GrovePi/Software/Python/grove_barometer_sensors/barometric_sensor_bmp180/Adafruit_I2C.py/Adafruit_I2C.readS16
5,588
def geometry_string(s): """Get a X-style geometry definition and return a tuple. 600x400 -> (600,400) """ try: x_string, y_string = s.split('x') geometry = (int(x_string), int(y_string)) except __HOLE__: msg = "%s is not a valid geometry specification" %s raise argparse.ArgumentTypeError(msg) return geometry
ValueError
dataset/ETHPy150Open ASPP/pelita/tkviewer.py/geometry_string
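argparse custom type functions signal bad input by raising argparse.ArgumentTypeError, so the record wraps the two ways parsing can fail: wrong number of 'x'-separated parts (tuple unpacking) and non-numeric fields (int()), both of which raise ValueError. A runnable sketch:

import argparse

def geometry(s):
    """Parse '600x400' into (600, 400); usable as an argparse type."""
    try:
        w, h = s.split('x')
        return (int(w), int(h))
    except ValueError:  # wrong arity or non-numeric parts
        raise argparse.ArgumentTypeError('%r is not WIDTHxHEIGHT' % s)

parser = argparse.ArgumentParser()
parser.add_argument('--geometry', type=geometry, default=(800, 600))
args = parser.parse_args(['--geometry', '600x400'])
assert args.geometry == (600, 400)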
5,589
@never_cache def activate(request, uidb64=None, token=None, template_name='users/activate.html', post_activation_redirect=None, current_app=None, extra_context=None): context = { 'title': _('Account activation '), } if post_activation_redirect is None: post_activation_redirect = reverse('users_activation_complete') UserModel = get_user_model() assert uidb64 is not None and token is not None token_generator = EmailActivationTokenGenerator() try: uid = urlsafe_base64_decode(uidb64) user = UserModel._default_manager.get(pk=uid) except (TypeError, __HOLE__, OverflowError, UserModel.DoesNotExist): user = None if user is not None and token_generator.check_token(user, token): user.activate() user_activated.send(sender=user.__class__, request=request, user=user) if settings.USERS_AUTO_LOGIN_ON_ACTIVATION: user.backend = 'django.contrib.auth.backends.ModelBackend' # todo - remove this hack login(request, user) messages.info(request, 'Thanks for registering. You are now logged in.') return redirect(post_activation_redirect) else: title = _('Email confirmation unsuccessful') context = { 'title': title, } if extra_context is not None: # pragma: no cover context.update(extra_context) return TemplateResponse(request, template_name, context, current_app=current_app)
ValueError
dataset/ETHPy150Open mishbahr/django-users2/users/views.py/activate
5,590
def get_cluster_info(): # The fallback constraints used for testing will come from the current # effective constraints. eff_constraints = dict(constraints.EFFECTIVE_CONSTRAINTS) # We'll update those constraints based on what the /info API provides, if # anything. global cluster_info global config try: conn = Connection(config) conn.authenticate() cluster_info.update(conn.cluster_info()) except (ResponseError, socket.error): # Failed to get cluster_information via /info API, so fall back on # test.conf data pass else: try: eff_constraints.update(cluster_info['swift']) except __HOLE__: # Most likely the swift cluster has "expose_info = false" set # in its proxy-server.conf file, so we'll just do the best we # can. print("** Swift Cluster not exposing /info **", file=sys.stderr) # Finally, we'll allow any constraint present in the swift-constraints # section of test.conf to override everything. Note that only those # constraints defined in the constraints module are converted to integers. test_constraints = get_config('swift-constraints') for k in constraints.DEFAULT_CONSTRAINTS: try: test_constraints[k] = int(test_constraints[k]) except KeyError: pass except ValueError: print("Invalid constraint value: %s = %s" % ( k, test_constraints[k]), file=sys.stderr) eff_constraints.update(test_constraints) # Just make it look like these constraints were loaded from a /info call, # even if the /info call failed, or when they are overridden by values # from the swift-constraints section of test.conf cluster_info['swift'] = eff_constraints
KeyError
dataset/ETHPy150Open openstack/swift/test/functional/__init__.py/get_cluster_info
5,591
def setup_package(): global policy_specified policy_specified = os.environ.get('SWIFT_TEST_POLICY') in_process_env = os.environ.get('SWIFT_TEST_IN_PROCESS') if in_process_env is not None: use_in_process = utils.config_true_value(in_process_env) else: use_in_process = None global in_process global config if use_in_process: # Explicitly set to True, so barrel on ahead with in-process # functional test setup. in_process = True # NOTE: No attempt is made to a read local test.conf file. else: if use_in_process is None: # Not explicitly set, default to using in-process functional tests # if the test.conf file is not found, or does not provide a usable # configuration. config.update(get_config('func_test')) if not config: in_process = True # else... leave in_process value unchanged. It may be that # setup_package is called twice, in which case in_process_setup may # have loaded config before we reach here a second time, so the # existence of config is not reliable to determine that in_process # should be False. Anyway, it's default value is False. else: # Explicitly set to False, do not attempt to use in-process # functional tests, be sure we attempt to read from local # test.conf file. in_process = False config.update(get_config('func_test')) if in_process: in_mem_obj_env = os.environ.get('SWIFT_TEST_IN_MEMORY_OBJ') in_mem_obj = utils.config_true_value(in_mem_obj_env) try: in_process_setup(the_object_server=( mem_object_server if in_mem_obj else object_server)) except InProcessException as exc: print(('Exception during in-process setup: %s' % str(exc)), file=sys.stderr) raise global web_front_end web_front_end = config.get('web_front_end', 'integral') global normalized_urls normalized_urls = config.get('normalized_urls', False) global orig_collate orig_collate = locale.setlocale(locale.LC_COLLATE) locale.setlocale(locale.LC_COLLATE, config.get('collate', 'C')) global insecure insecure = config_true_value(config.get('insecure', False)) global swift_test_auth_version global swift_test_auth global swift_test_user global swift_test_key global swift_test_tenant global swift_test_perm global swift_test_domain global swift_test_service_prefix swift_test_service_prefix = None if config: swift_test_auth_version = str(config.get('auth_version', '1')) swift_test_auth = 'http' if config_true_value(config.get('auth_ssl', 'no')): swift_test_auth = 'https' if 'auth_prefix' not in config: config['auth_prefix'] = '/' try: suffix = '://%(auth_host)s:%(auth_port)s%(auth_prefix)s' % config swift_test_auth += suffix except KeyError: pass # skip if 'service_prefix' in config: swift_test_service_prefix = utils.append_underscore( config['service_prefix']) if swift_test_auth_version == "1": swift_test_auth += 'v1.0' try: if 'account' in config: swift_test_user[0] = '%(account)s:%(username)s' % config else: swift_test_user[0] = '%(username)s' % config swift_test_key[0] = config['password'] except KeyError: # bad config, no account/username configured, tests cannot be # run pass try: swift_test_user[1] = '%s%s' % ( '%s:' % config['account2'] if 'account2' in config else '', config['username2']) swift_test_key[1] = config['password2'] except KeyError: pass # old config, no second account tests can be run try: swift_test_user[2] = '%s%s' % ( '%s:' % config['account'] if 'account' in config else '', config['username3']) swift_test_key[2] = config['password3'] except KeyError: pass # old config, no third account tests can be run try: swift_test_user[4] = '%s%s' % ( '%s:' % config['account5'], config['username5']) swift_test_key[4] = config['password5'] swift_test_tenant[4] = config['account5'] except __HOLE__: pass # no service token tests can be run for _ in range(3): swift_test_perm[_] = swift_test_user[_] else: swift_test_user[0] = config['username'] swift_test_tenant[0] = config['account'] swift_test_key[0] = config['password'] swift_test_user[1] = config['username2'] swift_test_tenant[1] = config['account2'] swift_test_key[1] = config['password2'] swift_test_user[2] = config['username3'] swift_test_tenant[2] = config['account'] swift_test_key[2] = config['password3'] if 'username4' in config: swift_test_user[3] = config['username4'] swift_test_tenant[3] = config['account4'] swift_test_key[3] = config['password4'] swift_test_domain[3] = config['domain4'] if 'username5' in config: swift_test_user[4] = config['username5'] swift_test_tenant[4] = config['account5'] swift_test_key[4] = config['password5'] if 'username6' in config: swift_test_user[5] = config['username6'] swift_test_tenant[5] = config['account6'] swift_test_key[5] = config['password6'] for _ in range(5): swift_test_perm[_] = swift_test_tenant[_] + ':' \ + swift_test_user[_] global skip skip = not all([swift_test_auth, swift_test_user[0], swift_test_key[0]]) if skip: print('SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG', file=sys.stderr) global skip2 skip2 = not all([not skip, swift_test_user[1], swift_test_key[1]]) if not skip and skip2: print('SKIPPING SECOND ACCOUNT FUNCTIONAL TESTS ' 'DUE TO NO CONFIG FOR THEM', file=sys.stderr) global skip3 skip3 = not all([not skip, swift_test_user[2], swift_test_key[2]]) if not skip and skip3: print('SKIPPING THIRD ACCOUNT FUNCTIONAL TESTS' 'DUE TO NO CONFIG FOR THEM', file=sys.stderr) global skip_if_not_v3 skip_if_not_v3 = (swift_test_auth_version != '3' or not all([not skip, swift_test_user[3], swift_test_key[3]])) if not skip and skip_if_not_v3: print('SKIPPING FUNCTIONAL TESTS SPECIFIC TO AUTH VERSION 3', file=sys.stderr) global skip_service_tokens skip_service_tokens = not all([not skip, swift_test_user[4], swift_test_key[4], swift_test_tenant[4], swift_test_service_prefix]) if not skip and skip_service_tokens: print( 'SKIPPING FUNCTIONAL TESTS SPECIFIC TO SERVICE TOKENS', file=sys.stderr) if policy_specified: policies = FunctionalStoragePolicyCollection.from_info() for p in policies: # policy names are case-insensitive if policy_specified.lower() == p['name'].lower(): _info('Using specified policy %s' % policy_specified) FunctionalStoragePolicyCollection.policy_specified = p Container.policy_specified = policy_specified break else: _info( 'SKIPPING FUNCTIONAL TESTS: Failed to find specified policy %s' % policy_specified) raise Exception('Failed to find specified policy %s' % policy_specified) global skip_if_no_reseller_admin skip_if_no_reseller_admin = not all([not skip, swift_test_user[5], swift_test_key[5], swift_test_tenant[5]]) if not skip and skip_if_no_reseller_admin: print( 'SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG FOR RESELLER ADMIN', file=sys.stderr) get_cluster_info()
KeyError
dataset/ETHPy150Open openstack/swift/test/functional/__init__.py/setup_package
5,592
def load_constraint(name): global cluster_info try: c = cluster_info['swift'][name] except __HOLE__: raise SkipTest("Missing constraint: %s" % name) if not isinstance(c, int): raise SkipTest("Bad value, %r, for constraint: %s" % (c, name)) return c
KeyError
dataset/ETHPy150Open openstack/swift/test/functional/__init__.py/load_constraint
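A missing key here means the cluster under test simply lacks the constraint, so KeyError is translated into a test skip rather than a failure. A self-contained sketch using unittest.SkipTest directly (CLUSTER_INFO is a hard-coded stand-in for the /info response):

import unittest

CLUSTER_INFO = {'swift': {'max_file_size': 5368709120}}

def load_constraint(name):
    try:
        value = CLUSTER_INFO['swift'][name]
    except KeyError:
        raise unittest.SkipTest('Missing constraint: %s' % name)
    if not isinstance(value, int):
        raise unittest.SkipTest('Bad value %r for constraint %s' % (value, name))
    return value

assert load_constraint('max_file_size') == 5368709120
# load_constraint('max_meta_count')  # would raise unittest.SkipTest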
5,593
@classmethod def from_info(cls, info=None): if not (info or cluster_info): get_cluster_info() info = info or cluster_info try: policy_info = info['swift']['policies'] except __HOLE__: raise AssertionError('Did not find any policy info in %r' % info) policies = cls(policy_info) assert policies.default, \ 'Did not find default policy in %r' % policy_info return policies
KeyError
dataset/ETHPy150Open openstack/swift/test/functional/__init__.py/FunctionalStoragePolicyCollection.from_info
5,594
def requires_policies(f): @functools.wraps(f) def wrapper(self, *args, **kwargs): if skip: raise SkipTest try: self.policies = FunctionalStoragePolicyCollection.from_info() except __HOLE__: raise SkipTest("Unable to determine available policies") if len(self.policies) < 2: raise SkipTest("Multiple policies not enabled") return f(self, *args, **kwargs) return wrapper
AssertionError
dataset/ETHPy150Open openstack/swift/test/functional/__init__.py/requires_policies
5,595
def run(self, end_time, delay=False): # <1> """Schedule and display events until time is up""" # schedule the first event for each cab for _, proc in sorted(self.procs.items()): # <2> first_event = next(proc) # <3> self.events.put(first_event) # <4> # main loop of the simulation sim_time = 0 # <5> while sim_time < end_time: # <6> if self.events.empty(): # <7> print('*** end of events ***') break # get and display current event current_event = self.events.get() # <8> if delay: time.sleep((current_event.time - sim_time) / 2) # update the simulation time sim_time, proc_id, previous_action = current_event print('taxi:', proc_id, proc_id * ' ', current_event) active_proc = self.procs[proc_id] # schedule next action for current proc next_time = sim_time + compute_duration(previous_action) try: next_event = active_proc.send(next_time) # <12> except __HOLE__: del self.procs[proc_id] # <13> else: self.events.put(next_event) # <14> else: # <15> msg = '*** end of simulation time: {} events pending ***' print(msg.format(self.events.qsize())) # END TAXI_SIMULATOR
StopIteration
dataset/ETHPy150Open fluentpython/example-code/16-coroutine/taxi_sim_delay.py/Simulator.run
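generator.send raises StopIteration when the coroutine runs off its end, which is exactly how the simulator learns a taxi process has finished and removes it from self.procs. A toy coroutine driven the same way:

def countdown(n):
    """Yields n, n-1, ...; .send(step) chooses the decrement."""
    while n > 0:
        step = yield n
        n -= step if step is not None else 1

gen = countdown(3)
print(next(gen))            # prime the coroutine: 3
try:
    while True:
        print(gen.send(1))  # 2, then 1, then StopIteration
except StopIteration:
    print('coroutine finished')  # analogous to del self.procs[proc_id]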
5,596
def blit_to_texture(self, target, level, x, y, z, internalformat=None): '''Draw this image to to the currently bound texture at `target`. This image's anchor point will be aligned to the given `x` and `y` coordinates. If the currently bound texture is a 3D texture, the `z` parameter gives the image slice to blit into. If `internalformat` is specified, glTexImage is used to initialise the texture; otherwise, glTexSubImage is used to update a region. ''' x -= self.anchor_x y -= self.anchor_y data_format = self.format data_pitch = abs(self._current_pitch) # Determine pixel format from format string matrix = None format, type = self._get_gl_format_and_type(data_format) if format is None: if (len(data_format) in (3, 4) and gl_info.have_extension('GL_ARB_imaging')): # Construct a color matrix to convert to GL_RGBA def component_column(component): try: pos = 'RGBA'.index(component) return [0] * pos + [1] + [0] * (3 - pos) except __HOLE__: return [0, 0, 0, 0] # pad to avoid index exceptions lookup_format = data_format + 'XXX' matrix = (component_column(lookup_format[0]) + component_column(lookup_format[1]) + component_column(lookup_format[2]) + component_column(lookup_format[3])) format = { 3: GL_RGB, 4: GL_RGBA}.get(len(data_format)) type = GL_UNSIGNED_BYTE glMatrixMode(GL_COLOR) glPushMatrix() glLoadMatrixf((GLfloat * 16)(*matrix)) else: # Need to convert data to a standard form data_format = { 1: 'L', 2: 'LA', 3: 'RGB', 4: 'RGBA'}.get(len(data_format)) format, type = self._get_gl_format_and_type(data_format) # Workaround: don't use GL_UNPACK_ROW_LENGTH if gl.current_context._workaround_unpack_row_length: data_pitch = self.width * len(data_format) # Get data in required format (hopefully will be the same format it's # already in, unless that's an obscure format, upside-down or the # driver is old). data = self._convert(data_format, data_pitch) if data_pitch & 0x1: alignment = 1 elif data_pitch & 0x2: alignment = 2 else: alignment = 4 row_length = data_pitch // len(data_format) glPushClientAttrib(GL_CLIENT_PIXEL_STORE_BIT) glPixelStorei(GL_UNPACK_ALIGNMENT, alignment) glPixelStorei(GL_UNPACK_ROW_LENGTH, row_length) self._apply_region_unpack() if target == GL_TEXTURE_3D: assert not internalformat glTexSubImage3D(target, level, x, y, z, self.width, self.height, 1, format, type, data) elif internalformat: glTexImage2D(target, level, internalformat, self.width, self.height, 0, format, type, data) else: glTexSubImage2D(target, level, x, y, self.width, self.height, format, type, data) glPopClientAttrib() if matrix: glPopMatrix() glMatrixMode(GL_MODELVIEW) # Flush image upload before data get GC'd. glFlush()
ValueError
dataset/ETHPy150Open ardekantur/pyglet/pyglet/image/__init__.py/ImageData.blit_to_texture
5,597
def _convert(self, format, pitch): '''Return data in the desired format; does not alter this instance's current format or pitch. ''' if format == self._current_format and pitch == self._current_pitch: return self._current_data self._ensure_string_data() data = self._current_data current_pitch = self._current_pitch current_format = self._current_format sign_pitch = current_pitch // abs(current_pitch) if format != self._current_format: # Create replacement string, e.g. r'\4\1\2\3' to convert RGBA to # ARGB repl = '' for c in format: try: idx = current_format.index(c) + 1 except __HOLE__: idx = 1 repl += r'\%d' % idx if len(current_format) == 1: swap_pattern = self._swap1_pattern elif len(current_format) == 2: swap_pattern = self._swap2_pattern elif len(current_format) == 3: swap_pattern = self._swap3_pattern elif len(current_format) == 4: swap_pattern = self._swap4_pattern else: raise ImageException( 'Current image format is wider than 32 bits.') packed_pitch = self.width * len(current_format) if abs(self._current_pitch) != packed_pitch: # Pitch is wider than pixel data, need to go row-by-row. rows = re.findall( '.' * abs(self._current_pitch), data, re.DOTALL) rows = [swap_pattern.sub(repl, r[:packed_pitch]) for r in rows] data = ''.join(rows) else: # Rows are tightly packed, apply regex over whole image. data = swap_pattern.sub(repl, data) # After conversion, rows will always be tightly packed current_pitch = sign_pitch * (len(format) * self.width) if pitch != current_pitch: diff = abs(current_pitch) - abs(pitch) if diff > 0: # New pitch is shorter than old pitch, chop bytes off each row pattern = re.compile( '(%s)%s' % ('.' * abs(pitch), '.' * diff), re.DOTALL) data = pattern.sub(r'\1', data) elif diff < 0: # New pitch is longer than old pitch, add '0' bytes to each row pattern = re.compile( '(%s)' % ('.' * abs(current_pitch)), re.DOTALL) pad = '.' * -diff data = pattern.sub(r'\1%s' % pad, data) if current_pitch * pitch < 0: # Pitch differs in sign, swap row order rows = re.findall(asbytes('.') * abs(pitch), data, re.DOTALL) rows.reverse() data = asbytes('').join(rows) return data
ValueError
dataset/ETHPy150Open ardekantur/pyglet/pyglet/image/__init__.py/ImageData._convert
5,598
def get_authorization(self, req, chal): try: realm = chal['realm'] nonce = chal['nonce'] qop = chal.get('qop') algorithm = chal.get('algorithm', 'MD5') # mod_digest doesn't send an opaque, even though it isn't # supposed to be optional opaque = chal.get('opaque', None) except __HOLE__: return None H, KD = self.get_algorithm_impls(algorithm) if H is None: return None user, pw = self.passwd.find_user_password(realm, req.get_full_url()) if user is None: return None # XXX not implemented yet if req.has_data(): entdig = self.get_entity_digest(req.get_data(), chal) else: entdig = None A1 = "%s:%s:%s" % (user, realm, pw) A2 = "%s:%s" % (req.get_method(), # XXX selector: what about proxies and full urls req.get_selector()) if qop == 'auth': self.nonce_count += 1 ncvalue = '%08x' % self.nonce_count cnonce = self.get_cnonce(nonce) noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2)) respdig = KD(H(A1), noncebit) elif qop is None: respdig = KD(H(A1), "%s:%s" % (nonce, H(A2))) else: # XXX handle auth-int. pass # XXX should the partial digests be encoded too? base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ 'response="%s"' % (user, realm, nonce, req.get_selector(), respdig) if opaque: base += ', opaque="%s"' % opaque if entdig: base += ', digest="%s"' % entdig base += ', algorithm="%s"' % algorithm if qop: base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce) return base
KeyError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/samples-and-tests/i-am-a-developer/mechanize/_auth.py/AbstractDigestAuthHandler.get_authorization
5,599
def percentiles(self, percentiles): """Given a list of percentiles (floats between 0 and 1), return a list of the values at those percentiles, interpolating if necessary.""" try: scores = [0.0]*len(percentiles) if self.count > 0: values = self.samples() values.sort() for i in range(len(percentiles)): p = percentiles[i] pos = p * (len(values) + 1) if pos < 1: scores[i] = values[0] elif pos > len(values): scores[i] = values[-1] else: upper, lower = values[int(pos - 1)], values[int(pos)] scores[i] = lower + (pos - floor(pos)) * (upper - lower) return scores except __HOLE__: return [float('NaN')] * len(percentiles)
IndexError
dataset/ETHPy150Open Cue/scales/src/greplin/scales/samplestats.py/Sampler.percentiles
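The IndexError fallback above returns NaN for every requested percentile when the sample list cannot support the lookup (for example, when it is empty). A compact single-percentile sketch; note it interpolates upward from the lower neighbour, whereas the record binds its upper/lower names the other way around:

from math import floor

def percentile(values, p):
    """Interpolated percentile of a sorted list; NaN when impossible."""
    try:
        pos = p * (len(values) + 1)
        if pos < 1:
            return values[0]
        if pos > len(values):
            return values[-1]
        lower, upper = values[int(pos) - 1], values[int(pos)]
        return lower + (pos - floor(pos)) * (upper - lower)
    except IndexError:  # empty input, or a boundary position
        return float('nan')

data = sorted([10.0, 20.0, 30.0, 40.0])
print(percentile(data, 0.5))  # 25.0, midway between 20 and 30
print(percentile([], 0.5))    # nan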