Column schema (dataset preview):

  Unnamed: 0   int64     values 0 - 10k
  function     string    lengths 79 - 138k characters
  label        string    20 classes
  info         string    lengths 42 - 261 characters
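Each record below pairs a row index with a Python function in which one exception type has been masked as __HOLE__; the label column gives the exception class that fills the mask, and info gives the originating file path. The following is a minimal sketch of loading and inspecting such a dump; the file name exceptions.csv, the CSV storage format, and the use of pandas are assumptions rather than part of the original data.

    import pandas as pd

    # Assumed file name and format; the dump itself does not state how it is stored.
    df = pd.read_csv("exceptions.csv")

    # Distribution over the 20 exception classes used as labels.
    print(df["label"].value_counts())

    # Provenance path for one record (index alignment with "Unnamed: 0" is assumed).
    print(df.loc[2800, "info"])

    # Restore a masked sample by substituting the label back into the function text.
    sample = df.loc[2800]
    restored = sample["function"].replace("__HOLE__", sample["label"])
    print(restored)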
2,800
def on_name(self, node):    # ('id', 'ctx')
    """ Name node """
    ctx = node.ctx.__class__
    if ctx == ast.Del:
        val = self.symtable.del_symbol(node.id)
    elif ctx == ast.Param:  # for Function Def
        val = str(node.id)
    else:
        # val = self.symtable.get_symbol(node.id)
        try:
            val = self.symtable.get_symbol(node.id)
        except (__HOLE__, LookupError):
            msg = "name '%s' is not defined" % node.id
            self.raise_exception(node, msg=msg)
    return val
NameError
dataset/ETHPy150Open xraypy/xraylarch/lib/interpreter.py/Interpreter.on_name
2,801
def on_attribute(self, node):    # ('value', 'attr', 'ctx')
    "extract attribute"
    ctx = node.ctx.__class__
    if ctx == ast.Del:
        return delattr(sym, node.attr)

    sym = self.run(node.value)
    if node.attr not in UNSAFE_ATTRS:
        try:
            return getattr(sym, node.attr)
        except __HOLE__:
            pass

    obj = self.run(node.value)
    fmt = "%s does not have member '%s'"
    if not isgroup(obj):
        obj = obj.__class__
        fmt = "%s does not have attribute '%s'"
    msg = fmt % (obj, node.attr)
    self.raise_exception(node, exc=AttributeError, msg=msg)
AttributeError
dataset/ETHPy150Open xraypy/xraylarch/lib/interpreter.py/Interpreter.on_attribute
2,802
def sync(opts, form, saltenv=None): ''' Sync custom modules into the extension_modules directory ''' if saltenv is None: saltenv = ['base'] if isinstance(saltenv, six.string_types): saltenv = saltenv.split(',') ret = [] remote = set() source = salt.utils.url.create('_' + form) mod_dir = os.path.join(opts['extension_modules'], '{0}'.format(form)) cumask = os.umask(0o77) try: if not os.path.isdir(mod_dir): log.info('Creating module dir \'{0}\''.format(mod_dir)) try: os.makedirs(mod_dir) except (IOError, __HOLE__): log.error( 'Cannot create cache module directory {0}. Check ' 'permissions.'.format(mod_dir) ) fileclient = salt.fileclient.get_file_client(opts) for sub_env in saltenv: log.info( 'Syncing {0} for environment \'{1}\''.format(form, sub_env) ) cache = [] log.info( 'Loading cache from {0}, for {1})'.format(source, sub_env) ) # Grab only the desired files (.py, .pyx, .so) cache.extend( fileclient.cache_dir( source, sub_env, include_empty=False, include_pat=r'E@\.(pyx?|so|zip)$', exclude_pat=None ) ) local_cache_dir = os.path.join( opts['cachedir'], 'files', sub_env, '_{0}'.format(form) ) log.debug('Local cache dir: \'{0}\''.format(local_cache_dir)) for fn_ in cache: relpath = os.path.relpath(fn_, local_cache_dir) relname = os.path.splitext(relpath)[0].replace(os.sep, '.') remote.add(relpath) dest = os.path.join(mod_dir, relpath) log.info('Copying \'{0}\' to \'{1}\''.format(fn_, dest)) if os.path.isfile(dest): # The file is present, if the sum differs replace it hash_type = opts.get('hash_type', 'md5') src_digest = salt.utils.get_hash(fn_, hash_type) dst_digest = salt.utils.get_hash(dest, hash_type) if src_digest != dst_digest: # The downloaded file differs, replace! shutil.copyfile(fn_, dest) ret.append('{0}.{1}'.format(form, relname)) else: dest_dir = os.path.dirname(dest) if not os.path.isdir(dest_dir): os.makedirs(dest_dir) shutil.copyfile(fn_, dest) ret.append('{0}.{1}'.format(form, relname)) touched = bool(ret) if opts.get('clean_dynamic_modules', True): current = set(_listdir_recursively(mod_dir)) for fn_ in current - remote: full = os.path.join(mod_dir, fn_) if os.path.isfile(full): touched = True os.remove(full) # Cleanup empty dirs while True: emptydirs = _list_emptydirs(mod_dir) if not emptydirs: break for emptydir in emptydirs: touched = True shutil.rmtree(emptydir, ignore_errors=True) except Exception as exc: log.error('Failed to sync {0} module: {1}'.format(form, exc)) finally: os.umask(cumask) return ret, touched
OSError
dataset/ETHPy150Open saltstack/salt/salt/utils/extmods.py/sync
2,803
def test_invalid_annodb_key_str(self):
    """ The invalid key should be mentioned in the KeyError... """
    try:
        self.tdb.annodb['fooBar']
        assert 0, "should not reach this point"
    except __HOLE__, e:
        assert 'fooBar' in str(e)
KeyError
dataset/ETHPy150Open cjlee112/pygr/tests/translationDB_test.py/TranslationDB_Test.test_invalid_annodb_key_str
2,804
def column_sql(self, table_name, field_name, field, tablespace='', with_name=True, field_prepared=False): """ Creates the SQL snippet for a column. Used by add_column and add_table. """ # If the field hasn't already been told its attribute name, do so. if not field_prepared: field.set_attributes_from_name(field_name) # hook for the field to do any resolution prior to it's attributes being queried if hasattr(field, 'south_init'): field.south_init() # Possible hook to fiddle with the fields (e.g. defaults & TEXT on MySQL) field = self._field_sanity(field) try: sql = field.db_type(connection=self._get_connection()) except __HOLE__: sql = field.db_type() if sql: # Some callers, like the sqlite stuff, just want the extended type. if with_name: field_output = [self.quote_name(field.column), sql] else: field_output = [sql] if field.primary_key: field_output.append('NOT NULL PRIMARY KEY') elif field.unique: # Just use UNIQUE (no indexes any more, we have delete_unique) field_output.append('UNIQUE') sql = ' '.join(field_output) sqlparams = () # if the field is "NOT NULL" and a default value is provided, create the column with it # this allows the addition of a NOT NULL field to a table with existing rows if not getattr(field, '_suppress_default', False): if field.has_default(): default = field.get_default() # If the default is actually None, don't add a default term if default is not None: # If the default is a callable, then call it! if callable(default): default = default() # Now do some very cheap quoting. TODO: Redesign return values to avoid this. if isinstance(default, string_types): default = "'%s'" % default.replace("'", "''") elif isinstance(default, (datetime.date, datetime.time, datetime.datetime)): default = "'%s'" % default elif isinstance(default, bool): default = int(default) # Escape any % signs in the output (bug #317) if isinstance(default, string_types): default = default.replace("%", "%%") # Add it in sql += " DEFAULT %s" sqlparams = (default) elif (not field.null and field.blank) or (field.get_default() == ''): if field.empty_strings_allowed and self._get_connection().features.interprets_empty_strings_as_nulls: sql += " DEFAULT ''" # Error here would be nice, but doesn't seem to play fair. #else: # raise ValueError("Attempting to add a non null column that isn't character based without an explicit default value.") # Firebird need set not null after of default value keyword if not field.primary_key and not field.null: sql += ' NOT NULL' if field.rel and self.supports_foreign_keys: self.add_deferred_sql( self.foreign_key_sql( table_name, field.column, field.rel.to._meta.db_table, field.rel.to._meta.get_field(field.rel.field_name).column ) ) # Things like the contrib.gis module fields have this in 1.1 and below if hasattr(field, 'post_create_sql'): for stmt in field.post_create_sql(no_style(), table_name): self.add_deferred_sql(stmt) # Avoid double index creation (#1317) # Firebird creates an index implicity for each foreign key field # sql_indexes_for_field tries to create an index for that field too if not field.rel: # In 1.2 and above, you have to ask the DatabaseCreation stuff for it. # This also creates normal indexes in 1.1. if hasattr(self._get_connection().creation, "sql_indexes_for_field"): # Make a fake model to pass in, with only db_table model = self.mock_model("FakeModelForGISCreation", table_name) for stmt in self._get_connection().creation.sql_indexes_for_field(model, field, no_style()): self.add_deferred_sql(stmt) if sql: return sql % sqlparams else: return None
TypeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/South-1.0.2/south/db/firebird.py/DatabaseOperations.column_sql
2,805
def _drop_constraints(self, table_name, name, field):
    if self.has_check_constraints:
        check_constraints = self._constraints_affecting_columns(table_name, [name], "CHECK")
        for constraint in check_constraints:
            self.execute(self.delete_check_sql % {
                'table': self.quote_name(table_name),
                'constraint': self.quote_name(constraint),
            })

    # Drop or add UNIQUE constraint
    unique_constraint = list(self._constraints_affecting_columns(table_name, [name], "UNIQUE"))
    if field.unique and not unique_constraint:
        self.create_unique(table_name, [name])
    elif not field.unique and unique_constraint:
        self.delete_unique(table_name, [name])

    # Drop all foreign key constraints
    try:
        self.delete_foreign_key(table_name, name)
    except __HOLE__:
        # There weren't any
        pass
ValueError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/South-1.0.2/south/db/firebird.py/DatabaseOperations._drop_constraints
2,806
def run_only_if_pyutmp_is_available(func):
    try:
        import pyutmp
    except ImportError:
        pyutmp = None
    try:
        import utmp
    except __HOLE__:
        utmp = None
    pred = lambda: pyutmp is not None or utmp is not None
    return run_only(func, pred)
ImportError
dataset/ETHPy150Open BrightcoveOS/Diamond/src/collectors/users/test/testusers.py/run_only_if_pyutmp_is_available
2,807
def __init__(self, *args, **kwds):
    '''Initialize an ordered dictionary.  Signature is the same as for
    regular dictionaries, but keyword arguments are not recommended
    because their insertion order is arbitrary.
    '''
    if len(args) > 1:
        raise TypeError('expected at most 1 arguments, got %d' % len(args))
    try:
        self.__root
    except __HOLE__:
        self.__root = root = []  # sentinel node
        root[:] = [root, root, None]
        self.__map = {}
    self.__update(*args, **kwds)
AttributeError
dataset/ETHPy150Open PythonCharmers/python-future/docs/3rd-party-py3k-compat-code/pandas_py3k.py/_OrderedDict.__init__
2,808
def clear(self):
    'od.clear() -> None.  Remove all items from od.'
    try:
        for node in itervalues(self.__map):
            del node[:]
        root = self.__root
        root[:] = [root, root, None]
        self.__map.clear()
    except __HOLE__:
        pass
    dict.clear(self)
AttributeError
dataset/ETHPy150Open PythonCharmers/python-future/docs/3rd-party-py3k-compat-code/pandas_py3k.py/_OrderedDict.clear
2,809
def execute(self): """ Runs all cases and records results in `recorder`. Uses :meth:`setup` and :meth:`resume` with default arguments. """ self._setup() try: if self.sequential: self._logger.info('Start sequential evaluation.') server = self._servers[None] = self._seq_server server.top = self.parent while self._iter is not None: try: case = self._iter.next() self._todo.append(case) server.exception = None server.case = None server.state = _LOADING # 'server' already loaded. while self._server_ready(server): pass except __HOLE__: if not self._rerun: self._iter = None break else: self._logger.info('Start concurrent evaluation.') self._start() finally: self._cleanup() if self._abort_exc is not None: msg = "%s: Run aborted: %s" % (self.get_pathname(), traceback_str(self._abort_exc)) newexc = self._abort_exc[0](msg) raise self._abort_exc[0], newexc, self._abort_exc[2]
StopIteration
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/drivers/caseiterdriver.py/CaseIteratorDriver.execute
2,810
def _start(self): """ Start evaluating cases concurrently. """ # Need credentials in case we're using a PublicKey server. credentials = get_credentials() # Determine maximum number of servers available. resources = { 'required_distributions': self._egg_required_distributions, 'orphan_modules': self._egg_orphan_modules, 'python_version': sys.version[:3] } if self.extra_resources: resources.update(self.extra_resources) max_servers = RAM.max_servers(resources) self._logger.debug('max_servers %d', max_servers) if max_servers <= 0: msg = 'No servers supporting required resources %s' % resources self.raise_exception(msg, RuntimeError) # Kick off initial wave of cases. self._server_lock = threading.Lock() self._reply_q = Queue.Queue() self._generation += 1 n_servers = 0 while n_servers < max_servers: if not self._more_to_go(): break # Get next case. Limits servers started if max_servers > cases. try: case = self._iter.next() except __HOLE__: if not self._rerun: self._iter = None break self._todo.append(case) # Start server worker thread. n_servers += 1 name = '%s_%d_%d' % (self.name, self._generation, n_servers) self._logger.debug('starting worker for %r', name) server = self._servers[name] = _ServerData(name) server.in_use = True server_thread = threading.Thread(target=self._service_loop, args=(name, resources, credentials, self._reply_q)) server_thread.daemon = True try: server_thread.start() except thread.error: self._logger.warning('worker thread startup failed for %r', name) server.in_use = False break if sys.platform != 'win32': # Process any pending events. while self._busy(): try: name, result, exc = self._reply_q.get(True, 0.01) except Queue.Empty: break # Timeout. else: # Difficult to force startup failure. if server.server is None: # pragma nocover self._logger.debug('server startup failed for %r', name) server.in_use = False else: server.in_use = self._server_ready(server) if sys.platform == 'win32': # pragma no cover # Don't start server processing until all servers are started, # otherwise we have egg removal issues. for i in self._servers: name, result, exc = self._reply_q.get() server = self._servers[name] if server.server is None: self._logger.debug('server startup failed for %r', name) server.in_use = False # Kick-off started servers. for server in self._servers.values(): if server.in_use: server.in_use = self._server_ready(server) # Continue until no servers are busy. while self._busy(): if self._more_to_go(): timeout = None else: # Don't wait indefinitely for a server we don't need. # This has happened with a server that got 'lost' # in RAM.allocate() timeout = 60 try: name, result, exc = self._reply_q.get(timeout=timeout) # Hard to force worker to hang, which is handled here. except Queue.Empty: # pragma no cover msgs = [] for name, server in self._servers.items(): if server.in_use: if server.server is None or server.info is None: msgs.append('%r: no startup reply' % name) server.in_use = False else: state = server.state if state not in (_LOADING, _EXECUTING): msgs.append('%r: %r %s %s' % (name, server.server, state, server.info)) server.in_use = False if msgs: self._logger.error('Timeout waiting with nothing left to do:') for msg in msgs: self._logger.error(' %s', msg) else: server = self._servers[name] server.in_use = self._server_ready(server) # Shut-down (started) servers. 
self._logger.debug('Shut-down (started) servers') n_queues = 0 for server in self._servers.values(): if server.queue is not None: server.queue.put(None) n_queues += 1 for i in range(n_queues): try: name, status, exc = self._reply_q.get(True, 60) # Hard to force worker to hang, which is handled here. except Queue.Empty: # pragma no cover pass else: self._servers[name].queue = None # Hard to force worker to hang, which is handled here. for server in self._servers.values(): # pragma no cover if server.queue is not None: self._logger.warning('Timeout waiting for %r to shut-down.', server.name)
StopIteration
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/drivers/caseiterdriver.py/CaseIteratorDriver._start
2,811
def _server_ready(self, server): """ Responds to asynchronous callbacks during :meth:`execute` to run cases retrieved from `self._iter`. Results are processed by `recorder`. Returns True if this server is still in use. """ state = server.state self._logger.debug('server %r state %s', server.name, state) in_use = True if state == _LOADING: if server.exception is None: in_use = self._start_next_case(server) else: typ, exc, tback = server.exception self._logger.debug(' exception while loading: %r', exc) if self.error_policy == 'ABORT': if self._abort_exc is None: self._abort_exc = server.exception self._stop = True server.state = _EMPTY in_use = False else: server.load_failures += 1 if server.load_failures < 3: in_use = self._start_processing(server) else: self._logger.debug(' too many load failures') server.state = _EMPTY in_use = False elif state == _EXECUTING: case = server.case server.case = None if server.exception is None: # Grab the results from the model and record. try: self._record_case(server.top, case) except Exception as exc: msg = 'Exception recording case: %s' % exc self._logger.debug(' %s', msg) self._logger.debug('%s', case) case.msg = '%s: %s' % (self.get_pathname(), msg) else: self._logger.debug(' exception while executing: %r', server.exception[1]) case.exc = server.exception if case.exc is not None: if self.error_policy == 'ABORT': if self._abort_exc is None: self._abort_exc = case.exc self._stop = True elif case.retries < self.max_retries: case.exc = None case.retries += 1 self._rerun.append(case) else: self._logger.error('Too many retries for %s', case) # Set up for next case. in_use = self._start_processing(server, reload=True) elif state == _EMPTY: if server.name is None or server.queue is not None: if self._more_to_go(): if server.queue is not None: self._logger.debug(' load_model') server.load_failures = 0 self._load_model(server) server.state = _LOADING else: self._logger.debug(' no more cases') in_use = False # Difficult to force startup failure. else: # pragma nocover in_use = False # Never started. # Just being defensive, should never happen. else: # pragma no cover msg = 'unexpected state %r for server %r' % (state, server.name) self._logger.error(msg) if self.error_policy == 'ABORT': if self._abort_exc is None: try: raise RuntimeError(msg) except __HOLE__: self._abort_exc = sys.exc_info() self._stop = True in_use = False return in_use
RuntimeError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/drivers/caseiterdriver.py/CaseIteratorDriver._server_ready
2,812
def _start_next_case(self, server):
    """ Look for the next case and start it. """
    if self._todo:
        self._logger.debug(' run startup case')
        case = self._todo.pop(0)
        in_use = self._run_case(case, server)
    elif self._rerun:
        self._logger.debug(' rerun case')
        case = self._rerun.pop(0)
        in_use = self._run_case(case, server)
    elif self._iter is None:
        self._logger.debug(' no more cases')
        in_use = False
    else:
        try:
            case = self._iter.next()
        except __HOLE__:
            self._logger.debug(' no more cases')
            self._iter = None
            in_use = False
        else:
            self._logger.debug(' run next case')
            in_use = self._run_case(case, server)
    return in_use
StopIteration
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/drivers/caseiterdriver.py/CaseIteratorDriver._start_next_case
2,813
def InsertVideo(self, album_or_uri, video, filename_or_handle, content_type='image/jpeg'): """Copy of InsertPhoto which removes protections since it *should* work""" try: assert(isinstance(video, VideoEntry)) except AssertionError: raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT, 'body':'`video` must be a gdata.photos.VideoEntry instance', 'reason':'Found %s, not PhotoEntry' % type(video) }) try: majtype, mintype = content_type.split('/') #assert(mintype in SUPPORTED_UPLOAD_TYPES) except (ValueError, __HOLE__): raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE, 'body':'This is not a valid content type: %s' % content_type, 'reason':'Accepted content types:' }) if isinstance(filename_or_handle, (str, unicode)) and \ os.path.exists(filename_or_handle): # it's a file name mediasource = gdata.MediaSource() mediasource.setFile(filename_or_handle, content_type) elif hasattr(filename_or_handle, 'read'):# it's a file-like resource if hasattr(filename_or_handle, 'seek'): filename_or_handle.seek(0) # rewind pointer to the start of the file # gdata.MediaSource needs the content length, so read the whole image file_handle = StringIO.StringIO(filename_or_handle.read()) name = 'image' if hasattr(filename_or_handle, 'name'): name = filename_or_handle.name mediasource = gdata.MediaSource(file_handle, content_type, content_length=file_handle.len, file_name=name) else: #filename_or_handle is not valid raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT, 'body':'`filename_or_handle` must be a path name or a file-like object', 'reason':'Found %s, not path name or object with a .read() method' % \ type(filename_or_handle) }) if isinstance(album_or_uri, (str, unicode)): # it's a uri feed_uri = album_or_uri elif hasattr(album_or_uri, 'GetFeedLink'): # it's a AlbumFeed object feed_uri = album_or_uri.GetFeedLink().href try: return self.Post(video, uri=feed_uri, media_source=mediasource, converter=None) except gdata.service.RequestError, e: raise GooglePhotosException(e.args[0])
AssertionError
dataset/ETHPy150Open jackpal/picasawebuploader/main.py/InsertVideo
2,814
def _float_or_none(value):
    try:
        return float(value)
    except __HOLE__:
        return None
ValueError
dataset/ETHPy150Open seatgeek/graphite-pager/graphitepager/graphite_data_record.py/_float_or_none
2,815
def get_by_natural_key(self, hashval=None, **kwargs):
    qs = self.filter_by_natural_key(hashval, **kwargs)
    real_hashval = qs._hashval
    try:
        return qs.get()
    except MultipleObjectsReturned, error:
        raise MultipleObjectsReturned('Duplicate natural keys found! Lookup parameters were %s. Natural key hash is: %s' % (hashval or kwargs, real_hashval))
    except __HOLE__, error:
        raise ObjectDoesNotExist('Natural key not found! Lookup paramets were %s. Natural key hash is: %s' % (hashval or kwargs, real_hashval))
ObjectDoesNotExist
dataset/ETHPy150Open zbyte64/django-dockit/dockit/schema/manager.py/Manager.get_by_natural_key
2,816
def load_app_yaml(self, path=None):
    # default to the initialized path set with command args
    if path is None:
        self.set_app_path(self.app_path)
    else:
        self.set_app_path(path)
    try:
        with open(self.app_yaml_path, 'r') as yaml_file:
            self.app_yaml = yaml.safe_load(yaml_file.read())
    except __HOLE__:
        logger.error('Cannot load %s' % self.app_yaml_path)
        sys.exit(2)
    except yaml.YAMLError as e:
        logger.error('Cannot parse yaml file %s' % self.app_yaml_path)
        sys.exit(2)
IOError
dataset/ETHPy150Open glennyonemitsu/MarkupHiveSDK/sdklib/wsgi.py/DynamicDispatcher.load_app_yaml
2,817
def __call__(self):
    logger.info('Running source code watcher')
    while True:
        app_path = os.path.abspath(self.path)
        css_path = os.path.join(app_path, 'static', 'css')
        js_path = os.path.join(app_path, 'static', 'js')
        static_files = [f for f in glob.glob(css_path + '/*') if os.path.isfile(f)]
        static_files += [f for f in glob.glob(css_path + '/**/*') if os.path.isfile(f)]
        static_files += [f for f in glob.glob(js_path + '/*') if os.path.isfile(f)]
        static_files += [f for f in glob.glob(js_path + '/**/*') if os.path.isfile(f)]
        for f in static_files:
            try:
                mtime = os.path.getmtime(f)
                if f not in self.statics or mtime > self.statics[f]['mtime']:
                    if f in self.statics and mtime > self.statics[f]['mtime']:
                        logger.debug('Found new file update for: {0}'.format(f))
                    if f.startswith(css_path):
                        data = self.process_css(f)
                    elif f.startswith(js_path):
                        data = self.process_js(f)
                    self.statics[f] = {'mtime': mtime, 'data': data}
            except __HOLE__ as e:
                # ignoring OS Errors since it could be an editor creating
                # scratch files momentarily
                pass
        time.sleep(1.0)
OSError
dataset/ETHPy150Open glennyonemitsu/MarkupHiveSDK/sdklib/wsgi.py/SourceWatcher.__call__
2,818
def get_dataflow_docstring():
    """Get docstring for Dataflow module and give it an rST title."""
    init_file_path = os.path.normpath('./google/cloud/dataflow/__init__.py')
    try:
        with open(init_file_path, 'r') as init_file:
            init_file_contents = init_file.read()
    except __HOLE__:
        return None
    doc_match = re.search(r'"""(.*)"""', init_file_contents, flags=re.DOTALL)
    if not doc_match:
        return None
    docstring = doc_match.group(1).rstrip()
    title_match = re.match(r'(.*)\.\n\n', docstring)
    if title_match:
        # A module docstring has a first line that ends with a period and has a
        # blank line after it. reStructuredText, the format used by setuptools
        # (and other Python API documentation tools), wants no trailing period
        # and a highlighting line of equal signs under the title line.
        # Convert by removing the period and adding a highlighting line.
        equalsigns_fill_format = '\n{:=^%d}\n' % title_match.end(1)
        title_underline = equalsigns_fill_format.format('=')
        docstring = re.sub(r'\.\n', title_underline, docstring, count=1)
    return docstring
IOError
dataset/ETHPy150Open GoogleCloudPlatform/DataflowPythonSDK/setup.py/get_dataflow_docstring
2,819
def catkin_main(sysargs): # Initialize config try: initialize_config() except __HOLE__ as exc: sys.exit("Failed to initialize config: {0}".format(exc)) # Create a top level parser parser = argparse.ArgumentParser( description="catkin command", formatter_class=argparse.RawDescriptionHelpFormatter) add = parser.add_argument add('-a', '--list-aliases', action="store_true", default=False, help="Lists the current verb aliases and then quits, all other arguments are ignored") add('--test-colors', action='store_true', default=False, help="Prints a color test pattern to the screen and then quits, all other arguments are ignored") add('--version', action='store_true', default=False, help="Prints the catkin_tools version.") color_control_group = parser.add_mutually_exclusive_group() add = color_control_group.add_argument add('--force-color', action='store_true', default=False, help='Forces catkin to output in color, even when the terminal does not appear to support it.') add('--no-color', action='store_true', default=False, help='Forces catkin to not use color in the output, regardless of the detect terminal type.') # Deprecated, moved to `catkin locate --shell-verbs add('--locate-extra-shell-verbs', action='store_true', help=argparse.SUPPRESS) # Generate a list of verbs available verbs = list_verbs() # Create the subparsers for each verb and collect the argument preprocessors argument_preprocessors = create_subparsers(parser, verbs) # Get verb aliases verb_aliases = get_verb_aliases() # Setup sysargs sysargs = sys.argv[1:] if sysargs is None else sysargs # Get colors config no_color = False force_color = os.environ.get('CATKIN_TOOLS_FORCE_COLOR', False) for arg in sysargs: if arg == '--no-color': no_color = True if arg == '--force-color': force_color = True if no_color or not force_color and not is_tty(sys.stdout): set_color(False) # Check for version if '--version' in sysargs: print('catkin_tools {} (C) 2014-{} Open Source Robotics Foundation'.format( pkg_resources.get_distribution('catkin_tools').version, date.today().year) ) print('catkin_tools is released under the Apache License,' ' Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)') print('---') print('Using Python {}'.format(''.join(sys.version.split('\n')))) sys.exit(0) # Deprecated option if '--locate-extra-shell-verbs' in sysargs: print('Please use `catkin locate --shell-verbs` instead of `catkin --locate-extra-shell-verbs`', file=sys.stderr) sys.exit(0) # Check for --test-colors for arg in sysargs: if arg == '--test-colors': test_colors() sys.exit(0) if not arg.startswith('-'): break # Check for --list-aliases for arg in sysargs: if arg == '--list-aliases' or arg == '-a': for alias in sorted(list(verb_aliases.keys())): print("{0}: {1}".format(alias, verb_aliases[alias])) sys.exit(0) if not arg.startswith('-'): break # Do verb alias expansion sysargs = expand_verb_aliases(sysargs, verb_aliases) # Determine the verb, splitting arguments into pre and post verb verb = None pre_verb_args = [] post_verb_args = [] for index, arg in enumerate(sysargs): # If the arg does not start with a `-` then it is a positional argument # The first positional argument must be the verb if not arg.startswith('-'): verb = arg post_verb_args = sysargs[index + 1:] break # If the `-h` or `--help` option comes before the verb, parse_args if arg in ['-h', '--help']: parser.parse_args(sysargs) # Otherwise it is a pre-verb option pre_verb_args.append(arg) # Error on no verb provided if verb is None: print(parser.format_usage()) sys.exit("Error: No verb 
provided.") # Error on unknown verb provided if verb not in verbs: print(parser.format_usage()) sys.exit("Error: Unknown verb '{0}' provided.".format(verb)) # First allow the verb's argument preprocessor to strip any args # and return any "extra" information it wants as a dict processed_post_verb_args, extras = argument_preprocessors[verb](post_verb_args) # Then allow argparse to process the left over post-verb arguments along # with the pre-verb arguments and the verb itself args = parser.parse_args(pre_verb_args + [verb] + processed_post_verb_args) # Extend the argparse result with the extras from the preprocessor for key, value in extras.items(): setattr(args, key, value) # Finally call the subparser's main function with the processed args # and the extras which the preprocessor may have returned sys.exit(args.main(args) or 0)
RuntimeError
dataset/ETHPy150Open catkin/catkin_tools/catkin_tools/commands/catkin.py/catkin_main
2,820
def main(sysargs=None):
    try:
        catkin_main(sysargs)
    except __HOLE__:
        print('Interrupted by user!')
KeyboardInterrupt
dataset/ETHPy150Open catkin/catkin_tools/catkin_tools/commands/catkin.py/main
2,821
def __getattribute__(self, name):
    try:
        attr = object.__getattribute__(self, name)
    except __HOLE__:
        raise AttributeError("'{}' object has no attribute '{}'".format(
            self.__class__.__name__, name))
    if name == 'close' or name.startswith('_') or not hasattr(attr, '__call__'):
        # a 'local' or internal attribute, or a non-callable
        return attr
    # it's an asynchronous callable
    # return a callable wrapper for the attribute that will
    # run in its own IOLoop
    def wrapper(clb, *args, **kwargs):
        fn = functools.partial(clb, *args, **kwargs)
        return self.io_loop.run_sync(fn)
    return functools.partial(wrapper, attr)
AttributeError
dataset/ETHPy150Open nephics/tornado-couchdb/couch/couch.py/BlockingCouch.__getattribute__
2,822
def test_nullbooleanfield_blank(self):
    """
    Regression test for #13071: NullBooleanField should not throw
    a validation error when given a value of None.
    """
    nullboolean = NullBooleanModel(nbfield=None)
    try:
        nullboolean.full_clean()
    except __HOLE__ as e:
        self.fail("NullBooleanField failed validation with value of None: %s" % e.messages)
ValidationError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/tests/regressiontests/model_fields/tests.py/BasicFieldTests.test_nullbooleanfield_blank
2,823
def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'profitbricks', vm_['profile']) is False: return False except __HOLE__: pass datacenter_id = get_datacenter_id() conn = get_conn() data = None # Apply component overrides to the size from the cloud profile config. vm_size = override_size(vm_) # Retrieve list of SSH public keys ssh_keys = get_public_keys(vm_) # Fetch image and construct volume image = get_image(conn, vm_) volume = Volume( name='{0} Storage'.format(vm_['name']), size=vm_size['disk'], image=image['id'], ssh_keys=ssh_keys ) # Construct server server = Server( name=vm_['name'], ram=vm_size['ram'], cores=vm_size['cores'], create_volumes=[volume] ) salt.utils.cloud.fire_event( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), {'name': vm_['name']}, transport=__opts__['transport'] ) try: data = conn.create_server(datacenter_id=datacenter_id, server=server) _wait_for_completion(conn, data, 120, "create_server") except Exception as exc: # pylint: disable=W0703 log.error( 'Error creating {0} on ProfitBricks\n\n' 'The following exception was thrown by the profitbricks library ' 'when trying to run the initial deployment: \n{1}'.format( vm_['name'], exc ), exc_info_on_loglevel=logging.DEBUG ) return False create_network_interfaces(conn, datacenter_id, data['id'], vm_) set_public_lan(conn, vm_) def __query_node_data(vm_, data): ''' Query node data until node becomes available. ''' running = False try: data = show_instance(vm_['name'], 'action') if not data: return False log.debug( 'Loaded node data for {0}:\nname: {1}\nstate: {2}'.format( vm_['name'], pprint.pformat(data['name']), data['vmState'] ) ) except Exception as err: log.error( 'Failed to get nodes list: {0}'.format( err ), # Show the trackback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) # Trigger a failure in the wait for IP function return False running = data['vmState'] == 'RUNNING' if not running: # Still not running, trigger another iteration return if ssh_interface(vm_) == 'private_lan' and data['private_ips']: vm_['ssh_host'] = data['private_ips'][0] if ssh_interface(vm_) != 'private_lan' and data['public_ips']: vm_['ssh_host'] = data['public_ips'][0] return data try: data = salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_, data), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(str(exc.message)) log.debug('VM is now running') log.info('Created Cloud VM {0[name]!r}'.format(vm_)) log.debug( '{0[name]!r} VM creation details:\n{1}'.format( vm_, pprint.pformat(data) ) ) salt.utils.cloud.fire_event( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], }, transport=__opts__['transport'] ) if 'ssh_host' in vm_: vm_['key_filename'] = get_key_filename(vm_) ret = salt.utils.cloud.bootstrap(vm_, __opts__) ret.update(data) return ret else: raise SaltCloudSystemExit('A valid IP address was not found.')
AttributeError
dataset/ETHPy150Open saltstack/salt/salt/cloud/clouds/profitbricks.py/create
2,824
def complete(self, text, state):
    if text.startswith("#"):
        matches = self.reader_matches(text)
    elif "." in text:
        matches = self.attr_matches(text)
    else:
        matches = self.global_matches(text)
    try:
        return matches[state]
    except __HOLE__:
        return None
IndexError
dataset/ETHPy150Open hylang/hy/hy/completer.py/Completer.complete
2,825
@contextlib.contextmanager
def completion(completer=None):
    delims = "()[]{} "
    if not completer:
        completer = Completer()

    if docomplete:
        readline.set_completer(completer.complete)
        readline.set_completer_delims(delims)

        history = os.path.expanduser("~/.hy-history")
        readline.parse_and_bind("set blink-matching-paren on")

        try:
            readline.read_history_file(history)
        except __HOLE__:
            open(history, 'a').close()

        readline.parse_and_bind(readline_bind)

    yield

    if docomplete:
        readline.write_history_file(history)
IOError
dataset/ETHPy150Open hylang/hy/hy/completer.py/completion
2,826
def key(self, identifier=None, security_level=None):
    """
    Tries to find an encryption key, first using the ``identifier`` and if
    that fails or isn't provided using the ``security_level``.
    Returns ``None`` if nothing matches.
    """
    if identifier:
        try:
            return self._encryption_keys[identifier]
        except __HOLE__:
            pass
    if security_level:
        for key in self._encryption_keys.values():
            if key.level == security_level:
                return key
KeyError
dataset/ETHPy150Open georgebrock/1pass/onepassword/keychain.py/Keychain.key
2,827
def choice_detail(request, app_label, module_name, field_name, field_val, models):
    m, f = lookup_field(app_label, module_name, field_name, models)
    try:
        label = dict(f.field.choices)[field_val]
    except __HOLE__:
        raise Http404('Invalid choice value given')
    obj_list = m.objects(**{f.field.name: field_val})

    numitems = request.GET.get('items')
    items_per_page = [25, 50, 100]
    if numitems and numitems.isdigit() and int(numitems) > 0:
        paginator = Paginator(obj_list, numitems)
    else:
        # fall back to default
        paginator = Paginator(obj_list, items_per_page[0])

    page = request.GET.get('page')
    try:
        obj_list_page = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        obj_list_page = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page.
        obj_list_page = paginator.page(paginator.num_pages)

    return render_to_response(
        'databrowse/choice_detail.html',
        {
            'model': m,
            'field': f,
            'value': label,
            'object_list': obj_list_page,
            'items_per_page': items_per_page,
        }
    )
KeyError
dataset/ETHPy150Open Alir3z4/django-databrowse/django_databrowse/views.py/choice_detail
2,828
def run(self):
    self.loop = asyncio.get_event_loop()
    self.coro = self.loop.create_server(self.factory, '127.0.0.1', self.port)
    self.server = self.loop.run_until_complete(self.coro)
    try:
        self.loop.run_forever()
    except __HOLE__:
        pass
    finally:
        self.server.close()
        self.loop.close()
KeyboardInterrupt
dataset/ETHPy150Open ekulyk/PythonPusherClient/tests/pusherserver/pusherserverasyncio.py/Pusher.run
2,829
def _get_anno_version(self):
    """
    Extract the snpEff or VEP version used to annotate the VCF
    """
    # default to unknown version
    self.args.version = None

    if self.args.anno_type == "snpEff":
        try:
            version_string = self.vcf_reader['SnpEffVersion']['SnpEffVersion']
        except __HOLE__:
            error = ("\nWARNING: VCF is not annotated with snpEff, check documentation at:\n"\
                     "http://gemini.readthedocs.org/en/latest/content/functional_annotation.html#stepwise-installation-and-usage-of-snpeff\n")
            sys.exit(error)

        # e.g., "SnpEff 3.0a (build 2012-07-08), by Pablo Cingolani"
        # or "3.3c (build XXXX), by Pablo Cingolani"
        version_string = version_string.replace('"', '')  # No quotes
        toks = version_string.split()

        if "SnpEff" in toks[0]:
            self.args.raw_version = toks[1]  # SnpEff *version*, etc
        else:
            self.args.raw_version = toks[0]  # *version*, etc
        # e.g., 3.0a -> 3
        self.args.maj_version = int(self.args.raw_version.split('.')[0])
    elif self.args.anno_type == "VEP":
        pass
KeyError
dataset/ETHPy150Open arq5x/gemini/gemini/gemini_load_chunk.py/GeminiLoader._get_anno_version
2,830
def _get_vep_csq(self, reader):
    """
    Test whether the VCF header meets expectations for
    proper execution of VEP for use with Gemini.
    """
    required = ["Consequence"]
    try:
        parts = reader["CSQ"]["Description"].strip().replace('"', '').split("Format: ")[-1].split("|")
        all_found = True
        for check in required:
            if check not in parts:
                all_found = False
                break
        if all_found:
            return parts
    except __HOLE__:
        # Did not find expected fields
        pass
    error = "\nERROR: Check gemini docs for the recommended VCF annotation with VEP"\
            "\nhttp://gemini.readthedocs.org/en/latest/content/functional_annotation.html#stepwise-installation-and-usage-of-vep"
    sys.exit(error)
KeyError
dataset/ETHPy150Open arq5x/gemini/gemini/gemini_load_chunk.py/GeminiLoader._get_vep_csq
2,831
def _prepare_variation(self, var, anno_keys): """private method to collect metrics for a single variant (var) in a VCF file. Extracts variant information, variant impacts and extra fields for annotation. """ extra_fields = {} # these metric require that genotypes are present in the file call_rate = None hwe_p_value = None pi_hat = None inbreeding_coeff = None hom_ref = het = hom_alt = unknown = None # only compute certain metrics if genoypes are available if not self.args.no_genotypes and not self.args.no_load_genotypes: hom_ref = var.num_hom_ref hom_alt = var.num_hom_alt het = var.num_het unknown = var.num_unknown call_rate = var.call_rate aaf = var.aaf hwe_p_value, inbreeding_coeff = \ popgen.get_hwe_likelihood(hom_ref, het, hom_alt, aaf) pi_hat = var.nucl_diversity else: aaf = infotag.extract_aaf(var) if not isinstance(aaf, (float, int)): if aaf is not None: aaf = max(aaf) ############################################################ # collect annotations from gemini's custom annotation files # but only if the size of the variant is <= 50kb ############################################################ if var.end - var.POS < 50000: pfam_domain = annotations.get_pfamA_domains(var) cyto_band = annotations.get_cyto_info(var) rs_ids = annotations.get_dbsnp_info(var) clinvar_info = annotations.get_clinvar_info(var) in_dbsnp = 0 if rs_ids is None else 1 rmsk_hits = annotations.get_rmsk_info(var) in_cpg = annotations.get_cpg_island_info(var) in_segdup = annotations.get_segdup_info(var) is_conserved = annotations.get_conservation_info(var) esp = annotations.get_esp_info(var) thousandG = annotations.get_1000G_info(var) recomb_rate = annotations.get_recomb_info(var) gms = annotations.get_gms(var) grc = annotations.get_grc(var) in_cse = annotations.get_cse(var) encode_tfbs = annotations.get_encode_tfbs(var) encode_dnaseI = annotations.get_encode_dnase_clusters(var) encode_cons_seg = annotations.get_encode_consensus_segs(var) gerp_el = annotations.get_gerp_elements(var) vista_enhancers = annotations.get_vista_enhancers(var) cosmic_ids = annotations.get_cosmic_info(var) fitcons = annotations.get_fitcons(var) Exac = annotations.get_exac_info(var) #load CADD scores by default if self.args.skip_cadd is False: (cadd_raw, cadd_scaled) = annotations.get_cadd_scores(var) else: (cadd_raw, cadd_scaled) = (None, None) # load the GERP score for this variant by default. 
gerp_bp = None if self.args.skip_gerp_bp is False: gerp_bp = annotations.get_gerp_bp(var) # the variant is too big to annotate else: pfam_domain = None cyto_band = None rs_ids = None clinvar_info = annotations.ClinVarInfo() in_dbsnp = None rmsk_hits = None in_cpg = None in_segdup = None is_conserved = None esp = annotations.ESPInfo(False, -1, -1, -1, 0) thousandG = annotations.EMPTY_1000G Exac = annotations.EXAC_EMPTY recomb_rate = None gms = annotations.GmsTechs(None, None, None) grc = None in_cse = None encode_tfbs = None encode_dnaseI = annotations.ENCODEDnaseIClusters(None, None) encode_cons_seg = annotations.ENCODESegInfo(None, None, None, None, None, None) gerp_el = None vista_enhancers = None cosmic_ids = None fitcons = None cadd_raw = None cadd_scaled = None gerp_bp = None top_impact = empty if anno_keys == {}: impacts = [] else: impacts = [] if self.args.anno_type in ("all", "snpEff"): try: if "EFF" in anno_keys: impacts += [geneimpacts.OldSnpEff(e, anno_keys["EFF"]) for e in var.INFO["EFF"].split(",")] elif "ANN" in anno_keys: impacts += [geneimpacts.SnpEff(e, anno_keys["ANN"]) for e in var.INFO["ANN"].split(",")] except __HOLE__: pass if self.args.anno_type in ("all", "VEP"): try: impacts += [geneimpacts.VEP(e, anno_keys["CSQ"]) for e in var.INFO["CSQ"].split(",")] except KeyError: pass for i, im in enumerate(impacts, start=1): im.anno_id = i if impacts != []: top_impact = geneimpacts.Effect.top_severity(impacts) if isinstance(top_impact, list): top_impact = top_impact[0] filter = None if var.FILTER is not None and var.FILTER != ".": if isinstance(var.FILTER, list): filter = ";".join(var.FILTER) else: filter = var.FILTER vcf_id = None if var.ID is not None and var.ID != ".": vcf_id = var.ID chrom = var.CHROM if var.CHROM.startswith("chr") else "chr" + var.CHROM clinvar_gene_phenotype = None if top_impact.gene is not None: clinvar_gene_phenotype = self.clinvar_chrom_gene_lookup.get((chrom[3:], top_impact.gene)) # build up numpy arrays for the genotype information. # these arrays will be pickled-to-binary, compressed, # and loaded as BLOB values (see compression.pack_blob) gt_phred_ll_homref = gt_phred_ll_het = gt_phred_ll_homalt = None if not (self.args.no_genotypes or self.args.no_load_genotypes): gt_bases = var.gt_bases gt_types = var.gt_types gt_phases = var.gt_phases gt_depths = var.gt_depths gt_ref_depths = var.gt_ref_depths gt_alt_depths = var.gt_alt_depths gt_quals = var.gt_quals #gt_copy_numbers = np.array(var.gt_copy_numbers, np.float32) # 1.0 2.0 2.1 -1 gt_copy_numbers = None gt_phred_ll_homref = var.gt_phred_ll_homref gt_phred_ll_het = var.gt_phred_ll_het gt_phred_ll_homalt = var.gt_phred_ll_homalt # tally the genotypes self._update_sample_gt_counts(gt_types) else: gt_bases = gt_types = gt_phases = gt_depths = gt_ref_depths = None gt_alt_depths = gt_quals = gt_copy_numbers = None if self.args.skip_info_string: info = None else: info = dict(var.INFO) assert isinstance(thousandG.aaf_AMR, (int, float)) # were functional impacts predicted by SnpEFF or VEP? 
# if so, build up a row for each of the impacts / transcript variant_impacts = [] for idx, impact in enumerate(impacts or [], start=1): var_impact = dict(variant_id=self.v_id, anno_id=idx, gene=impact.gene, transcript=impact.transcript, is_exonic=impact.is_exonic, is_coding=impact.is_coding, is_lof=impact.is_lof, is_splicing=impact.is_splicing, exon=impact.exon, codon_change=impact.codon_change, aa_change=impact.aa_change, aa_length=impact.aa_length, biotype=impact.biotype, impact=impact.top_consequence, impact_so=impact.so, impact_severity=impact.effect_severity, polyphed_pred=impact.polyphen_pred, polyphen_score=impact.polyphen_score, sift_pred=impact.sift_pred, sift_score=impact.sift_score) variant_impacts.append(var_impact) # extract structural variants sv = svs.StructuralVariant(var) ci_left = sv.get_ci_left() ci_right = sv.get_ci_right() if top_impact is not empty: for dbkey, infokey in self._extra_effect_fields: extra_fields[dbkey] = top_impact.effects[infokey] # construct the core variant record. # 1 row per variant to VARIANTS table variant = dict(chrom=chrom, start=var.start, end=var.end, vcf_id=vcf_id, variant_id=self.v_id, anno_id=top_impact.anno_id, ref=var.REF, alt=','.join([x or "" for x in var.ALT]), qual=var.QUAL, filter=filter, type=var.var_type, sub_type=var.var_subtype, gts=pack_blob(gt_bases), gt_types=pack_blob(gt_types), gt_phases=pack_blob(gt_phases), gt_depths=pack_blob(gt_depths), gt_ref_depths=pack_blob(gt_ref_depths), gt_alt_depths=pack_blob(gt_alt_depths), gt_quals=pack_blob(gt_quals), gt_copy_numbers=pack_blob(gt_copy_numbers), gt_phred_ll_homref=pack_blob(gt_phred_ll_homref), gt_phred_ll_het=pack_blob(gt_phred_ll_het), gt_phred_ll_homalt=pack_blob(gt_phred_ll_homalt), call_rate=call_rate, in_dbsnp=bool(in_dbsnp), rs_ids=rs_ids, sv_cipos_start_left=ci_left[0], sv_cipos_end_left=ci_left[1], sv_cipos_start_right=ci_right[0], sv_cipos_end_right=ci_right[1], sv_length=sv.get_length(), sv_is_precise=sv.is_precise(), sv_tool=sv.get_sv_tool(), sv_evidence_type=sv.get_evidence_type(), sv_event_id=sv.get_event_id(), sv_mate_id=sv.get_mate_id(), sv_strand=sv.get_strand(), in_omim=bool(clinvar_info.clinvar_in_omim), clinvar_sig=clinvar_info.clinvar_sig, clinvar_disease_name=clinvar_info.clinvar_disease_name, clinvar_dbsource=clinvar_info.clinvar_dbsource, clinvar_dbsource_id=clinvar_info.clinvar_dbsource_id, clinvar_origin=clinvar_info.clinvar_origin, clinvar_dsdb=clinvar_info.clinvar_dsdb, clinvar_dsdbid=clinvar_info.clinvar_dsdbid, clinvar_disease_acc=clinvar_info.clinvar_disease_acc, clinvar_in_locus_spec_db=bool(clinvar_info.clinvar_in_locus_spec_db), clinvar_on_diag_assay=bool(clinvar_info.clinvar_on_diag_assay), clinvar_causal_allele=clinvar_info.clinvar_causal_allele, clinvar_gene_phenotype=clinvar_gene_phenotype, geno2mp_hpo_ct=annotations.get_geno2mp_ct(var), pfam_domain=pfam_domain, cyto_band=cyto_band, rmsk=rmsk_hits, in_cpg_island=bool(in_cpg), in_segdup=bool(in_segdup), is_conserved=bool(is_conserved), gerp_bp_score=gerp_bp, gerp_element_pval=gerp_el, num_hom_ref=hom_ref, num_het=het, num_hom_alt=hom_alt, num_unknown=unknown, aaf=aaf, hwe=hwe_p_value, inbreeding_coeff=inbreeding_coeff, pi=pi_hat, recomb_rate=recomb_rate, gene=top_impact.gene, transcript=top_impact.transcript, is_exonic=top_impact.is_exonic, is_coding=top_impact.is_coding, is_splicing=top_impact.is_splicing, is_lof=top_impact.is_lof, exon=top_impact.exon, codon_change=top_impact.codon_change, aa_change=top_impact.aa_change, aa_length=top_impact.aa_length, biotype=top_impact.biotype, 
impact=top_impact.top_consequence, impact_so=top_impact.so, impact_severity=top_impact.effect_severity, polyphen_pred=top_impact.polyphen_pred, polyphen_score=top_impact.polyphen_score, sift_pred=top_impact.sift_pred, sift_score=top_impact.sift_score, anc_allele=infotag.get_ancestral_allele(var), rms_bq=infotag.get_rms_bq(var), cigar=infotag.get_cigar(var), depth=infotag.get_depth(var), strand_bias=infotag.get_strand_bias(var), rms_map_qual=infotag.get_rms_map_qual(var), in_hom_run=infotag.get_homopol_run(var), num_mapq_zero=infotag.get_map_qual_zero(var), num_alleles=infotag.get_num_of_alleles(var), num_reads_w_dels=infotag.get_frac_dels(var), haplotype_score=infotag.get_haplotype_score(var), qual_depth=infotag.get_quality_by_depth(var), allele_count=infotag.get_allele_count(var), allele_bal=infotag.get_allele_bal(var), # bools? in_hm2=infotag.in_hm2(var), in_hm3=infotag.in_hm3(var), is_somatic=infotag.is_somatic(var), somatic_score=infotag.get_somatic_score(var), in_esp=esp.found, aaf_esp_ea=esp.aaf_EA, aaf_esp_aa=esp.aaf_AA, aaf_esp_all=esp.aaf_ALL, exome_chip=bool(esp.exome_chip), in_1kg=thousandG.found, aaf_1kg_amr=thousandG.aaf_AMR, aaf_1kg_eas=thousandG.aaf_EAS, aaf_1kg_sas=thousandG.aaf_SAS, aaf_1kg_afr=thousandG.aaf_AFR, aaf_1kg_eur=thousandG.aaf_EUR, aaf_1kg_all=thousandG.aaf_ALL, grc=grc, gms_illumina=gms.illumina, gms_solid=gms.solid, gms_iontorrent=gms.iontorrent, in_cse=in_cse, encode_tfbs=encode_tfbs, encode_dnaseI_cell_count=encode_dnaseI.cell_count, encode_dnaseI_cell_list=encode_dnaseI.cell_list, encode_consensus_gm12878=encode_cons_seg.gm12878, encode_consensus_h1hesc=encode_cons_seg.h1hesc, encode_consensus_helas3=encode_cons_seg.helas3, encode_consensus_hepg2=encode_cons_seg.hepg2, encode_consensus_huvec=encode_cons_seg.huvec, encode_consensus_k562=encode_cons_seg.k562, vista_enhancers=vista_enhancers, cosmic_ids=cosmic_ids, info=pack_blob(info), cadd_raw=cadd_raw, cadd_scaled=cadd_scaled, fitcons=fitcons, in_exac=Exac.found, aaf_exac_all=Exac.aaf_ALL, aaf_adj_exac_all=Exac.adj_aaf_ALL, aaf_adj_exac_afr=Exac.aaf_AFR, aaf_adj_exac_amr=Exac.aaf_AMR, aaf_adj_exac_eas=Exac.aaf_EAS, aaf_adj_exac_fin=Exac.aaf_FIN, aaf_adj_exac_nfe=Exac.aaf_NFE, aaf_adj_exac_oth=Exac.aaf_OTH, aaf_adj_exac_sas=Exac.aaf_SAS, exac_num_het=Exac.num_het, exac_num_hom_alt=Exac.num_hom_alt, exac_num_chroms=Exac.num_chroms) variant['max_aaf_all'] = max(-1, variant['aaf_esp_ea'], variant['aaf_esp_aa'], variant['aaf_1kg_amr'], variant['aaf_1kg_eas'], variant['aaf_1kg_sas'], variant['aaf_1kg_afr'], variant['aaf_1kg_eur'], variant['aaf_adj_exac_afr'], variant['aaf_adj_exac_amr'], variant['aaf_adj_exac_eas'], variant['aaf_adj_exac_nfe'], variant['aaf_adj_exac_sas']) variant.update(self._extra_empty) return variant, variant_impacts, extra_fields
KeyError
dataset/ETHPy150Open arq5x/gemini/gemini/gemini_load_chunk.py/GeminiLoader._prepare_variation
2,832
def test01d_errors(self):
    "Testing the Error handlers."
    # string-based
    print "\nBEGIN - expecting GEOS_ERROR; safe to ignore.\n"
    for err in errors:
        try:
            g = fromstr(err.wkt)
        except (GEOSException, __HOLE__):
            pass
    print "\nEND - expecting GEOS_ERROR; safe to ignore.\n"

    class NotAGeometry(object):
        pass

    # Some other object
    self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
    # None
    self.assertRaises(TypeError, GEOSGeometry, None)
    # Bad WKB
    self.assertRaises(GEOSException, GEOSGeometry, buffer('0'))
ValueError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/contrib/gis/tests/test_geos.py/GEOSTest.test01d_errors
2,833
def dtype_specs(self):
    """
    Return a tuple (python type, c type, numpy typenum) that corresponds
    to self.dtype.

    This function is used internally as part of C code generation.
    """
    try:
        return {
            'float16': (float, 'npy_float16', 'NPY_FLOAT16'),
            'float32': (float, 'npy_float32', 'NPY_FLOAT32'),
            'float64': (float, 'npy_float64', 'NPY_FLOAT64'),
            'uint8': (int, 'npy_uint8', 'NPY_UINT8'),
            'int8': (int, 'npy_int8', 'NPY_INT8'),
            'uint16': (int, 'npy_uint16', 'NPY_UINT16'),
            'int16': (int, 'npy_int16', 'NPY_INT16'),
            'uint32': (int, 'npy_uint32', 'NPY_UINT32'),
            'int32': (int, 'npy_int32', 'NPY_INT32'),
            'uint64': (int, 'npy_uint64', 'NPY_UINT64'),
            'int64': (int, 'npy_int64', 'NPY_INT64'),
            # 'complex128': (complex, 'theano_complex128', 'NPY_COMPLEX128'),
            # 'complex64': (complex, 'theano_complex64', 'NPY_COMPLEX64')
        }[self.dtype]
    except __HOLE__:
        raise TypeError("Unsupported dtype for %s: %s" %
                        (self.__class__.__name__, self.dtype))
KeyError
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/sandbox/gpuarray/type.py/GpuArrayType.dtype_specs
2,834
def _should_fail(self, f):
    try:
        f(self.unit_test)
        self.fail(
            'AssertionError is expected to be raised, but none is raised')
    except __HOLE__ as e:
        # check if the detail is included in the error object
        self.assertIn('first error message:', str(e))
AssertionError
dataset/ETHPy150Open pfnet/chainer/tests/chainer_tests/testing_tests/test_condition.py/_should_fail
2,835
def run(self):
    # check repo root for a package.json file
    if not os.path.isfile(self.repo_root+"/package.json"):
        print("Creating package.json...", file=sys.stderr)
        self.create_package_json()

    cmds = ["npm", "install", "--progress-false"]
    try:
        run(cmds, cwd=self.repo_root)
    except __HOLE__ as e:
        print("Failed to run `npm install`: %s" % e, file=sys.stderr)
        print("npm is required to build a pyxley app", file=sys.stderr)
        raise
OSError
dataset/ETHPy150Open stitchfix/pyxley/pyxley/utils/npm.py/NPM.run
2,836
def sort(ctx_name, defaultfield=None, defaultdirection=DEFAULT):
    """Sort queryset found in TemplateResponse context under ``ctx_name``."""
    def decorator(view_func):
        @wraps(view_func)
        def _wrapped_view(request, *args, **kwargs):
            response = view_func(request, *args, **kwargs)
            try:
                ctx = response.context_data
            except __HOLE__:
                return response
            ctx["sort"] = Sort(request, defaultfield, defaultdirection)
            try:
                sortqs = ctx[ctx_name].order_by(*ctx["sort"].order_by)
                str(sortqs.query)  # hack to force evaluation of sort arguments
            except FieldError:
                pass
            else:
                ctx[ctx_name] = sortqs
            return response

        return _wrapped_view

    return decorator
AttributeError
dataset/ETHPy150Open mozilla/moztrap/moztrap/view/lists/sort.py/sort
2,837
@task @write def add_validation_jobs(pks, job_pk, **kw): log.info('[%s@None] Adding validation jobs for addons starting at: %s ' ' for job: %s' % (len(pks), pks[0], job_pk)) job = ValidationJob.objects.get(pk=job_pk) curr_ver = job.curr_max_version.version_int target_ver = job.target_version.version_int prelim_app = list(amo.UNDER_REVIEW_STATUSES) + [amo.STATUS_BETA] for addon in Addon.objects.filter(pk__in=pks): ids = [] base = addon.versions.filter(apps__application=job.application, apps__max__version_int__gte=curr_ver, apps__max__version_int__lt=target_ver) already_compat = addon.versions.filter( files__status=amo.STATUS_PUBLIC, apps__max__version_int__gte=target_ver) if already_compat.count(): log.info('Addon %s already has a public version %r which is ' 'compatible with target version of app %s %s (or newer)' % (addon.pk, [v.pk for v in already_compat.all()], job.application, job.target_version)) continue try: public = (base.filter(files__status=amo.STATUS_PUBLIC) .latest('id')) except __HOLE__: public = None if public: ids.extend([f.id for f in public.files.all()]) ids.extend(base.filter(files__status__in=prelim_app, id__gt=public.id) .values_list('files__id', flat=True)) else: try: prelim = (base.filter(files__status__in=amo.LITE_STATUSES) .latest('id')) except ObjectDoesNotExist: prelim = None if prelim: ids.extend([f.id for f in prelim.files.all()]) ids.extend(base.filter(files__status__in=prelim_app, id__gt=prelim.id) .values_list('files__id', flat=True)) else: ids.extend(base.filter(files__status__in=prelim_app) .values_list('files__id', flat=True)) ids = set(ids) # Just in case. log.info('Adding %s files for validation for ' 'addon: %s for job: %s' % (len(ids), addon.pk, job_pk)) for id in set(ids): result = ValidationResult.objects.create(validation_job_id=job_pk, file_id=id) bulk_validate_file.delay(result.pk)
ObjectDoesNotExist
dataset/ETHPy150Open mozilla/addons-server/src/olympia/zadmin/tasks.py/add_validation_jobs
2,838
def load_collectors(paths=None, filter=None): """ Scan for collectors to load from path """ # Initialize return value collectors = {} log = logging.getLogger('diamond') if paths is None: return if isinstance(paths, basestring): paths = map(str, paths.split(',')) print paths paths = map(str.strip, paths) load_include_path(paths) for path in paths: # Get a list of files in the directory, if the directory exists if not os.path.exists(path): raise OSError("Directory does not exist: %s" % path) if path.endswith('tests') or path.endswith('fixtures'): return collectors # Load all the files in path for f in os.listdir(path): # Are we a directory? If so process down the tree fpath = os.path.join(path, f) if os.path.isdir(fpath): subcollectors = load_collectors([fpath]) for key in subcollectors: collectors[key] = subcollectors[key] # Ignore anything that isn't a .py file elif (os.path.isfile(fpath) and len(f) > 3 and f[-3:] == '.py' and f[0:4] != 'test' and f[0] != '.'): # Check filter if filter and os.path.join(path, f) != filter: continue modname = f[:-3] try: # Import the module mod = __import__(modname, globals(), locals(), ['*']) except (KeyboardInterrupt, __HOLE__), err: log.error( "System or keyboard interrupt " "while loading module %s" % modname) if isinstance(err, SystemExit): sys.exit(err.code) raise KeyboardInterrupt except: # Log error log.error("Failed to import module: %s. %s", modname, traceback.format_exc()) continue # Find all classes defined in the module for attrname in dir(mod): attr = getattr(mod, attrname) # Only attempt to load classes that are infact classes # are Collectors but are not the base Collector class if (inspect.isclass(attr) and issubclass(attr, Collector) and attr != Collector): if attrname.startswith('parent_'): continue # Get class name fqcn = '.'.join([modname, attrname]) try: # Load Collector class cls = load_dynamic_class(fqcn, Collector) # Add Collector class collectors[cls.__name__] = cls except Exception: # Log error log.error( "Failed to load Collector: %s. %s", fqcn, traceback.format_exc()) continue # Return Collector classes return collectors
SystemExit
dataset/ETHPy150Open Yelp/fullerite/src/diamond/utils/classes.py/load_collectors
2,839
def add_timezone(value, tz=None): """If the value is naive, then the timezone is added to it. If no timezone is given, timezone.get_current_timezone() is used. """ tz = tz or timezone.get_current_timezone() try: if timezone.is_naive(value): return timezone.make_aware(value, tz) except __HOLE__: # 'datetime.date' object has no attribute 'tzinfo' dt = datetime.datetime.combine(value, datetime.time()) return timezone.make_aware(dt, tz) return value
AttributeError
dataset/ETHPy150Open caktus/django-timepiece/timepiece/utils/__init__.py/add_timezone
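The record above shows a common pattern: make a naive datetime timezone-aware, and fall back to building a datetime when the value is a plain date (which has no tzinfo attribute at all). A minimal stdlib-only sketch of the same idea, with UTC standing in for Django's current timezone; the names and dates below are illustrative, not part of the dataset.

import datetime

def make_aware(value, tz=datetime.timezone.utc):
    try:
        if value.tzinfo is None:                 # naive datetime
            return value.replace(tzinfo=tz)
    except AttributeError:                       # datetime.date has no tzinfo
        dt = datetime.datetime.combine(value, datetime.time())
        return dt.replace(tzinfo=tz)
    return value                                 # already aware

print(make_aware(datetime.datetime(2016, 1, 1)))   # naive datetime -> aware
print(make_aware(datetime.date(2016, 1, 1)))       # plain date -> aware datetime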
2,840
def get_wordlist(wordlist_name): """ Get an iterator of the specified word list. :param wordlist_name: Word list name :type wordlist_name: basestring :return: iterator with each line of file. :rtype: str """ if not isinstance(wordlist_name, str): raise TypeError("Expected basestring, got '%s' instead" % type(wordlist_name)) word_list_name = join(get_data_folder(), wordlist_name) try: with open(word_list_name, "rU") as f: for word in f: yield word.replace("\n", "").replace("\r", "") except __HOLE__ as e: raise PlecostWordListNotFound("Wordlist '%s' not found. Error: %s" % (wordlist_name, e))
IOError
dataset/ETHPy150Open iniqua/plecost/plecost_lib/libs/wordlist.py/get_wordlist
2,841
def unique_id_from_node(self, node): new_key = self.convert_key('pm_addr') assert new_key is not None try: result = node.driver_info[new_key] except __HOLE__: # Node cannot be identified return if self._has_port: new_port = self.convert_key('pm_port') assert new_port try: return '%s:%s' % (result, node.driver_info[new_port]) except KeyError: pass return result
KeyError
dataset/ETHPy150Open openstack/tripleo-common/tripleo_common/utils/nodes.py/PrefixedDriverInfo.unique_id_from_node
2,842
def unique_id_from_node(self, node): try: result = super(iBootDriverInfo, self).unique_id_from_node(node) except __HOLE__: return if node.driver_info.get('iboot_relay_id'): result = '%s#%s' % (result, node.driver_info['iboot_relay_id']) return result
IndexError
dataset/ETHPy150Open openstack/tripleo-common/tripleo_common/utils/nodes.py/iBootDriverInfo.unique_id_from_node
2,843
def _find_node_handler(fields): try: driver = fields['pm_type'] except __HOLE__: raise exception.InvalidNode('pm_type (ironic driver to use) is ' 'required', node=fields) return _find_driver_handler(driver)
KeyError
dataset/ETHPy150Open openstack/tripleo-common/tripleo_common/utils/nodes.py/_find_node_handler
2,844
def _get_node_id(node, handler, node_map): candidates = [] for mac in node.get('mac', []): try: candidates.append(node_map['mac'][mac.lower()]) except __HOLE__: pass unique_id = handler.unique_id_from_fields(node) if unique_id: try: candidates.append(node_map['pm_addr'][unique_id]) except KeyError: pass if len(candidates) > 1: raise exception.InvalidNode('Several candidates found for the same ' 'node data: %s' % candidates, node=node) elif candidates: return candidates[0]
KeyError
dataset/ETHPy150Open openstack/tripleo-common/tripleo_common/utils/nodes.py/_get_node_id
2,845
def ValueOf(self, expression, default_value=None, return_type=None): """Returns the value of an expression on a document. Args: expression: The expression string. default_value: The value to return if the expression cannot be evaluated. return_type: The type the expression should evaluate to. Used to create multiple sorts for ambiguous expressions. If None, the expression evaluates to the inferred type or first type of a field it encounters in a document. Returns: The value of the expression on the evaluator's document, or default_value if the expression cannot be evaluated on the document. Raises: ExpressionEvaluationError: sort expression cannot be evaluated because the expression or default value is malformed. Callers of ValueOf should catch and return error to user in response. QueryExpressionEvaluationError: same as ExpressionEvaluationError but these errors should return query as error status to users. """ expression_tree = Parse(expression) if not expression_tree.getType() and expression_tree.children: expression_tree = expression_tree.children[0] name = query_parser.GetQueryNodeText(expression_tree) schema = self._inverted_index.GetSchema() if (expression_tree.getType() == ExpressionParser.NAME and name in schema): contains_text_result = False for field_type in schema[name].type_list(): if field_type in search_util.TEXT_DOCUMENT_FIELD_TYPES: contains_text_result = True if (schema.IsType(name, document_pb.FieldValue.DATE) and not contains_text_result): if isinstance(default_value, basestring): try: default_value = search_util.DeserializeDate(default_value) except __HOLE__: raise QueryExpressionEvaluationError( 'Default text value is not appropriate for sort expression \'' + name + '\': failed to parse date \"' + default_value + '\"') result = default_value try: result = self._Eval(expression_tree, return_type=return_type) except _ExpressionError, e: logging.debug('Skipping expression %s: %s', expression, e) except search_util.UnsupportedOnDevError, e: logging.warning(e.args[0]) return result
ValueError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/api/search/stub/expression_evaluator.py/ExpressionEvaluator.ValueOf
2,846
def setUp(self): # Create method was not returning the created object with # the create() method try: self.config = Config.objects.get(group='test') except __HOLE__: self.config = Config(group='test') self.config.save() self.config.values.append(CustomValue(name='test_config', rawvalue=u'a nice config', formatter='text'))
DoesNotExist
dataset/ETHPy150Open LeightonStreet/LingoBarter/lingobarter/core/tests/test_models.py/TestConfig.setUp
2,847
def abs__file__(): """Set all modules' __file__ attribute to an absolute path""" for m in sys.modules.values(): if hasattr(m, '__loader__'): continue # don't mess with a PEP 302-supplied __file__ try: m.__file__ = os.path.abspath(m.__file__) except __HOLE__: continue
AttributeError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site.py/abs__file__
2,848
def _init_pathinfo(): """Return a set containing all existing directory entries from sys.path""" d = set() for dir in sys.path: try: if os.path.isdir(dir): dir, dircase = makepath(dir) d.add(dircase) except __HOLE__: continue return d
TypeError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site.py/_init_pathinfo
2,849
def addpackage(sitedir, name, known_paths): """Process a .pth file within the site-packages directory: For each line in the file, either combine it with sitedir to a path and add that to known_paths, or execute it if it starts with 'import '. """ if known_paths is None: _init_pathinfo() reset = 1 else: reset = 0 fullname = os.path.join(sitedir, name) try: f = open(fullname, "rU") except __HOLE__: return with f: for line in f: if line.startswith("#"): continue if line.startswith(("import ", "import\t")): exec line continue line = line.rstrip() dir, dircase = makepath(sitedir, line) if not dircase in known_paths and os.path.exists(dir): sys.path.append(dir) known_paths.add(dircase) if reset: known_paths = None return known_paths
IOError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site.py/addpackage
2,850
def __setup(self): if self.__lines: return data = None for dir in self.__dirs: for filename in self.__files: filename = os.path.join(dir, filename) try: fp = file(filename, "rU") data = fp.read() fp.close() break except __HOLE__: pass if data: break if not data: data = self.__data self.__lines = data.split('\n') self.__linecnt = len(self.__lines)
IOError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site.py/_Printer.__setup
2,851
def __call__(self): self.__setup() prompt = 'Hit Return for more, or q (and Return) to quit: ' lineno = 0 while 1: try: for i in range(lineno, lineno + self.MAXLINES): print self.__lines[i] except __HOLE__: break else: lineno += self.MAXLINES key = None while key is None: key = raw_input(prompt) if key not in ('', 'q'): key = None if key == 'q': break
IndexError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site.py/_Printer.__call__
2,852
def execsitecustomize(): """Run custom site specific code, if available.""" try: import sitecustomize except __HOLE__: pass
ImportError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site.py/execsitecustomize
2,853
def execusercustomize(): """Run custom user specific code, if available.""" try: import usercustomize except __HOLE__: pass
ImportError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site.py/execusercustomize
2,854
def remove_settings(self, filename, is_dir=False): full_name = os.path.join(test_dir, filename) if is_dir: shutil.rmtree(full_name) else: os.remove(full_name) # Also try to remove the compiled file; if it exists, it could # mess up later tests that depend upon the .py file not existing try: if sys.platform.startswith('java'): # Jython produces module$py.class files os.remove(re.sub(r'\.py$', '$py.class', full_name)) else: # CPython produces module.pyc files os.remove(full_name + 'c') except __HOLE__: pass
OSError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/tests/regressiontests/admin_scripts/tests.py/AdminScriptTestCase.remove_settings
2,855
def __new__(cls, name, bases, attrs): """ Checks for an inner ``Meta`` class with a ``mixin_for`` attribute containing the model that this model will be mixed into. Once found, copy over any model fields and methods onto the model being mixed into, and return it as the actual class definition for the mixin. """ if name == "ModelMixin": # Actual ModelMixin class definition. return super(ModelMixinBase, cls).__new__(cls, name, bases, attrs) try: mixin_for = attrs.pop("Meta").mixin_for if not issubclass(mixin_for, Model): raise TypeError except (TypeError, KeyError, __HOLE__): raise ImproperlyConfigured("The ModelMixin class '%s' requires " "an inner Meta class with the " "``mixin_for`` attribute defined, " "with a value that is a valid model.") # Copy fields and methods onto the model being mixed into, and # return it as the definition for the mixin class itself. for k, v in attrs.items(): if isinstance(v, Field): v.contribute_to_class(mixin_for, k) elif k != "__module__": setattr(mixin_for, k, v) return mixin_for
AttributeError
dataset/ETHPy150Open stephenmcd/mezzanine/mezzanine/utils/models.py/ModelMixinBase.__new__
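The metaclass in this record never creates the mixin class itself: it copies the mixin's fields and methods onto the model named in Meta.mixin_for and returns that model instead. Below is a stripped-down, Django-free sketch of the same trick (Python 3 syntax; every class name is made up for illustration).

class MixinMeta(type):
    def __new__(cls, name, bases, attrs):
        if name == 'Mixin':                      # creating the abstract base itself
            return super().__new__(cls, name, bases, attrs)
        target = attrs.pop('Meta').mixin_for     # class to graft the attributes onto
        for key, value in attrs.items():
            if not key.startswith('__'):
                setattr(target, key, value)
        return target                            # the "mixin" name ends up bound to the target

class Mixin(metaclass=MixinMeta):
    pass

class Invoice:
    pass

class InvoiceExtras(Mixin):
    class Meta:
        mixin_for = Invoice

    def reference(self):
        return 'INV-0001'

assert InvoiceExtras is Invoice
print(Invoice().reference())                     # INV-0001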
2,856
def execute_wf(wf, output_port): # Save the workflow in a temporary file temp_wf_fd, temp_wf = tempfile.mkstemp() try: f = open(temp_wf, 'w') f.write(wf) f.close() os.close(temp_wf_fd) # Clean the cache interpreter = get_default_interpreter() interpreter.flush() # Load the Pipeline from the temporary file vistrail = Vistrail() locator = XMLFileLocator(temp_wf) workflow = locator.load(Pipeline) # Build a Vistrail from this single Pipeline action_list = [] for module in workflow.module_list: action_list.append(('add', module)) for connection in workflow.connection_list: action_list.append(('add', connection)) action = vistrails.core.db.action.create_action(action_list) vistrail.add_action(action, 0L) vistrail.update_id_scope() tag = 'parallel flow' vistrail.addTag(tag, action.id) # Build a controller and execute controller = VistrailController() controller.set_vistrail(vistrail, None) controller.change_selected_version(vistrail.get_version_number(tag)) execution = controller.execute_current_workflow( custom_aliases=None, custom_params=None, extra_info=None, reason='API Pipeline Execution') # Build a list of errors errors = [] pipeline = vistrail.getPipeline(tag) execution_errors = execution[0][0].errors if execution_errors: for key in execution_errors: module = pipeline.modules[key] msg = '%s: %s' %(module.name, execution_errors[key]) errors.append(msg) # Get the execution log from the controller try: module_log = controller.log.workflow_execs[0].item_execs[0] except __HOLE__: errors.append("Module log not found") return dict(errors=errors) else: machine = controller.log.workflow_execs[0].machines[ module_log.machine_id] xml_log = serialize(module_log) machine_log = serialize(machine) # Get the output value output = None if not execution_errors: executed_module, = execution[0][0].executed executed_module = execution[0][0].objects[executed_module] try: output = executed_module.get_output(output_port) except ModuleError: errors.append("Output port not found: %s" % output_port) return dict(errors=errors) if isinstance(output, Module): raise TypeError("Output value is a Module instance") # Return the dictionary, that will be sent back to the client return dict(errors=errors, output=output, xml_log=xml_log, machine_log=machine_log) finally: os.unlink(temp_wf) ###############################################################################
IndexError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/packages/parallelflow/map.py/execute_wf
2,857
def _get_last_lobbyist_represent_data(self, data): try: last_lobbyist_represent_data = LobbyistRepresentData.objects.filter(scrape_time__isnull=False).latest('scrape_time') except __HOLE__: last_lobbyist_represent_data = None if last_lobbyist_represent_data is not None: for key in ['name', 'domain', 'type']: if data[key] != getattr(last_lobbyist_represent_data, key): last_lobbyist_represent_data = None break return last_lobbyist_represent_data
ObjectDoesNotExist
dataset/ETHPy150Open ofri/Open-Knesset/lobbyists/scrapers/lobbyist_represent.py/LobbyistRepresentListStorage._get_last_lobbyist_represent_data
2,858
def modified(date=None, etag=None): """ Checks to see if the page has been modified since the version in the requester's cache. When you publish pages, you can include `Last-Modified` and `ETag` with the date the page was last modified and an opaque token for the particular version, respectively. When readers reload the page, the browser sends along the modification date and etag value for the version it has in its cache. If the page hasn't changed, the server can just return `304 Not Modified` and not have to send the whole page again. This function takes the last-modified date `date` and the ETag `etag` and checks the headers to see if they match. If they do, it returns `True`, or otherwise it raises NotModified error. It also sets `Last-Modified` and `ETag` output headers. """ try: from __builtin__ import set except __HOLE__: # for python 2.3 from sets import Set as set n = set([x.strip('" ') for x in web.ctx.env.get('HTTP_IF_NONE_MATCH', '').split(',')]) m = net.parsehttpdate(web.ctx.env.get('HTTP_IF_MODIFIED_SINCE', '').split(';')[0]) validate = False if etag: if '*' in n or etag in n: validate = True if date and m: # we subtract a second because # HTTP dates don't have sub-second precision if date-datetime.timedelta(seconds=1) <= m: validate = True if date: lastmodified(date) if etag: web.header('ETag', '"' + etag + '"') if validate: raise web.notmodified() else: return True
ImportError
dataset/ETHPy150Open rouge8/20questions/web/http.py/modified
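This record implements an HTTP conditional GET: when the client's If-None-Match ETag or If-Modified-Since date still matches the current version, the server can answer 304 Not Modified instead of resending the page. A framework-free sketch of just that matching logic follows; the header names are standard HTTP, everything else is illustrative.

import datetime
from email.utils import parsedate_to_datetime

def is_not_modified(headers, last_modified=None, etag=None):
    # ETag check: the client may send several quoted tags, or "*".
    tags = {t.strip(' "') for t in headers.get('If-None-Match', '').split(',')}
    if etag and (etag in tags or '*' in tags):
        return True
    # Date check: HTTP dates have whole-second precision, hence the one-second
    # slack. last_modified must be timezone-aware, like the parsed header date.
    since = headers.get('If-Modified-Since')
    if last_modified and since:
        cached = parsedate_to_datetime(since)
        return last_modified - datetime.timedelta(seconds=1) <= cached
    return False

print(is_not_modified({'If-None-Match': '"abc123"'}, etag='abc123'))   # True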
2,859
def sync_plans(): """ Syncronizes all plans from the Stripe API """ try: plans = stripe.Plan.auto_paging_iter() except __HOLE__: plans = iter(stripe.Plan.all().data) for plan in plans: defaults = dict( amount=utils.convert_amount_for_db(plan["amount"], plan["currency"]), currency=plan["currency"] or "", interval=plan["interval"], interval_count=plan["interval_count"], name=plan["name"], statement_descriptor=plan["statement_descriptor"] or "", trial_period_days=plan["trial_period_days"] ) obj, created = models.Plan.objects.get_or_create( stripe_id=plan["id"], defaults=defaults ) utils.update_with_defaults(obj, defaults, created)
AttributeError
dataset/ETHPy150Open pinax/pinax-stripe/pinax/stripe/actions/plans.py/sync_plans
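The first try/except AttributeError in this record is a version-compatibility shim: newer releases of the client library expose an auto-paginating iterator, older ones only a one-shot .all() call whose .data holds the page. The same idiom in isolation; the two fake SDK classes below are stand-ins, not the real stripe module.

class NewSDK:
    @staticmethod
    def auto_paging_iter():
        yield from ['plan-a', 'plan-b']

class OldSDK:
    @staticmethod
    def all():
        class Result:
            data = ['plan-a', 'plan-b']
        return Result()

def iter_plans(sdk):
    try:
        return sdk.auto_paging_iter()            # newer API
    except AttributeError:
        return iter(sdk.all().data)              # older API without auto-pagination

print(list(iter_plans(NewSDK)))                  # ['plan-a', 'plan-b']
print(list(iter_plans(OldSDK)))                  # ['plan-a', 'plan-b']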
2,860
def long_description(): """Return long description from README.rst if it's present because it doesn't get installed.""" try: return open(join(dirname(__file__), 'README.rst')).read() except __HOLE__: return LONG_DESCRIPTION
IOError
dataset/ETHPy150Open omab/python-social-auth/setup.py/long_description
2,861
def update_category_cache(instance): # NOTE: ATM, we clear the whole cache if a category has been changed. # Otherwise is lasts to long when the a category has a lot of products # (1000s) and the shop admin changes a category. clear_cache() return cache.delete("%s-category-breadcrumbs-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, instance.slug)) cache.delete("%s-category-products-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, instance.slug)) cache.delete("%s-category-all-products-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, instance.slug)) cache.delete("%s-category-categories-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, instance.slug)) for category in Category.objects.all(): cache.delete("%s-categories-portlet-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, category.slug)) cache.delete("%s-category-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, instance.id)) cache.delete("%s-category-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, instance.slug)) cache.delete("%s-category-all-children-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, instance.id)) cache.delete("%s-category-children-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, instance.id)) cache.delete("%s-category-parents-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, instance.id)) cache.delete("%s-category-products-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, instance.id)) cache.delete("%s-category-all-products-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, instance.id)) # Note: As this is called "pre-saved" newly created categories don't have # the many-to-many attribute "products", hence we have to take care of it # here. try: for product in instance.products.all(): update_product_cache(product) except __HOLE__: pass
ValueError
dataset/ETHPy150Open diefenbach/django-lfs/lfs/caching/listeners.py/update_category_cache
2,862
def update_product_cache(instance): # If the instance is a product with variant or a variant we have to # delete also the parent and all other variants if instance.is_variant(): parent = instance.parent else: parent = instance # if product was changed then we have to clear all product_navigation caches invalidate_cache_group_id('product_navigation') invalidate_cache_group_id('properties-%s' % parent.id) cache.delete("%s-product-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.id)) cache.delete("%s-product-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.slug)) cache.delete("%s-product-images-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.id)) cache.delete("%s-related-products-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.id)) cache.delete("%s-product-categories-%s-False" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.id)) cache.delete("%s-product-categories-%s-True" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.id)) cache.delete("%s-default-variant-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.id)) if parent.manufacturer: cache.delete("%s-manufacturer-all-products-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.manufacturer.pk)) cache.delete("%s-manufacturer-products-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.manufacturer.slug)) try: c = cache.get("%s-shipping-delivery-time" % settings.CACHE_MIDDLEWARE_KEY_PREFIX) del c["%s-product-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.slug)] cache.set("%s-shipping-delivery-time" % settings.CACHE_MIDDLEWARE_KEY_PREFIX, c) except (KeyError, __HOLE__): pass for variant in parent.get_variants(): cache.delete("%s-product-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, variant.id)) cache.delete("%s-product-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.slug)) cache.delete("%s-product-images-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, variant.id)) cache.delete("%s-related-products-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, variant.id)) cache.delete("%s-product-categories-%s-False" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, variant.id)) cache.delete("%s-product-categories-%s-True" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, variant.id)) cache.delete("%s-product-shipping-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, variant.slug))
TypeError
dataset/ETHPy150Open diefenbach/django-lfs/lfs/caching/listeners.py/update_product_cache
2,863
def parse_bits(parser, bits, params, varargs, varkw, defaults, takes_context, name): """ Parse bits for template tag helpers simple_tag and inclusion_tag, in particular by detecting syntax errors and by extracting positional and keyword arguments. """ if takes_context: if params[0] == 'context': params = params[1:] else: raise TemplateSyntaxError( "'%s' is decorated with takes_context=True so it must " "have a first argument of 'context'" % name) args = [] kwargs = {} unhandled_params = list(params) for bit in bits: # First we try to extract a potential kwarg from the bit kwarg = token_kwargs([bit], parser) if kwarg: # The kwarg was successfully extracted param, value = kwarg.popitem() if param not in params and varkw is None: # An unexpected keyword argument was supplied raise TemplateSyntaxError( "'%s' received unexpected keyword argument '%s'" % (name, param)) elif param in kwargs: # The keyword argument has already been supplied once raise TemplateSyntaxError( "'%s' received multiple values for keyword argument '%s'" % (name, param)) else: # All good, record the keyword argument kwargs[str(param)] = value if param in unhandled_params: # If using the keyword syntax for a positional arg, then # consume it. unhandled_params.remove(param) else: if kwargs: raise TemplateSyntaxError( "'%s' received some positional argument(s) after some " "keyword argument(s)" % name) else: # Record the positional argument args.append(parser.compile_filter(bit)) try: # Consume from the list of expected positional arguments unhandled_params.pop(0) except __HOLE__: if varargs is None: raise TemplateSyntaxError( "'%s' received too many positional arguments" % name) if defaults is not None: # Consider the last n params handled, where n is the # number of defaults. unhandled_params = unhandled_params[:-len(defaults)] if unhandled_params: # Some positional arguments were not supplied raise TemplateSyntaxError( "'%s' did not receive value(s) for the argument(s): %s" % (name, ", ".join("'%s'" % p for p in unhandled_params))) return args, kwargs
IndexError
dataset/ETHPy150Open django/django/django/template/library.py/parse_bits
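parse_bits classifies template-tag tokens into positional and keyword arguments, rejecting duplicate keywords and positionals that follow keywords. A much-reduced sketch of that classification on plain strings, without Django's parser or filter compilation (illustrative only).

def split_bits(bits):
    args, kwargs = [], {}
    for bit in bits:
        if '=' in bit:
            key, value = bit.split('=', 1)
            if key in kwargs:
                raise ValueError('duplicate keyword argument %r' % key)
            kwargs[key] = value
        elif kwargs:
            raise ValueError('positional argument after keyword arguments')
        else:
            args.append(bit)
    return args, kwargs

print(split_bits(['request.user', 'limit=5']))   # (['request.user'], {'limit': '5'})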
2,864
def import_library(name): """ Load a Library object from a template tag module. """ try: module = import_module(name) except ImportError as e: raise InvalidTemplateLibrary( "Invalid template library specified. ImportError raised when " "trying to load '%s': %s" % (name, e) ) try: return module.register except __HOLE__: raise InvalidTemplateLibrary( "Module %s does not have a variable named 'register'" % name, )
AttributeError
dataset/ETHPy150Open django/django/django/template/library.py/import_library
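import_library loads a module from a dotted path and then insists on a specific module-level attribute, reporting each failure mode separately. A generic version of that two-step lookup; the helper name and error type below are invented for the example.

from importlib import import_module

def load_attribute(dotted_path, attr):
    try:
        module = import_module(dotted_path)
    except ImportError as exc:
        raise RuntimeError('cannot import %s: %s' % (dotted_path, exc))
    try:
        return getattr(module, attr)
    except AttributeError:
        raise RuntimeError('module %s has no attribute %r' % (dotted_path, attr))

print(load_attribute('json', 'dumps'))           # <function dumps at 0x...>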
2,865
def skip_row(self, instance, original): """ Returns ``True`` if ``row`` importing should be skipped. Default implementation returns ``False`` unless skip_unchanged == True. Override this method to handle skipping rows meeting certain conditions. """ if not self._meta.skip_unchanged: return False for field in self.get_fields(): try: # For fields that are models.fields.related.ManyRelatedManager # we need to compare the results if list(field.get_value(instance).all()) != list(field.get_value(original).all()): return False except __HOLE__: if field.get_value(instance) != field.get_value(original): return False return True
AttributeError
dataset/ETHPy150Open django-import-export/django-import-export/import_export/resources.py/Resource.skip_row
2,866
def property_clean(prop, value): """Apply Property level validation to value. Calls .make_value_from_form() and .validate() on the property and catches exceptions generated by either. The exceptions are converted to forms.ValidationError exceptions. Args: prop: The property to validate against. value: The value to validate. Raises: forms.ValidationError if the value cannot be validated. """ if value is not None: try: prop.validate(prop.make_value_from_form(value)) except (db.BadValueError, __HOLE__), e: raise forms.ValidationError(unicode(e))
ValueError
dataset/ETHPy150Open IanLewis/kay/kay/utils/forms/modelform.py/property_clean
2,867
def _normalKeyEvent(key, upDown): assert upDown in ('up', 'down'), "upDown argument must be 'up' or 'down'" try: if pyautogui.isShiftCharacter(key): key_code = keyboardMapping[key.lower()] event = Quartz.CGEventCreateKeyboardEvent(None, keyboardMapping['shift'], upDown == 'down') Quartz.CGEventPost(Quartz.kCGHIDEventTap, event) # Tiny sleep to let OS X catch up on us pressing shift time.sleep(0.01) else: key_code = keyboardMapping[key] event = Quartz.CGEventCreateKeyboardEvent(None, key_code, upDown == 'down') Quartz.CGEventPost(Quartz.kCGHIDEventTap, event) time.sleep(0.01) # TODO - wait, is the shift key's keyup not done? # TODO - get rid of this try-except. except __HOLE__: raise RuntimeError("Key %s not implemented." % (key))
KeyError
dataset/ETHPy150Open asweigart/pyautogui/pyautogui/_pyautogui_osx.py/_normalKeyEvent
2,868
def do_work(job_queue, counter=None): """ Process work function, read more fetch page jobs from queue until all jobs are finished """ signal.signal(signal.SIGINT, signal.SIG_IGN) while not job_queue.empty(): try: job = job_queue.get_nowait() fetch_result_page(job) num_done = 0 with counter.get_lock(): counter.value += 1 num_done = counter.value logging.info('{0} page(s) of {1} finished'.format(num_done, job['num_pages'])) except Empty: pass except __HOLE__: break except Exception: if not job: raise retries = job.get('retries', 0) if retries < job['max_retries']: logging.error('Retrying Page {0}'.format(job['page'])) job['retries'] = retries + 1 job_queue.put_nowait(job) else: logging.error('Max retries exceeded for page {0}'. format(job['page']))
KeyboardInterrupt
dataset/ETHPy150Open ikreymer/cdx-index-client/cdx-index-client.py/do_work
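On a transient failure this worker does not drop the job: it bumps a retry counter stored on the job dict and pushes the job back onto the queue until max_retries is exhausted. The same requeue loop in a single process; flaky_fetch below is a made-up stand-in for the real page download.

import queue
import random

def flaky_fetch(page):
    if random.random() < 0.5:
        raise IOError('simulated network failure')
    return 'page %d fetched' % page

jobs = queue.Queue()
for page in range(5):
    jobs.put({'page': page, 'retries': 0, 'max_retries': 3})

while not jobs.empty():
    job = jobs.get_nowait()
    try:
        print(flaky_fetch(job['page']))
    except IOError:
        if job['retries'] < job['max_retries']:
            job['retries'] += 1
            jobs.put_nowait(job)                 # push it back for another attempt
        else:
            print('giving up on page %d' % job['page'])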
2,869
def run_workers(num_workers, jobs, shuffle): """ Queue up all jobs start workers with job_queue catch KeyboardInterrupt to allow interrupting all workers Not using Pool to better hande KeyboardInterrupt gracefully Adapted from example at: http://bryceboe.com/2012/02/14/python-multiprocessing-pool-and-keyboardinterrupt-revisited/ """ # Queue up all jobs job_queue = Queue() counter = Value('i', 0) # optionally shuffle queue if shuffle: jobs = list(jobs) random.shuffle(jobs) for job in jobs: job_queue.put(job) workers = [] for i in xrange(0, num_workers): tmp = Process(target=do_work, args=(job_queue, counter)) tmp.start() workers.append(tmp) try: for worker in workers: worker.join() except __HOLE__: logging.info('Received Ctrl-C, interrupting all workers') for worker in workers: worker.terminate() worker.join()
KeyboardInterrupt
dataset/ETHPy150Open ikreymer/cdx-index-client/cdx-index-client.py/run_workers
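Together with do_work above, run_workers forms a Ctrl-C-friendly pool: each child ignores SIGINT, so only the parent sees KeyboardInterrupt and can terminate and join the children cleanly. A self-contained sketch of that arrangement with trivial jobs, meant to be run as a script.

import signal
from multiprocessing import Process, Queue

def worker(job_queue):
    signal.signal(signal.SIGINT, signal.SIG_IGN)   # leave Ctrl-C handling to the parent
    while not job_queue.empty():
        try:
            job = job_queue.get_nowait()
        except Exception:                          # queue drained by a sibling process
            break
        print('worker handled job', job)

if __name__ == '__main__':
    job_queue = Queue()
    for i in range(20):
        job_queue.put(i)
    workers = [Process(target=worker, args=(job_queue,)) for _ in range(4)]
    for w in workers:
        w.start()
    try:
        for w in workers:
            w.join()
    except KeyboardInterrupt:
        for w in workers:
            w.terminate()
            w.join()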
2,870
def main(): url_help = """ url to query in the index: For prefix, use: http://example.com/* For domain query, use: *.example.com """ field_list_help = """ select fields to include: eg, --fl url,timestamp """ parser = ArgumentParser('CDX Index API Client') parser.add_argument('url', help=url_help) parser.add_argument('-n', '--show-num-pages', action='store_true', help='Show Number of Pages only and exit') parser.add_argument('-p', '--processes', type=int, help='Number of worker processes to use') parser.add_argument('--fl', help=field_list_help) parser.add_argument('-j', '--json', action='store_true', help='Use json output instead of cdx(j)') parser.add_argument('-z', '--gzipped', action='store_true', help='Storge gzipped results, with .gz extensions') parser.add_argument('-o', '--output-prefix', help='Custom output prefix, append with -NN for each page') parser.add_argument('-d', '--directory', help='Specify custom output directory') parser.add_argument('--page-size', type=int, help='size of each page in blocks, >=1') group = parser.add_mutually_exclusive_group() group.add_argument('-c', '--coll', default='CC-MAIN-2015-06', help='The index collection to use') group.add_argument('--cdx-server-url', help='Set endpoint for CDX Server API') parser.add_argument('--timeout', default=30, type=int, help='HTTP read timeout before retry') parser.add_argument('--max-retries', default=5, type=int, help='Number of retry attempts') parser.add_argument('-v', '--verbose', action='store_true', help='Verbose logging of debug msgs') parser.add_argument('--pages', type=int, nargs='*', help=('Get only the specified result page(s) instead ' + 'of all results')) parser.add_argument('--header', nargs='*', help='Add custom header to request') parser.add_argument('--in-order', action='store_true', help='Fetch pages in order (default is to shuffle page list)') # Logging r = parser.parse_args() if r.verbose: level = logging.DEBUG else: level = logging.INFO logging.basicConfig(format='%(asctime)s: [%(levelname)s]: %(message)s', level=level) logging.getLogger("requests").setLevel(logging.WARNING) if r.cdx_server_url: api_url = r.cdx_server_url else: api_url = DEF_API_BASE + r.coll + '-index' logging.debug('Getting Num Pages...') num_pages = get_num_pages(api_url, r.url, r.page_size) # Num Pages Only Query if r.show_num_pages: print(num_pages) return if num_pages == 0: print('No results found for: ' + r.url) # set output if not r.output_prefix: if r.url.startswith('*'): output_prefix = 'domain-' + r.url.strip('*.') elif r.url.endswith('*'): output_prefix = 'prefix-' + r.url.strip('*') elif r.url.startswith(('http://', 'https://', '//')): output_prefix = r.url.split('//', 1)[-1] else: output_prefix = r.url output_prefix = output_prefix.strip('/') output_prefix = output_prefix.replace('/', '-') output_prefix = urllib.quote(output_prefix) + '-' else: output_prefix = r.output_prefix def get_page_job(page): job = {} job['api_url'] = api_url job['url'] = r.url job['page'] = page job['num_pages'] = num_pages job['output_prefix'] = output_prefix job['fl'] = r.fl job['json'] = r.json job['page_size'] = r.page_size job['timeout'] = r.timeout job['max_retries'] = r.max_retries job['gzipped'] = r.gzipped job['headers'] = r.header job['dir'] = r.directory return job if r.pages: page_list = r.pages logging.info('Fetching pages {0} of {1}'.format(r.pages, r.url)) num_pages = len(page_list) else: page_list = range(0, num_pages) logging.info('Fetching {0} pages of {1}'.format(num_pages, r.url)) if num_pages == 1: 
fetch_result_page(get_page_job(page_list[0])) return # set num workers based on proesses if not r.processes: try: num_workers = cpu_count() * 2 except __HOLE__: num_workers = 4 else: num_workers = r.processes num_workers = min(num_workers, num_pages) # generate page jobs job_list = map(get_page_job, page_list) run_workers(num_workers, job_list, not r.in_order)
NotImplementedError
dataset/ETHPy150Open ikreymer/cdx-index-client/cdx-index-client.py/main
2,871
def directory_listing(self, request, folder_id=None, viewtype=None): clipboard = tools.get_user_clipboard(request.user) if viewtype == 'images_with_missing_data': folder = ImagesWithMissingData() elif viewtype == 'unfiled_images': folder = UnfiledImages() elif viewtype == 'last': last_folder_id = request.session.get('filer_last_folder_id') try: Folder.objects.get(id=last_folder_id) except Folder.DoesNotExist: url = reverse('admin:filer-directory_listing-root') url = "%s%s%s" % (url, popup_param(request), selectfolder_param(request, "&")) else: url = reverse('admin:filer-directory_listing', kwargs={'folder_id': last_folder_id}) url = "%s%s%s" % (url, popup_param(request), selectfolder_param(request, "&")) return HttpResponseRedirect(url) elif folder_id is None: folder = FolderRoot() else: folder = get_object_or_404(Folder, id=folder_id) request.session['filer_last_folder_id'] = folder_id # Check actions to see if any are available on this changelist actions = self.get_actions(request) # Remove action checkboxes if there aren't any actions available. list_display = list(self.list_display) if not actions: try: list_display.remove('action_checkbox') except __HOLE__: pass # search q = request.GET.get('q', None) if q: search_terms = unquote(q).split(" ") else: search_terms = [] q = '' limit_search_to_folder = request.GET.get('limit_search_to_folder', False) in (True, 'on') if len(search_terms) > 0: if folder and limit_search_to_folder and not folder.is_root: folder_qs = folder.get_descendants() file_qs = File.objects.filter( folder__in=folder.get_descendants()) else: folder_qs = Folder.objects.all() file_qs = File.objects.all() folder_qs = self.filter_folder(folder_qs, search_terms) file_qs = self.filter_file(file_qs, search_terms) show_result_count = True else: folder_qs = folder.media_folder_children.all() file_qs = folder.files.all() show_result_count = False folder_qs = folder_qs.order_by('name') order_by = request.GET.get('order_by', None) if order_by is not None: order_by = order_by.split(',') order_by = [field for field in order_by if re.sub(r'^-', '', field) in self.order_by_file_fields] if len(order_by) > 0: file_qs = file_qs.order_by(*order_by) folder_children = [] folder_files = [] if folder.is_root: folder_children += folder.virtual_folders perms = FolderPermission.objects.get_read_id_list(request.user) root_exclude_kw = {'parent__isnull': False, 'parent__id__in': perms} if perms != 'All': file_qs = file_qs.filter( Q(folder__id__in=perms) | Q(owner=request.user)) folder_qs = folder_qs.filter( Q(id__in=perms) | Q(owner=request.user)) else: root_exclude_kw.pop('parent__id__in') if folder.is_root: folder_qs = folder_qs.exclude(**root_exclude_kw) folder_children += folder_qs folder_files += file_qs try: permissions = { 'has_edit_permission': folder.has_edit_permission(request), 'has_read_permission': folder.has_read_permission(request), 'has_add_children_permission': folder.has_add_children_permission(request), } except: permissions = {} if order_by is None or len(order_by) == 0: folder_files.sort() items = folder_children + folder_files items_permissions = [ (item, {'change': self.has_change_permission(request, item)}) for item in items] paginator = Paginator(items_permissions, settings.MEDIA_PAGINATE_BY) # Are we moving to clipboard? 
if request.method == 'POST' and '_save' not in request.POST: for f in folder_files: if "move-to-clipboard-%d" % (f.id,) in request.POST: clipboard = tools.get_user_clipboard(request.user) if f.has_edit_permission(request): tools.move_file_to_clipboard([f], clipboard) return HttpResponseRedirect(request.get_full_path()) else: raise PermissionDenied selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME) # Actions with no confirmation if (actions and request.method == 'POST' and 'index' in request.POST and '_save' not in request.POST): if selected: response = self.response_action( request, files_queryset=file_qs, folders_queryset=folder_qs) if response: return response else: msg = _("Items must be selected in order to perform " "actions on them. No items have been changed.") self.message_user(request, msg) # Actions with confirmation if (actions and request.method == 'POST' and helpers.ACTION_CHECKBOX_NAME in request.POST and 'index' not in request.POST and '_save' not in request.POST): if selected: response = self.response_action( request, files_queryset=file_qs, folders_queryset=folder_qs) if response: return response # Build the action form and populate it with available actions. if actions: action_form = self.action_form(auto_id=None) action_form.fields[ 'action'].choices = self.get_action_choices(request) else: action_form = None selection_note_all = ungettext('%(total_count)s selected', 'All %(total_count)s selected', paginator.count) # If page request (9999) is out of range, deliver last page of results. try: paginated_items = paginator.page(request.GET.get('page', 1)) except PageNotAnInteger: paginated_items = paginator.page(1) except EmptyPage: paginated_items = paginator.page(paginator.num_pages) return render_to_response( self.directory_listing_template, { 'folder': folder, 'clipboard_files': File.objects.filter( in_clipboards__clipboarditem__clipboard__user=request.user ).distinct(), 'paginator': paginator, 'paginated_items': paginated_items, # [(item, item_perms), ] 'permissions': permissions, 'permstest': userperms_for_request(folder, request), 'current_url': request.path, 'title': 'Directory listing for %s' % folder.name, 'search_string': ' '.join(search_terms), 'q': urlquote(q), 'show_result_count': show_result_count, 'limit_search_to_folder': limit_search_to_folder, 'is_popup': popup_status(request), 'select_folder': selectfolder_status(request), # needed in the admin/base.html template for logout links 'root_path': reverse('admin:index'), 'action_form': action_form, 'actions_on_top': self.actions_on_top, 'actions_on_bottom': self.actions_on_bottom, 'actions_selection_counter': self.actions_selection_counter, 'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(paginated_items.object_list)}, 'selection_note_all': selection_note_all % {'total_count': paginator.count}, 'media': self.media, 'enable_permissions': settings.FILER_ENABLE_PERMISSIONS, 'can_make_folder': request.user.is_superuser or \ (folder.is_root and settings.FILER_ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS) or \ permissions.get("has_add_children_permission"), }, context_instance=RequestContext(request))
ValueError
dataset/ETHPy150Open django-leonardo/django-leonardo/leonardo/module/media/admin/folder/admin.py/FolderAdmin.directory_listing
2,872
@property def owner_search_fields(self): """ Returns all the fields that are CharFields except for password from the User model. For the built-in User model, that means username, first_name, last_name, and email. """ try: from django.contrib.auth import get_user_model except __HOLE__: # Django < 1.5 from django.contrib.auth.models import User else: User = get_user_model() return [ field.name for field in User._meta.fields if isinstance(field, models.CharField) and field.name != 'password' ]
ImportError
dataset/ETHPy150Open django-leonardo/django-leonardo/leonardo/module/media/admin/folder/admin.py/FolderAdmin.owner_search_fields
2,873
def response_action(self, request, files_queryset, folders_queryset): """ Handle an admin action. This is called if a request is POSTed to the changelist; it returns an HttpResponse if the action was handled, and None otherwise. """ # There can be multiple action forms on the page (at the top # and bottom of the change list, for example). Get the action # whose button was pushed. try: action_index = int(request.POST.get('index', 0)) except ValueError: action_index = 0 # Construct the action form. data = request.POST.copy() data.pop(helpers.ACTION_CHECKBOX_NAME, None) data.pop("index", None) # Use the action whose button was pushed try: data.update({'action': data.getlist('action')[action_index]}) except __HOLE__: # If we didn't get an action from the chosen form that's invalid # POST data, so by deleting action it'll fail the validation check # below. So no need to do anything here pass action_form = self.action_form(data, auto_id=None) action_form.fields['action'].choices = self.get_action_choices(request) # If the form's valid we can handle the action. if action_form.is_valid(): action = action_form.cleaned_data['action'] select_across = action_form.cleaned_data['select_across'] func, name, description = self.get_actions(request)[action] # Get the list of selected PKs. If nothing's selected, we can't # perform an action on it, so bail. Except we want to perform # the action explicitly on all objects. selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME) if not selected and not select_across: # Reminder that something needs to be selected or nothing will # happen msg = _("Items must be selected in order to perform " "actions on them. No items have been changed.") self.message_user(request, msg) return None if not select_across: selected_files = [] selected_folders = [] for pk in selected: if pk[:5] == "file-": selected_files.append(pk[5:]) else: selected_folders.append(pk[7:]) # Perform the action only on the selected objects files_queryset = files_queryset.filter(pk__in=selected_files) folders_queryset = folders_queryset.filter( pk__in=selected_folders) response = func(self, request, files_queryset, folders_queryset) # Actions may return an HttpResponse, which will be used as the # response from the POST. If not, we'll be a good little HTTP # citizen and redirect back to the changelist page. if isinstance(response, HttpResponse): return response else: return HttpResponseRedirect(request.get_full_path()) else: msg = _("No action selected.") self.message_user(request, msg) return None
IndexError
dataset/ETHPy150Open django-leonardo/django-leonardo/leonardo/module/media/admin/folder/admin.py/FolderAdmin.response_action
2,874
def copy_files_and_folders(self, request, files_queryset, folders_queryset): opts = self.model._meta app_label = opts.app_label current_folder = self._get_current_action_folder( request, files_queryset, folders_queryset) perms_needed = self._check_copy_perms( request, files_queryset, folders_queryset) to_copy = self._list_all_to_copy_or_move( request, files_queryset, folders_queryset) folders = self._list_all_destination_folders( request, folders_queryset, current_folder, False) if request.method == 'POST' and request.POST.get('post'): if perms_needed: raise PermissionDenied form = CopyFilesAndFoldersForm(request.POST) if form.is_valid(): try: destination = Folder.objects.get( pk=request.POST.get('destination')) except Folder.DoesNotExist: raise PermissionDenied folders_dict = dict(folders) if destination not in folders_dict or not folders_dict[destination][1]: raise PermissionDenied if files_queryset.count() + folders_queryset.count(): # We count all files and folders here (recursivelly) n = self._copy_files_and_folders_impl( files_queryset, folders_queryset, destination, form.cleaned_data['suffix'], False) self.message_user(request, _("Successfully copied %(count)d files and/or folders to folder '%(destination)s'.") % { "count": n, "destination": destination, }) return None else: form = CopyFilesAndFoldersForm() try: selected_destination_folder = int( request.POST.get('destination', 0)) except __HOLE__: if current_folder: selected_destination_folder = current_folder.pk else: selected_destination_folder = 0 context = { "title": _("Copy files and/or folders"), "instance": current_folder, "breadcrumbs_action": _("Copy files and/or folders"), "to_copy": to_copy, "destination_folders": folders, "selected_destination_folder": selected_destination_folder, "copy_form": form, "files_queryset": files_queryset, "folders_queryset": folders_queryset, "perms_lacking": perms_needed, "opts": opts, 'is_popup': popup_status(request), "root_path": reverse('admin:index'), "app_label": app_label, "action_checkbox_name": helpers.ACTION_CHECKBOX_NAME, } # Display the destination folder selection page return render_to_response([ "admin/media/folder/choose_copy_destination.html" ], context, context_instance=template.RequestContext(request))
ValueError
dataset/ETHPy150Open django-leonardo/django-leonardo/leonardo/module/media/admin/folder/admin.py/FolderAdmin.copy_files_and_folders
2,875
def get_total_count(self): """ Calling `len()` on a QuerySet would execute the whole SELECT. See `/blog/2012/0124` """ di = self.data_iterator if isinstance(di, QuerySet): return di.count() #~ if di is None: #~ raise Exception("data_iterator is None: %s" % self) if False: return len(di) else: try: return len(di) except __HOLE__: raise TypeError("{0} has no length".format(di))
TypeError
dataset/ETHPy150Open lsaffre/lino/lino/core/tablerequest.py/TableRequest.get_total_count
2,876
def parse_req(self, request, rqdata, **kw): """Parse the incoming HttpRequest and translate it into keyword arguments to be used by :meth:`setup`. The `mt` url param is parsed only when needed. Usually it is not needed because the `master_class` is constant and known per actor. But there are exceptions: - `master` is `ContentType` - `master` is some abstract model - `master` is not a subclass of Model, e.g. :class:`lino.modlib.polls.models.AnswersByResponse`, a virtual table which defines :meth:`get_row_by_pk <lino.core.actors.Actor.get_row_by_pk>`. """ # logger.info("20120723 %s.parse_req() %s", self.actor, rqdata) #~ rh = self.ah master = kw.get('master', self.actor.master) if master is not None: if not isinstance(master, type): raise Exception("20150216 not a type: %r" % master) if settings.SITE.is_installed('contenttypes'): from django.contrib.contenttypes.models import ContentType if issubclass(master, models.Model) and ( master is ContentType or master._meta.abstract): mt = rqdata.get(constants.URL_PARAM_MASTER_TYPE) try: master = kw['master'] = ContentType.objects.get( pk=mt).model_class() except ContentType.DoesNotExist: pass # master is None if 'master_instance' not in kw: pk = rqdata.get(constants.URL_PARAM_MASTER_PK, None) #~ print '20100406a', self.actor,URL_PARAM_MASTER_PK,"=",pk #~ if pk in ('', '-99999'): if pk == '': pk = None if pk is None: kw['master_instance'] = None else: mi = self.actor.get_master_instance(self, master, pk) if mi is None: raise ObjectDoesNotExist( "Invalid master key {0} for {1}".format( pk, self.actor)) kw['master_instance'] = mi # ~ print '20100212', self #, kw['master_instance'] #~ print '20100406b', self.actor,kw if settings.SITE.use_filterRow: exclude = dict() for f in self.ah.store.fields: if f.field: filterOption = rqdata.get( 'filter[%s_filterOption]' % f.field.name) if filterOption == 'empty': kw[f.field.name + "__isnull"] = True elif filterOption == 'notempty': kw[f.field.name + "__isnull"] = False else: filterValue = rqdata.get('filter[%s]' % f.field.name) if filterValue: if not filterOption: filterOption = 'contains' if filterOption == 'contains': kw[f.field.name + "__icontains"] = filterValue elif filterOption == 'doesnotcontain': exclude[f.field.name + "__icontains"] = filterValue else: print("unknown filterOption %r" % filterOption) if len(exclude): kw.update(exclude=exclude) if settings.SITE.use_gridfilters: filter = rqdata.get(constants.URL_PARAM_GRIDFILTER, None) if filter is not None: filter = json.loads(filter) kw['gridfilters'] = [constants.dict2kw(flt) for flt in filter] kw = ActionRequest.parse_req(self, request, rqdata, **kw) #~ raise Exception("20120121 %s.parse_req(%s)" % (self,kw)) #~ kw.update(self.report.known_values) #~ for fieldname, default in self.report.known_values.items(): #~ v = request.REQUEST.get(fieldname,None) #~ if v is not None: #~ kw[fieldname] = v quick_search = rqdata.get(constants.URL_PARAM_FILTER, None) if quick_search: kw.update(quick_search=quick_search) sort = rqdata.get(constants.URL_PARAM_SORT, None) if sort: sort_dir = rqdata.get(constants.URL_PARAM_SORTDIR, 'ASC') if sort_dir == 'DESC': sort = '-' + sort kw.update(order_by=[sort]) try: offset = rqdata.get(constants.URL_PARAM_START, None) if offset: kw.update(offset=int(offset)) limit = rqdata.get( constants.URL_PARAM_LIMIT, self.actor.preview_limit) if limit: kw.update(limit=int(limit)) except __HOLE__: # Example: invalid literal for int() with base 10: # 'fdpkvcnrfdybhur' raise SuspiciousOperation("Invalid value for limit or offset") return 
self.actor.parse_req(request, rqdata, **kw)
ValueError
dataset/ETHPy150Open lsaffre/lino/lino/core/tablerequest.py/TableRequest.parse_req
2,877
def get_latlng(address): address_query = urllib.quote(address, '') values = { 'format' : 'json', 'sensor' : 'false', 'address' : address_query, } url = GOOGLE_GEOCODING_API_GEOCODE_URL % values response = requests.get(url) data = json.loads(response.text) try: location = data['results'][0]['geometry']['location'] latitude = location['lat'] longitude = location['lng'] except __HOLE__, k: latitude = None longitude = None return (latitude, longitude,)
KeyError
dataset/ETHPy150Open hacktoolkit/hacktoolkit/apis/google/geocode/geocode.py/get_latlng
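The part worth noting in get_latlng is the defensive unpacking of the geocoder's nested JSON, where one except clause turns any missing piece into a (None, None) result. The same unpacking against a hard-coded payload; the response shape mirrors the record, the coordinates are made up, and this sketch additionally catches IndexError for an empty results list, which the original does not catch.

payload = {'results': [{'geometry': {'location': {'lat': 37.422, 'lng': -122.084}}}]}

try:
    location = payload['results'][0]['geometry']['location']
    latitude, longitude = location['lat'], location['lng']
except (KeyError, IndexError):
    latitude = longitude = None

print(latitude, longitude)       # 37.422 -122.084, or None None on a bad payload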
2,878
def reverse_geocode(latitude, longitude): values = { 'format' : 'json', 'sensor' : 'false', 'latlng' : '%s,%s' % (latitude, longitude,) } url = GOOGLE_GEOCODING_API_REVERSE_URL % values response = requests.get(url) data = json.loads(response.text) try: location = data['results'][0] address = location['formatted_address'] except __HOLE__, k: address = None return address
KeyError
dataset/ETHPy150Open hacktoolkit/hacktoolkit/apis/google/geocode/geocode.py/reverse_geocode
2,879
def doJoin(self, irc, msg): channel = msg.args[0] if ircutils.strEqual(irc.nick, msg.nick): return if not self.registryValue('enable', channel): return fallthrough = self.registryValue('fallthrough', channel) def do(type): cap = ircdb.makeChannelCapability(channel, type) cap_auto = ircdb.makeChannelCapability(channel, 'auto'+type) try: apply_mode = ircdb.checkCapability(msg.prefix, cap, ignoreOwner=not self.registryValue('owner'), ignoreChannelOp=True, ignoreDefaultAllow=True) except KeyError: apply_mode = False if self.registryValue('alternativeCapabilities', channel): try: override = ircdb.checkCapability(msg.prefix, cap_auto, ignoreOwner=not self.registryValue('owner'), ignoreChannelOp=True, ignoreDefaultAllow=True) except __HOLE__: override = False else: override = False if apply_mode or override: if override or self.registryValue(type, channel): self.log.info('Scheduling auto-%s of %s in %s.', type, msg.prefix, channel) def dismiss(): """Determines whether or not a mode has already been applied.""" l = getattr(irc.state.channels[channel], type+'s') return (msg.nick in l) msgmaker = getattr(ircmsgs, type) schedule_msg(msgmaker(channel, msg.nick), dismiss) raise Continue # Even if fallthrough, let's only do one. elif not fallthrough: self.log.debug('%s has %s, but supybot.plugins.AutoMode.%s' ' is not enabled in %s, refusing to fall ' 'through.', msg.prefix, cap, type, channel) raise Continue def schedule_msg(msg, dismiss): def f(): if not dismiss(): irc.queueMsg(msg) else: self.log.info('Dismissing auto-mode for %s.', msg.args[2]) delay = self.registryValue('delay', channel) if delay: schedule.addEvent(f, time.time() + delay) else: f() def extra_modes(): try: user = ircdb.users.getUser(ircdb.users.getUserId(msg.prefix)) except KeyError: return pattern = re.compile('-|\+') for item in self.registryValue('extra', channel): try: username, modes = pattern.split(item, maxsplit=1) modes = item[len(username)] + modes except ValueError: # No - or + in item log.error(('%r is not a valid item for ' 'supybot.plugins.AutoMode.extra') % item) continue if username != user.name: continue else: self.log.info('Scheduling auto-modes %s of %s in %s.', modes, msg.prefix, channel) modes = [modes] + \ ([msg.nick]*len(pattern.sub('', modes))) schedule_msg(ircmsgs.mode(channel, modes), lambda :False) break try: do('op') if 'h' in irc.state.supported['prefix']: do('halfop') except Continue: return finally: extra_modes() c = ircdb.channels.getChannel(channel) if c.checkBan(msg.prefix) and self.registryValue('ban', channel): period = self.registryValue('ban.period', channel) if period: def unban(): try: if msg.prefix in irc.state.channels[channel].bans: irc.queueMsg(ircmsgs.unban(channel, msg.prefix)) except KeyError: # We're not in the channel anymore. pass schedule.addEvent(unban, time.time()+period) banmask =conf.supybot.protocols.irc.banmask.makeBanmask(msg.prefix) irc.queueMsg(ircmsgs.ban(channel, banmask)) irc.queueMsg(ircmsgs.kick(channel, msg.nick)) try: do('voice') except Continue: return
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/AutoMode/plugin.py/AutoMode.doJoin
2,880
def _delete_files(): # we don't know the precise name the underlying database uses # so we use glob to locate all names for f in glob.glob(_fname + "*"): try: os.unlink(f) except __HOLE__: pass
OSError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_anydbm.py/_delete_files
2,881
def locate_imported_file(self, source_dir, import_path): """ Locate the imported file in the source directory. Return the path to the imported file relative to STATIC_ROOT :param source_dir: source directory :type source_dir: str :param import_path: path to the imported file :type import_path: str :returns: str """ path = posixpath.normpath(posixpath.join(source_dir, import_path)) try: self.get_full_source_path(path) except __HOLE__: raise exceptions.StaticCompilationError( "Can't locate the imported file: {0}".format(import_path) ) return path
ValueError
dataset/ETHPy150Open andreyfedoseev/django-static-precompiler/static_precompiler/compilers/stylus.py/Stylus.locate_imported_file
2,882
def find_dependencies(self, source_path): source = self.get_source(source_path) source_dir = posixpath.dirname(source_path) dependencies = set() imported_files = set() for import_path in self.find_imports(source): if import_path.endswith(".styl"): # @import "foo.styl" imported_files.add(self.locate_imported_file(source_dir, import_path)) elif import_path.endswith("/*"): # @import "foo/*" imported_dir = posixpath.join(source_dir, import_path[:-2]) try: imported_dir_full_path = self.get_full_source_path(imported_dir) except ValueError: raise exceptions.StaticCompilationError( "Can't locate the imported directory: {0}".format(import_path) ) if not os.path.isdir(imported_dir_full_path): raise exceptions.StaticCompilationError( "Imported path is not a directory: {0}".format(import_path) ) for filename in os.listdir(imported_dir_full_path): if filename.endswith(".styl"): imported_files.add(self.locate_imported_file(imported_dir, filename)) else: try: # @import "foo" -> @import "foo/index.styl" imported_dir = posixpath.join(source_dir, import_path) imported_dir_full_path = self.get_full_source_path(imported_dir) if os.path.isdir(imported_dir_full_path): imported_files.add(self.locate_imported_file(imported_dir, "index.styl")) except __HOLE__: # @import "foo" -> @import "foo.styl" imported_files.add(self.locate_imported_file(source_dir, import_path + ".styl")) dependencies.update(imported_files) for imported_file in imported_files: dependencies.update(self.find_dependencies(imported_file)) return sorted(dependencies)
ValueError
dataset/ETHPy150Open andreyfedoseev/django-static-precompiler/static_precompiler/compilers/stylus.py/Stylus.find_dependencies
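Both Stylus records above ultimately reduce an @import reference to path arithmetic relative to the importing file, then check that the result actually exists. The resolution step on its own, with invented directory names.

import posixpath

def locate(source_dir, import_path):
    return posixpath.normpath(posixpath.join(source_dir, import_path))

print(locate('styles/pages', '../mixins.styl'))        # styles/mixins.styl
print(locate('styles/pages', 'buttons/index.styl'))    # styles/pages/buttons/index.styl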
2,883
@access.public def getRelevantAds(self, params): AD_LIMIT = 20 def sort_documents_by_url(documents): return sorted(documents, key=lambda x: x['url']) # @todo this assumes all URLs returned from Solr will properly urlparse def group_documents_by_domain(documents): return itertools.groupby(sort_documents_by_url(documents), lambda doc: urlparse(doc['url']).netloc) try: result = requests.get(os.environ['IMAGE_SPACE_SOLR'] + '/select', params={ 'wt': 'json', 'q': 'outpaths:"%s"' % params['solr_image_id'], 'fl': 'id,url', 'rows': str(AD_LIMIT) }, verify=False).json() except __HOLE__: return { 'numFound': 0, 'docs': [] } response = { 'numFound': result['response']['numFound'], 'docs': result['response']['docs'], 'groupedDocs': [] } for (domain, documents) in group_documents_by_domain(response['docs']): response['groupedDocs'].append([domain, list(documents)]) # Display the domain with the largest number of documents first response['groupedDocs'] = sorted(response['groupedDocs'], key=lambda (_, docs): len(docs), reverse=True) return response
ValueError
dataset/ETHPy150Open memex-explorer/image_space/imagespace/server/imagesearch_rest.py/ImageSearch.getRelevantAds
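The grouping in getRelevantAds leans on the usual itertools.groupby caveat: it only merges adjacent items, so the documents are sorted first and then keyed on the URL's domain. The same two-step in isolation, with invented URLs.

import itertools
from urllib.parse import urlparse                # urlparse.urlparse on Python 2

docs = [
    {'url': 'http://a.example/ad/1'},
    {'url': 'http://b.example/ad/9'},
    {'url': 'http://a.example/ad/2'},
]

def domain_of(doc):
    return urlparse(doc['url']).netloc

for domain, group in itertools.groupby(sorted(docs, key=domain_of), domain_of):
    print(domain, [d['url'] for d in group])
# a.example ['http://a.example/ad/1', 'http://a.example/ad/2']
# b.example ['http://b.example/ad/9']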
2,884
def _imageSearch(self, params): limit = params['limit'] if 'limit' in params else '100' query = params['query'] if 'query' in params else '*:*' offset = params['offset'] if 'offset' in params else '0' classifications = json.loads(params['classifications']) if 'classifications' in params else [] base = os.environ['IMAGE_SPACE_SOLR'] + '/select' if classifications: query += ' AND (%s)' % ' OR '.join(['%s:[.7 TO *]' % key for key in classifications]) qparams = { 'wt': 'json', 'hl': 'true', 'hl.fl': '*', 'q': query, 'start': offset, 'rows': limit } # Give plugins a chance to adjust the Solr query parameters event = events.trigger('imagespace.imagesearch.qparams', qparams) for response in event.responses: qparams = response try: result = requests.get(base, params=qparams, verify=False).json() except __HOLE__: return [] for image in result['response']['docs']: image['highlight'] = result['highlighting'][image['id']] response = { 'numFound': result['response']['numFound'], 'docs': result['response']['docs'] } # Give plugins a chance to adjust the end response of the imagesearch event = events.trigger('imagespace.imagesearch.results', response) for eventResponse in event.responses: response = eventResponse return response
ValueError
dataset/ETHPy150Open memex-explorer/image_space/imagespace/server/imagesearch_rest.py/ImageSearch._imageSearch
2,885
def printException (ex_args): data = {} try: if isinstance(ex_args, tuple): data[ex_args[0]] = ex_args[1] else: data['state'] = ex_args[0]['state'] if ex_args[0]['data']: for k,v in ex_args[0]['data'].items(): if k in ['ip', 'start', 'sender', 'recipients', 'subject']: data[k] = v except IndexError as i: pass except __HOLE__ as k: pass print 'Exception:', time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()), data
KeyError
dataset/ETHPy150Open dpapathanasiou/intelligent-smtp-responder/server/smtp_server.py/printException
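The hole here is KeyError: a dict argument that lacks the expected 'state' or 'data' keys raises KeyError, while a too-short sequence raises IndexError, and both are silently ignored. A compact sketch of that double guard:

def summarize(ex_args):
    data = {}
    try:
        data['state'] = ex_args[0]['state']
    except IndexError:
        pass  # ex_args had no first element
    except KeyError:
        pass  # ex_args[0] had no 'state' entry
    return data

# summarize([]) -> {}
# summarize([{'other': 1}]) -> {}
# summarize([{'state': 'data'}]) -> {'state': 'data'}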
2,886
def data (cargo): stream = cargo[0] email_data = cargo[1] with_stream_write (stream, '354 End data with \\r\\n.\\r\\n'+cr_lf) contents = [] client_error = False subject = None while 1: client_msg = with_stream_read (stream) if client_is_idle(email_data['start']): client_error = True break elif end_pattern.search(client_msg.rstrip()): break else: contents.append(client_msg) if subject is None: if subject_pattern.search(client_msg): try: subject = filter(lambda x: x!='', re.split(subject_pattern, client_msg))[0].lstrip() email_data['subject'] = get_base_subject(subject) if not valid_subject(subject): client_error = True break except __HOLE__: pass if client_error or len(contents) == 0: with_stream_write (stream, bad_request+cr_lf) return ('done', cargo) else: with_stream_write (stream, '250 Ok: queued'+cr_lf) email_data['contents'] = ''.join(contents) return ('process', (stream, email_data))
IndexError
dataset/ETHPy150Open dpapathanasiou/intelligent-smtp-responder/server/smtp_server.py/data
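IndexError fills this hole because taking [0] of the filtered re.split() result fails when the Subject: header carries no text. A standalone sketch; the pattern below is a simplified stand-in for the module's real subject_pattern:

import re

subject_pattern = re.compile(r'Subject:\s*', re.IGNORECASE)


def extract_subject(line):
    try:
        # Keep only the non-empty pieces left after splitting on the header.
        return [x for x in re.split(subject_pattern, line) if x != ''][0].lstrip()
    except IndexError:
        return None

# extract_subject('Subject: order status') -> 'order status'
# extract_subject('Subject:')              -> None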
2,887
@test.raises(TypeError) def test_wrap_int(self): text = int('1' * 80) try: new_text = misc.wrap(text, width=5) except __HOLE__ as e: self.eq(e.args[0], "Argument `text` must be one of [str, unicode].") raise
TypeError
dataset/ETHPy150Open datafolklabs/cement/tests/utils/misc_tests.py/BackendTestCase.test_wrap_int
2,888
@test.raises(TypeError) def test_wrap_none(self): text = None try: new_text = misc.wrap(text, width=5) except __HOLE__ as e: self.eq(e.args[0], "Argument `text` must be one of [str, unicode].") raise
TypeError
dataset/ETHPy150Open datafolklabs/cement/tests/utils/misc_tests.py/BackendTestCase.test_wrap_none
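The two cement records above assert the same contract: misc.wrap() rejects non-string input with a TypeError carrying this exact message. A stand-in with that behaviour, not cement's actual implementation:

import textwrap


def wrap(text, width=77):
    # Reject anything that is not text before handing off to textwrap.
    if not isinstance(text, str):
        raise TypeError("Argument `text` must be one of [str, unicode].")
    return '\n'.join(textwrap.wrap(text, width=width))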
2,889
def oai_process_pcurio(*args): identifiers = helpers.gather_identifiers(args) provider_uris, object_uris = helpers.seperate_provider_object_uris(identifiers) for i, uri in enumerate(provider_uris): if 'resultadon' in uri: doc_id = provider_uris[i].replace('http://www.maxwell.vrac.puc-rio.br/Busca_etds.php?strSecao=resultadonrSeq=', '') provider_uris[i] = 'http://www.maxwell.vrac.puc-rio.br/Busca_etds.php?strSecao=resultado&nrSeq=' + doc_id for i, uri in enumerate(object_uris): if 'resultadon' in uri: doc_id = object_uris[i].replace('http://www.maxwell.vrac.puc-rio.br/Busca_etds.php?strSecao=resultadonrSeq=', '') object_uris[i] = 'http://www.maxwell.vrac.puc-rio.br/Busca_etds.php?strSecao=resultado&nrSeq=' + doc_id potential_uris = (provider_uris + object_uris) try: canonical_uri = potential_uris[0] except __HOLE__: raise ValueError('No Canonical URI was returned for this record.') return { 'canonicalUri': canonical_uri, 'objectUris': object_uris, 'providerUris': provider_uris }
IndexError
dataset/ETHPy150Open CenterForOpenScience/scrapi/scrapi/harvesters/pcurio.py/oai_process_pcurio
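IndexError is the hole here: indexing [0] on an empty uri list fails, and the harvester re-raises it as a more descriptive ValueError. The core of that pattern in isolation:

def pick_canonical_uri(potential_uris):
    try:
        return potential_uris[0]
    except IndexError:
        # Nothing to choose from, so surface a clearer error to callers.
        raise ValueError('No Canonical URI was returned for this record.')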
2,890
def _run_log(self, spec): def parse_int_list(s): """Parses strings like '[1, 2, 3]'.""" s = s.strip() assert s[0] == '[' and s[-1] == ']', s if not s[1:-1].strip(): return [] else: return [int(x) for x in s[1:-1].split(',')] def split_args(s): """Splits 'a, b, [c, d]' into ['a', 'b', '[c, d]'].""" args = [] start = 0 depth = 0 for ix in xrange(len(s)): c = s[ix] if c in '({[': depth += 1 elif c in ')}]': depth -= 1 elif c == ',' and depth == 0: args.append(s[start:ix].strip()) start = ix + 1 assert depth == 0, s args.append(s[start:].strip()) return args def parse(s, names): """Parse (recursive) 'Foo(arg, kw=arg)' for Foo in the names dict.""" s = s.strip() if s in names: return names[s] elif s[0] == '[': return parse_int_list(s) elif '(' in s: assert s[-1] == ')', s callee = parse(s[:s.index('(')], names) posargs = [] kwargs = {} for arg in split_args(s[s.index('(') + 1:-1]): if '=' in arg: kw, value = arg.split('=', 1) kwargs[kw] = parse(value, names) else: posargs.append(parse(arg, names)) return callee(*posargs, **kwargs) else: try: return int(s) except __HOLE__: raise ValueError('Unknown function: %s' % s) def parse_fn(s, names): """Like parse(), but implicitly calls no-arg constructors.""" fn = parse(s, names) if isinstance(fn, type): return fn() else: return fn # pylint: disable=g-import-not-at-top from google.cloud.dataflow.transforms import window as window_module from google.cloud.dataflow.transforms import trigger as trigger_module # pylint: enable=g-import-not-at-top window_fn_names = dict(window_module.__dict__) window_fn_names.update({'CustomTimestampingFixedWindowsWindowFn': CustomTimestampingFixedWindowsWindowFn}) trigger_names = {'Default': DefaultTrigger} trigger_names.update(trigger_module.__dict__) window_fn = parse_fn(spec.get('window_fn', 'GlobalWindows'), window_fn_names) trigger_fn = parse_fn(spec.get('trigger_fn', 'Default'), trigger_names) accumulation_mode = getattr( AccumulationMode, spec.get('accumulation_mode', 'ACCUMULATING').upper()) output_time_fn = getattr( OutputTimeFn, spec.get('output_time_fn', 'OUTPUT_AT_EOW').upper()) allowed_lateness = float(spec.get('allowed_lateness', '-inf')) driver = GeneralTriggerDriver( Windowing(window_fn, trigger_fn, accumulation_mode, output_time_fn)) state = InMemoryUnmergedState() output = [] watermark = MIN_TIMESTAMP def fire_timers(): to_fire = state.get_and_clear_timers(watermark) while to_fire: for timer_window, (name, time_domain, t_timestamp) in to_fire: for wvalue in driver.process_timer( timer_window, name, time_domain, t_timestamp, state): window, = wvalue.windows output.append({'window': [window.start, window.end - 1], 'values': sorted(wvalue.value), 'timestamp': wvalue.timestamp}) to_fire = state.get_and_clear_timers(watermark) for line in spec['transcript']: action, params = line.items()[0] if action != 'expect': # Fail if we have output that was not expected in the transcript. 
self.assertEquals( [], output, msg='Unexpected output: %s before %s' % (output, line)) if action == 'input': bundle = [ WindowedValue(t, t, window_fn.assign(WindowFn.AssignContext(t, t))) for t in params] output = [{'window': [wvalue.windows[0].start, wvalue.windows[0].end - 1], 'values': sorted(wvalue.value), 'timestamp': wvalue.timestamp} for wvalue in driver.process_elements(state, bundle, watermark)] fire_timers() elif action == 'watermark': watermark = params fire_timers() elif action == 'expect': for expected_output in params: for candidate in output: if all(candidate[k] == expected_output[k] for k in candidate if k in expected_output): output.remove(candidate) break else: self.fail('Unmatched output %s in %s' % (expected_output, output)) elif action == 'state': # TODO(robertwb): Implement once we support allowed lateness. pass else: self.fail('Unknown action: ' + action) # Fail if we have output that was not expected in the transcript. self.assertEquals([], output, msg='Unexpected output: %s' % output)
ValueError
dataset/ETHPy150Open GoogleCloudPlatform/DataflowPythonSDK/google/cloud/dataflow/transforms/trigger_test.py/TranscriptTest._run_log
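ValueError fills this hole: int() raises it for non-numeric text, and the transcript parser converts that into an "unknown function" error. The leaf case reduced to a few lines:

def parse_leaf(s, names):
    s = s.strip()
    if s in names:
        return names[s]
    try:
        return int(s)
    except ValueError:
        raise ValueError('Unknown function: %s' % s)

# parse_leaf('42', {}) -> 42
# parse_leaf('Default', {'Default': object}) -> object
# parse_leaf('Bogus', {}) raises ValueError: Unknown function: Bogus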
2,891
def scan_command_buffers(state): try: state.expect(EOF) except __HOLE__: raise VimError('trailing characters') return None, [TokenCommandBuffers(), TokenEof()]
ValueError
dataset/ETHPy150Open guillermooo/Vintageous/ex/parser/scanner_command_buffers.py/scan_command_buffers
2,892
def datetime_u(s):
    fmt = "%Y-%m-%dT%H:%M:%S"
    try:
        return _strptime(s, fmt)
    except __HOLE__:
        try:
            # strip utc offset
            if s[-3] == ":" and s[-6] in (' ', '-', '+'):
                warnings.warn('removing unsupported UTC offset', RuntimeWarning)
                s = s[:-6]
            # parse microseconds
            try:
                return _strptime(s, fmt + ".%f")
            except:
                return _strptime(s, fmt)
        except ValueError:
            # strip microseconds (not supported on this platform)
            if "." in s:
                warnings.warn('removing unsupported microseconds', RuntimeWarning)
                s = s[:s.index(".")]
            return _strptime(s, fmt)
ValueError
dataset/ETHPy150Open uwdata/termite-visualizations/web2py/gluon/contrib/pysimplesoap/simplexml.py/datetime_u
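ValueError is the hole because datetime.strptime() raises it whenever the text does not match the format, so each branch retries with a more forgiving format. A small self-contained version of that retry chain; parse_iso is an illustrative name:

from datetime import datetime


def parse_iso(s):
    for fmt in ("%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S"):
        try:
            return datetime.strptime(s, fmt)
        except ValueError:
            continue  # wrong format for this branch, try the next one
    raise ValueError("unsupported timestamp: %r" % s)

# parse_iso('2016-04-01T12:30:00')        -> datetime(2016, 4, 1, 12, 30)
# parse_iso('2016-04-01T12:30:00.250000') -> datetime(2016, 4, 1, 12, 30, 0, 250000)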
2,893
def get_namespace_uri(self, ns): "Return the namespace uri for a prefix" element = self._element while element is not None and element.attributes is not None: try: return element.attributes['xmlns:%s' % ns].value except __HOLE__: element = element.parentNode
KeyError
dataset/ETHPy150Open uwdata/termite-visualizations/web2py/gluon/contrib/pysimplesoap/simplexml.py/SimpleXMLElement.get_namespace_uri
2,894
def __call__(self, tag=None, ns=None, children=False, root=False, error=True, ): "Search (even in child nodes) and return a child tag by name" try: if root: # return entire document return SimpleXMLElement( elements=[self.__document.documentElement], document=self.__document, namespace=self.__ns, prefix=self.__prefix, namespaces_map=self.__namespaces_map ) if tag is None: # if no name given, iterate over siblings (same level) return self.__iter__() if children: # future: filter children? by ns? return self.children() elements = None if isinstance(tag, int): # return tag by index elements=[self.__elements[tag]] if ns and not elements: for ns_uri in isinstance(ns, (tuple, list)) and ns or (ns, ): log.debug('searching %s by ns=%s', tag, ns_uri) elements = self._element.getElementsByTagNameNS(ns_uri, tag) if elements: break if self.__ns and not elements: log.debug('searching %s by ns=%s', tag, self.__ns) elements = self._element.getElementsByTagNameNS(self.__ns, tag) if not elements: log.debug('searching %s', tag) elements = self._element.getElementsByTagName(tag) if not elements: #log.debug(self._element.toxml()) if error: raise AttributeError(u"No elements found") else: return return SimpleXMLElement( elements=elements, document=self.__document, namespace=self.__ns, prefix=self.__prefix, namespaces_map=self.__namespaces_map) except __HOLE__, e: raise AttributeError(u"Tag not found: %s (%s)" % (tag, unicode(e)))
AttributeError
dataset/ETHPy150Open uwdata/termite-visualizations/web2py/gluon/contrib/pysimplesoap/simplexml.py/SimpleXMLElement.__call__
2,895
def unmarshall(self, types, strict=True): "Convert to python values the current serialized xml element" # types is a dict of {tag name: convertion function} # strict=False to use default type conversion if not specified # example: types={'p': {'a': int,'b': int}, 'c': [{'d':str}]} # expected xml: <p><a>1</a><b>2</b></p><c><d>hola</d><d>chau</d> # returnde value: {'p': {'a':1,'b':2}, `'c':[{'d':'hola'},{'d':'chau'}]} d = {} for node in self(): name = str(node.get_local_name()) ref_name_type = None # handle multirefs: href="#id0" if 'href' in node.attributes().keys(): href = node['href'][1:] for ref_node in self(root=True)("multiRef"): if ref_node['id'] == href: node = ref_node ref_name_type = ref_node['xsi:type'].split(":")[1] break try: fn = types[name] except (KeyError, ), e: if node.get_namespace_uri("soapenc"): fn = None # ignore multirefs! elif 'xsi:type' in node.attributes().keys(): xsd_type = node['xsi:type'].split(":")[1] fn = REVERSE_TYPE_MAP[xsd_type] elif strict: raise TypeError(u"Tag: %s invalid (type not found)" % (name,)) else: # if not strict, use default type conversion fn = unicode if isinstance(fn, list): # append to existing list (if any) - unnested dict arrays - value = d.setdefault(name, []) children = node.children() for child in (children and children() or []): # Readability counts value.append(child.unmarshall(fn[0], strict)) elif isinstance(fn, tuple): value = [] _d = {} children = node.children() as_dict = len(fn) == 1 and isinstance(fn[0], dict) for child in (children and children() or []): # Readability counts if as_dict: _d.update(child.unmarshall(fn[0], strict)) # Merging pairs else: value.append(child.unmarshall(fn[0], strict)) if as_dict: value.append(_d) if name in d: _tmp = list(d[name]) _tmp.extend(value) value = tuple(_tmp) else: value = tuple(value) elif isinstance(fn, dict): ##if ref_name_type is not None: ## fn = fn[ref_name_type] children = node.children() value = children and children.unmarshall(fn, strict) else: if fn is None: # xsd:anyType not unmarshalled value = node elif str(node) or fn == str: try: # get special deserialization function (if any) fn = TYPE_UNMARSHAL_FN.get(fn,fn) if fn == str: # always return an unicode object: value = unicode(node) else: value = fn(unicode(node)) except (ValueError, __HOLE__), e: raise ValueError(u"Tag: %s: %s" % (name, unicode(e))) else: value = None d[name] = value return d
TypeError
dataset/ETHPy150Open uwdata/termite-visualizations/web2py/gluon/contrib/pysimplesoap/simplexml.py/SimpleXMLElement.unmarshall
2,896
def _update_ns(self, name):
        """Replace the defined namespace alias with those used by the client."""
        pref = self.__ns_rx.search(name)
        if pref:
            pref = pref.groups()[0]
            try:
                name = name.replace(pref, self.__namespaces_map[pref])
            except __HOLE__:
                log.warning('Unknown namespace alias %s' % name)
        return name
KeyError
dataset/ETHPy150Open uwdata/termite-visualizations/web2py/gluon/contrib/pysimplesoap/simplexml.py/SimpleXMLElement._update_ns
2,897
def make_algorithm(self, algorithm_name): try: return self.jws_algorithms[algorithm_name] except __HOLE__: raise NotImplementedError('Algorithm not supported')
KeyError
dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/itsdangerous.py/JSONWebSignatureSerializer.make_algorithm
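KeyError fills this hole: looking up an unknown algorithm name in the registry dict fails, and the serializer surfaces it as NotImplementedError. The same shape with an illustrative registry, not itsdangerous' real table:

ALGORITHMS = {'HS256': 'hmac-sha256', 'HS512': 'hmac-sha512'}


def make_algorithm(name):
    try:
        return ALGORITHMS[name]
    except KeyError:
        raise NotImplementedError('Algorithm not supported')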
2,898
def test_all(self): # Blacklisted modules and packages blacklist = set([ # Will raise a SyntaxError when compiling the exec statement '__future__', ]) if not sys.platform.startswith('java'): # In case _socket fails to build, make this test fail more gracefully # than an AttributeError somewhere deep in CGIHTTPServer. import _socket # rlcompleter needs special consideration; it import readline which # initializes GNU readline which calls setlocale(LC_CTYPE, "")... :-( try: import rlcompleter import locale except __HOLE__: pass else: locale.setlocale(locale.LC_CTYPE, 'C') ignored = [] failed_imports = [] lib_dir = os.path.dirname(os.path.dirname(__file__)) for path, modname in self.walk_modules(lib_dir, ""): m = modname blacklisted = False while m: if m in blacklist: blacklisted = True break m = m.rpartition('.')[0] if blacklisted: continue if support.verbose: print(modname) try: # This heuristic speeds up the process by removing, de facto, # most test modules (and avoiding the auto-executing ones). with open(path, "rb") as f: if b"__all__" not in f.read(): raise NoAll(modname) self.check_all(modname) except NoAll: ignored.append(modname) except FailedImport: failed_imports.append(modname) if support.verbose: print('Following modules have no __all__ and have been ignored:', ignored) print('Following modules failed to be imported:', failed_imports)
ImportError
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test___all__.py/AllTest.test_all
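ImportError is the hole: optional modules are probed with a plain import and skipped when the platform does not provide them. The probe on its own:

try:
    import rlcompleter  # noqa: F401 -- imported only to initialise readline
    import locale
except ImportError:
    pass  # platform has no readline support; nothing to configure
else:
    locale.setlocale(locale.LC_CTYPE, 'C')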
2,899
def setup(self): try: extension = self._config['extension_url'] KUBERNETES_API_URL = self._config['kubernetes_api_url'] + extension user = self._config['user'] password = self._config['password'] verify = self._config['verify'] except __HOLE__: self._log.exception('Configuration file does not contain required fields.') raise self._log.debug('Connecting to Kubernetes endpoint %s via api_client.' % KUBERNETES_API_URL) self.client = requests.get(KUBERNETES_API_URL, auth=HTTPBasicAuth(user, password), verify=verify, stream=True)
KeyError
dataset/ETHPy150Open StackStorm/st2contrib/packs/kubernetes/sensors/third_party_resource.py/ThirdPartyResource.setup
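The final record reads required settings out of a config dict; a missing field raises KeyError, which is logged and re-raised so the sensor fails fast. A minimal version of that validation, with illustrative field names:

import logging

log = logging.getLogger(__name__)


def read_required(config):
    try:
        return config['kubernetes_api_url'], config['user'], config['password']
    except KeyError:
        # A required field is absent; record it and let the caller abort setup.
        log.exception('Configuration file does not contain required fields.')
        raise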