Columns:
Unnamed: 0 (int64, values 0 to 10k)
function (string, lengths 79 to 138k)
label (string, 20 distinct values)
info (string, lengths 42 to 261)
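Each row below pairs a Python function in which one except clause has been masked with the token __HOLE__ (the function column) with the exception class that fills that position (the label column) and the path of the source file it was extracted from (the info column). The sketch below is only an illustration of the row format, not code that ships with the dataset; it assumes a row is available as a plain dict keyed by the column names above and reuses the values of row 4,103 verbatim.

row = {
    "function": "def getPrimes(self): try: return primes.parseModuliFile(self.moduliRoot+'/moduli') except __HOLE__: return None",
    "label": "IOError",
    "info": "dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/conch/openssh_compat/factory.py/OpenSSHFactory.getPrimes",
}

# Substituting the label for the placeholder recovers the original except clause.
restored = row["function"].replace("__HOLE__", row["label"])
assert "except IOError:" in restored

The info field appears to concatenate the dataset prefix, the originating GitHub repository, the file path, and the qualified function name, separated by slashes.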
4,100
def main(): from optparse import OptionParser parser = OptionParser(usage = "usage: %prog [options] file(s)", version = __version__) parser.add_option( "-f", "--file", metavar = "FILE", dest = "tagfile", default = "tags", help = 'Write tags into FILE (default: "tags"). Use "-" to write ' 'tags to stdout.') parser.add_option( "", "--sort", metavar="[yes|foldcase|no]", dest = "sort", choices = ["yes", "no", "foldcase"], default = "yes", help = 'Produce sorted output. Acceptable values are "yes", ' '"no", and "foldcase". Default is "yes".') options, args = parser.parse_args() if options.tagfile == '-': output = sys.stdout else: output = open(options.tagfile, 'wb') for filename in args: f = open(filename, 'rb') buf = f.read() try: buf = buf.decode('utf-8') except __HOLE__: pass lines = buf.splitlines() f.close() del buf sections = findSections(filename, lines) genTagsFile(output, sectionsToTags(sections), sort=options.sort) output.flush() output.close()
UnicodeDecodeError
dataset/ETHPy150Open jszakmeister/rst2ctags/rst2ctags.py/main
4,101
def _handle_uploaded_file(self, field_name): "Process an uploaded file" try: file = self.files[field_name] filepath = self._get_upload_name(file.name) except __HOLE__: return '' destination = open(settings.MEDIA_ROOT + filepath, 'wb+') for chunk in file.chunks(): destination.write(chunk) destination.close() return settings.MEDIA_URL + filepath
KeyError
dataset/ETHPy150Open treeio/treeio/treeio/infrastructure/forms.py/ItemForm._handle_uploaded_file
4,102
def getPrivateKeys(self): """ Return the server private keys. """ privateKeys = {} for filename in os.listdir(self.dataRoot): if filename[:9] == 'ssh_host_' and filename[-4:]=='_key': fullPath = os.path.join(self.dataRoot, filename) try: key = keys.Key.fromFile(fullPath) except __HOLE__, e: if e.errno == errno.EACCES: # Not allowed, let's switch to root key = runAsEffectiveUser(0, 0, keys.Key.fromFile, fullPath) keyType = keys.objectType(key.keyObject) privateKeys[keyType] = key else: raise except Exception, e: log.msg('bad private key file %s: %s' % (filename, e)) else: keyType = keys.objectType(key.keyObject) privateKeys[keyType] = key return privateKeys
IOError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/conch/openssh_compat/factory.py/OpenSSHFactory.getPrivateKeys
4,103
def getPrimes(self): try: return primes.parseModuliFile(self.moduliRoot+'/moduli') except __HOLE__: return None
IOError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/conch/openssh_compat/factory.py/OpenSSHFactory.getPrimes
4,104
def files(b, r): logging.info('searching for configuration files') # Visit every file in `/etc` except those on the exclusion list above. for dirpath, dirnames, filenames in os.walk('/etc'): # Determine if this entire directory should be ignored by default. ignored = r.ignore_file(dirpath) # Collect up the full pathname to each file, `lstat` them all, and # note which ones will probably be ignored. files = [] for filename in filenames: pathname = os.path.join(dirpath, filename) try: files.append((pathname, os.lstat(pathname), r.ignore_file(pathname, ignored))) except OSError as e: logging.warning('{0} caused {1} - try running as root'. format(pathname, errno.errorcode[e.errno])) # Track the ctime of each file in this directory. Weed out false # positives by ignoring files with common ctimes. ctimes = defaultdict(lambda: 0) # Map the ctimes of each directory entry that isn't being ignored. for pathname, s, ignored in files: if not ignored: ctimes[s.st_ctime] += 1 for dirname in dirnames: try: ctimes[os.lstat(os.path.join(dirpath, dirname)).st_ctime] += 1 except OSError: pass for pathname, s, ignored in files: # Always ignore block special files, character special files, # pipes, and sockets. They end up looking like deadlocks. if stat.S_ISBLK(s.st_mode) \ or stat.S_ISCHR(s.st_mode) \ or stat.S_ISFIFO(s.st_mode) \ or stat.S_ISSOCK(s.st_mode): continue # Make sure this pathname will actually be able to be included # in the blueprint. This is a bit of a cop-out since the file # could be important but at least it's not a crashing bug. try: pathname = unicode(pathname) except UnicodeDecodeError: logging.warning('{0} not UTF-8 - skipping it'. format(repr(pathname)[1:-1])) continue # Ignore ignored files and files that share their ctime with other # files in the directory. This is a very strong indication that # the file is original to the system and should be ignored. if ignored \ or 1 < ctimes[s.st_ctime] and r.ignore_file(pathname, True): continue # Check for a Mustache template and an optional shell script # that templatize this file. try: template = open( '{0}.blueprint-template.mustache'.format(pathname)).read() except __HOLE__: template = None try: data = open( '{0}.blueprint-template.sh'.format(pathname)).read() except IOError: data = None # The content is used even for symbolic links to determine whether # it has changed from the packaged version. try: content = open(pathname).read() except IOError: #logging.warning('{0} not readable'.format(pathname)) continue # Ignore files that are unchanged from their packaged version. if _unchanged(pathname, content, r): continue # Resolve the rest of the file's metadata from the # `/etc/passwd` and `/etc/group` databases. try: pw = pwd.getpwuid(s.st_uid) owner = pw.pw_name except KeyError: owner = s.st_uid try: gr = grp.getgrgid(s.st_gid) group = gr.gr_name except KeyError: group = s.st_gid mode = '{0:o}'.format(s.st_mode) # A symbolic link's content is the link target. if stat.S_ISLNK(s.st_mode): content = os.readlink(pathname) # Ignore symbolic links providing backwards compatibility # between SystemV init and Upstart. if '/lib/init/upstart-job' == content: continue # Ignore symbolic links into the Debian alternatives system. # These are almost certainly managed by packages. if content.startswith('/etc/alternatives/'): continue b.add_file(pathname, content=content, encoding='plain', group=group, mode=mode, owner=owner) # A regular file is stored as plain text only if it is valid # UTF-8, which is required for JSON serialization. 
else: kwargs = dict(group=group, mode=mode, owner=owner) try: if template: if data: kwargs['data'] = data.decode('utf_8') kwargs['template'] = template.decode('utf_8') else: kwargs['content'] = content.decode('utf_8') kwargs['encoding'] = 'plain' except UnicodeDecodeError: if template: if data: kwargs['data'] = base64.b64encode(data) kwargs['template'] = base64.b64encode(template) else: kwargs['content'] = base64.b64encode(content) kwargs['encoding'] = 'base64' b.add_file(pathname, **kwargs) # If this file is a service init script or config , create a # service resource. try: manager, service = util.parse_service(pathname) if not r.ignore_service(manager, service): b.add_service(manager, service) b.add_service_package(manager, service, 'apt', *_dpkg_query_S(pathname)) b.add_service_package(manager, service, 'yum', *_rpm_qf(pathname)) except ValueError: pass
IOError
dataset/ETHPy150Open devstructure/blueprint/blueprint/backend/files.py/files
4,105
def _dpkg_query_S(pathname): """ Return a list of package names that contain `pathname` or `[]`. This really can be a list thanks to `dpkg-divert`(1). """ # Cache the pathname-to-package mapping. if not hasattr(_dpkg_query_S, '_cache'): _dpkg_query_S._cache = defaultdict(set) cache_ref = _dpkg_query_S._cache for listname in glob.iglob('/var/lib/dpkg/info/*.list'): package = os.path.splitext(os.path.basename(listname))[0] for line in open(listname): cache_ref[line.rstrip()].add(package) # Return the list of packages that contain this file, if any. if pathname in _dpkg_query_S._cache: return list(_dpkg_query_S._cache[pathname]) # If `pathname` isn't in a package but is a symbolic link, see if the # symbolic link is in a package. `postinst` programs commonly display # this pattern. try: return _dpkg_query_S(os.readlink(pathname)) except __HOLE__: pass return []
OSError
dataset/ETHPy150Open devstructure/blueprint/blueprint/backend/files.py/_dpkg_query_S
4,106
def _dpkg_md5sum(package, pathname): """ Find the MD5 sum of the packaged version of pathname or `None` if the `pathname` does not come from a Debian package. """ # Cache any MD5 sums stored in the status file. These are typically # conffiles and the like. if not hasattr(_dpkg_md5sum, '_status_cache'): _dpkg_md5sum._status_cache = {} cache_ref = _dpkg_md5sum._status_cache try: pattern = re.compile(r'^ (\S+) ([0-9a-f]{32})') for line in open('/var/lib/dpkg/status'): match = pattern.match(line) if not match: continue cache_ref[match.group(1)] = match.group(2) except IOError: pass # Return this file's MD5 sum, if it can be found. try: return _dpkg_md5sum._status_cache[pathname] except KeyError: pass # Cache the MD5 sums for files in this package. if not hasattr(_dpkg_md5sum, '_cache'): _dpkg_md5sum._cache = defaultdict(dict) if package not in _dpkg_md5sum._cache: cache_ref = _dpkg_md5sum._cache[package] try: for line in open('/var/lib/dpkg/info/{0}.md5sums'.format(package)): md5sum, rel_pathname = line.split(None, 1) cache_ref['/{0}'.format(rel_pathname.rstrip())] = md5sum except IOError: pass # Return this file's MD5 sum, if it can be found. try: return _dpkg_md5sum._cache[package][pathname] except __HOLE__: pass return None
KeyError
dataset/ETHPy150Open devstructure/blueprint/blueprint/backend/files.py/_dpkg_md5sum
4,107
def _rpm_qf(pathname): """ Return a list of package names that contain `pathname` or `[]`. RPM might not actually support a single pathname being claimed by more than one package but `dpkg` does so the interface is maintained. """ try: p = subprocess.Popen(['rpm', '--qf=%{NAME}', '-qf', pathname], close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except __HOLE__: return [] stdout, stderr = p.communicate() if 0 != p.returncode: return [] return [stdout]
OSError
dataset/ETHPy150Open devstructure/blueprint/blueprint/backend/files.py/_rpm_qf
4,108
def _rpm_md5sum(pathname): """ Find the MD5 sum of the packaged version of pathname or `None` if the `pathname` does not come from an RPM. """ if not hasattr(_rpm_md5sum, '_cache'): _rpm_md5sum._cache = {} symlinks = [] try: p = subprocess.Popen(['rpm', '-qa', '--dump'], close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) pattern = re.compile(r'^(/etc/\S+) \d+ \d+ ([0-9a-f]+) ' # No , '(0\d+) \S+ \S+ \d \d \d (\S+)$') for line in p.stdout: match = pattern.match(line) if match is None: continue if '0120777' == match.group(3): symlinks.append((match.group(1), match.group(4))) else: _rpm_md5sum._cache[match.group(1)] = match.group(2) # Find the MD5 sum of the targets of any symbolic links, even # if the target is outside of /etc. pattern = re.compile(r'^(/\S+) \d+ \d+ ([0-9a-f]+) ' # No , '(0\d+) \S+ \S+ \d \d \d (\S+)$') for pathname, target in symlinks: if '/' != target[0]: target = os.path.normpath(os.path.join( os.path.dirname(pathname), target)) if target in _rpm_md5sum._cache: _rpm_md5sum._cache[pathname] = _rpm_md5sum._cache[target] else: p = subprocess.Popen(['rpm', '-qf', '--dump', target], close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) for line in p.stdout: match = pattern.match(line) if match is not None and target == match.group(1): _rpm_md5sum._cache[pathname] = match.group(2) except __HOLE__: pass return _rpm_md5sum._cache.get(pathname, None)
OSError
dataset/ETHPy150Open devstructure/blueprint/blueprint/backend/files.py/_rpm_md5sum
4,109
def load_jsonfile(pkg): data = None jsonFilepath = "/".join(['Packages', 'R-Box', 'packages', '%s.json' % pkg]) try: data = json.loads(sublime.load_resource(jsonFilepath)) except __HOLE__: pass if data: return data jsonFilepath = os.path.join(sublime.packages_path(), "User", 'R-Box', 'packages', '%s.json' % pkg) if os.path.exists(jsonFilepath): with open(jsonFilepath, "r") as f: data = json.load(f) return data
IOError
dataset/ETHPy150Open randy3k/R-Box/completions.py/load_jsonfile
4,110
def exit(self): try: os.remove(self.temp_file) except __HOLE__: pass if self.linter and self.linter.is_alive(): task_queue.put({"cmd": "exit"}) self.linter.join()
OSError
dataset/ETHPy150Open maralla/vim-linter/pythonx/linter/manager.py/Linter.exit
4,111
def __enter__(self): try: self._lock_state_file() if isfile(self.path): self._state = util.load_json(self.path) except __HOLE__: self._state = {} self._prev_state = deepcopy(self._state) return self._state
ValueError
dataset/ETHPy150Open platformio/platformio/platformio/app.py/State.__enter__
4,112
def derive_datasets(self, input_dataset_dict, stack_output_info, tile_type_info): """ Overrides abstract function in stacker class. Called in Stacker.stack_derived() function. Creates PQA-masked NDVI stack Arguments: input_dataset_dict: Dict keyed by processing level (e.g. ORTHO, NBAR, PQA, DEM) containing all tile info which can be used within the function A sample is shown below (including superfluous band-specific information): { 'NBAR': {'band_name': 'Visible Blue', 'band_tag': 'B10', 'end_datetime': datetime.datetime(2000, 2, 9, 23, 46, 36, 722217), 'end_row': 77, 'level_name': 'NBAR', 'nodata_value': -999L, 'path': 91, 'satellite_tag': 'LS7', 'sensor_name': 'ETM+', 'start_datetime': datetime.datetime(2000, 2, 9, 23, 46, 12, 722217), 'start_row': 77, 'tile_layer': 1, 'tile_pathname': '/g/data/v10/datacube/EPSG4326_1deg_0.00025pixel/LS7_ETM/150_-025/2000/LS7_ETM_NBAR_150_-025_2000-02-09T23-46-12.722217.tif', 'x_index': 150, 'y_index': -25}, 'ORTHO': {'band_name': 'Thermal Infrared (Low Gain)', 'band_tag': 'B61', 'end_datetime': datetime.datetime(2000, 2, 9, 23, 46, 36, 722217), 'end_row': 77, 'level_name': 'ORTHO', 'nodata_value': 0L, 'path': 91, 'satellite_tag': 'LS7', 'sensor_name': 'ETM+', 'start_datetime': datetime.datetime(2000, 2, 9, 23, 46, 12, 722217), 'start_row': 77, 'tile_layer': 1, 'tile_pathname': '/g/data/v10/datacube/EPSG4326_1deg_0.00025pixel/LS7_ETM/150_-025/2000/LS7_ETM_ORTHO_150_-025_2000-02-09T23-46-12.722217.tif', 'x_index': 150, 'y_index': -25}, 'PQA': {'band_name': 'Pixel Quality Assurance', 'band_tag': 'PQA', 'end_datetime': datetime.datetime(2000, 2, 9, 23, 46, 36, 722217), 'end_row': 77, 'level_name': 'PQA', 'nodata_value': None, 'path': 91, 'satellite_tag': 'LS7', 'sensor_name': 'ETM+', 'start_datetime': datetime.datetime(2000, 2, 9, 23, 46, 12, 722217), 'start_row': 77, 'tile_layer': 1, 'tile_pathname': '/g/data/v10/datacube/EPSG4326_1deg_0.00025pixel/LS7_ETM/150_-025/2000/LS7_ETM_PQA_150_-025_2000-02-09T23-46-12.722217.tif, 'x_index': 150, 'y_index': -25} } Arguments (Cont'd): tile_type_info: dict containing tile type information. Obtained from stacker object (e.g: stacker.tile_type_dict[tile_type_id]). A sample is shown below {'crs': 'EPSG:4326', 'file_extension': '.tif', 'file_format': 'GTiff', 'format_options': 'COMPRESS=LZW,BIGTIFF=YES', 'tile_directory': 'EPSG4326_1deg_0.00025pixel', 'tile_type_id': 1L, 'tile_type_name': 'Unprojected WGS84 1-degree at 4000 pixels/degree', 'unit': 'degree', 'x_origin': 0.0, 'x_pixel_size': Decimal('0.00025000000000000000'), 'x_pixels': 4000L, 'x_size': 1.0, 'y_origin': 0.0, 'y_pixel_size': Decimal('0.00025000000000000000'), 'y_pixels': 4000L, 'y_size': 1.0} Function must create one or more GDAL-supported output datasets. Useful functions in the Stacker class include Stacker.get_pqa_mask(), but it is left to the coder to produce exactly what is required for a single slice of the temporal stack of derived quantities. Returns: output_dataset_info: Dict keyed by stack filename containing metadata info for GDAL-supported output datasets created by this function. Note that the key(s) will be used as the output filename for the VRT temporal stack and each dataset created must contain only a single band. 
An example is as follows: {'/g/data/v10/tmp/ndvi/NDVI_stack_150_-025.vrt': {'band_name': 'Normalised Differential Vegetation Index with PQA applied', 'band_tag': 'NDVI', 'end_datetime': datetime.datetime(2000, 2, 9, 23, 46, 36, 722217), 'end_row': 77, 'level_name': 'NDVI', 'nodata_value': None, 'path': 91, 'satellite_tag': 'LS7', 'sensor_name': 'ETM+', 'start_datetime': datetime.datetime(2000, 2, 9, 23, 46, 12, 722217), 'start_row': 77, 'tile_layer': 1, 'tile_pathname': '/g/data/v10/tmp/ndvi/LS7_ETM_NDVI_150_-025_2000-02-09T23-46-12.722217.tif', 'x_index': 150, 'y_index': -25} } """ assert type(input_dataset_dict) == dict, 'input_dataset_dict must be a dict' log_multiline(logger.debug, input_dataset_dict, 'input_dataset_dict', '\t') # Test function to copy ORTHO & NBAR band datasets with pixel quality mask applied # to an output directory for stacking output_dataset_dict = {} nbar_dataset_info = input_dataset_dict.get('NBAR') # Only need NBAR data for NDVI if nbar_dataset_info is None: return #thermal_dataset_info = input_dataset_dict['ORTHO'] # Could have one or two thermal bands # Instantiate band lookup object with all required lookup parameters lookup = BandLookup(data_cube=self, lookup_scheme_name='LANDSAT-LS5/7', tile_type_id=tile_type_info['tile_type_id'], satellite_tag=nbar_dataset_info['satellite_tag'], sensor_name=nbar_dataset_info['sensor_name'], level_name=nbar_dataset_info['level_name'] ) nbar_dataset_path = nbar_dataset_info['tile_pathname'] #======================================================================= # # Generate sorted list of band info for this tile type, satellite and sensor # band_dict = self.bands[tile_type_info['tile_type_id']][(nbar_dataset_info['satellite_tag'], nbar_dataset_info['sensor_name'])] # band_info_list = [band_dict[tile_layer] for tile_layer in sorted(band_dict.keys()) if band_dict[tile_layer]['level_name'] == 'NBAR'] #======================================================================= # Get a boolean mask from the PQA dataset (use default parameters for mask and dilation) pqa_mask = self.get_pqa_mask(input_dataset_dict['PQA']['tile_pathname']) nbar_dataset = gdal.Open(nbar_dataset_path) assert nbar_dataset, 'Unable to open dataset %s' % nbar_dataset logger.debug('Opened NBAR dataset %s', nbar_dataset_path) #no_data_value = nbar_dataset_info['nodata_value'] no_data_value = -32767 # Need a value outside the scaled range -10000 - +10000 for output_tag in ['NDVI']: # List of outputs to generate from each file - just NDVI at this stage. 
output_stack_path = os.path.join(self.output_dir, '%s_pqa_masked.vrt' % output_tag) output_tile_path = os.path.join(self.output_dir, re.sub('\.\w+$', '_%s%s' % (output_tag, tile_type_info['file_extension']), os.path.basename(nbar_dataset_path) ) ) # Copy metadata for eventual inclusion in stack file output # This could also be written to the output tile if required output_dataset_info = dict(nbar_dataset_info) output_dataset_info['tile_pathname'] = output_tile_path # This is the most important modification - used to find output_dataset_info['band_name'] = '%s with PQA mask applied' % output_tag output_dataset_info['band_tag'] = '%s-PQA' % output_tag output_dataset_info['tile_layer'] = 1 # Check for existing, valid file if self.refresh or not os.path.exists(output_tile_path) or not gdal.Open(output_tile_path): gdal_driver = gdal.GetDriverByName(tile_type_info['file_format']) output_dataset = gdal_driver.Create(output_tile_path, nbar_dataset.RasterXSize, nbar_dataset.RasterYSize, 1, nbar_dataset.GetRasterBand(1).DataType, tile_type_info['format_options'].split(',')) assert output_dataset, 'Unable to open output dataset %s'% output_dataset output_dataset.SetGeoTransform(nbar_dataset.GetGeoTransform()) output_dataset.SetProjection(nbar_dataset.GetProjection()) output_band = output_dataset.GetRasterBand(1) # Calculate NDVI here # Remember band indices are one-based try: # Read and adjust arrays for NIR and R NIR_array = nbar_dataset.GetRasterBand(lookup.band_no['NIR']).ReadAsArray() * lookup.adjustment_multiplier['NIR'] + lookup.adjustment_offset['NIR'] * SCALE_FACTOR R_array = nbar_dataset.GetRasterBand(lookup.band_no['R']).ReadAsArray() * lookup.adjustment_multiplier['R'] + lookup.adjustment_offset['R'] * SCALE_FACTOR except __HOLE__: return data_array = numpy.true_divide(NIR_array - R_array, NIR_array + R_array) * SCALE_FACTOR self.apply_pqa_mask(data_array, pqa_mask, no_data_value) output_band.WriteArray(data_array) output_band.SetNoDataValue(no_data_value) output_band.FlushCache() # This is not strictly necessary - copy metadata to output dataset output_dataset_metadata = nbar_dataset.GetMetadata() if output_dataset_metadata: output_dataset.SetMetadata(output_dataset_metadata) log_multiline(logger.debug, output_dataset_metadata, 'output_dataset_metadata', '\t') output_dataset.FlushCache() logger.info('Finished writing %s', output_tile_path) else: logger.info('Skipped existing, valid dataset %s', output_tile_path) output_dataset_dict[output_stack_path] = output_dataset_info # log_multiline(logger.debug, output_dataset_info, 'output_dataset_info', '\t') log_multiline(logger.debug, output_dataset_dict, 'output_dataset_dict', '\t') # NDVI dataset processed - return info return output_dataset_dict
TypeError
dataset/ETHPy150Open GeoscienceAustralia/agdc/examples/ndvi_stacker.py/NDVIStacker.derive_datasets
4,113
def _flatten_params(params): """Converts a dictionary of parameters to a list of parameters. Any unicode strings in keys or values will be encoded as UTF-8. Args: params: Dictionary mapping parameter keys to values. Values will be converted to a string and added to the list as tuple (key, value). If a values is iterable and not a string, each contained value will be added as a separate (key, value) tuple. Returns: List of (key, value) tuples. """ def get_string(value): if isinstance(value, unicode): return unicode(value).encode('utf-8') else: return str(value) param_list = [] for key, value in params.iteritems(): key = get_string(key) if isinstance(value, basestring): param_list.append((key, get_string(value))) else: try: iterator = iter(value) except __HOLE__: param_list.append((key, str(value))) else: param_list.extend((key, get_string(v)) for v in iterator) return param_list
TypeError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/api/taskqueue/taskqueue.py/_flatten_params
4,114
@staticmethod def __determine_eta_posix(eta=None, countdown=None, current_time=None): """Determines the ETA for a task. If 'eta' and 'countdown' are both None, the current time will be used. Otherwise, only one of them may be specified. Args: eta: A datetime.datetime specifying the absolute ETA or None; this may be timezone-aware or timezone-naive. countdown: Count in seconds into the future from the present time that the ETA should be assigned to. current_time: Function that returns the current datetime. (Defaults to time.time if None is provided.) Returns: A float giving a POSIX timestamp containing the ETA. Raises: InvalidTaskError if the parameters are invalid. """ if not current_time: current_time = time.time if eta is not None and countdown is not None: raise InvalidTaskError('May not use a countdown and ETA together') elif eta is not None: if not isinstance(eta, datetime.datetime): raise InvalidTaskError('ETA must be a datetime.datetime instance') elif eta.tzinfo is None: return time.mktime(eta.timetuple()) + eta.microsecond*1e-6 else: return calendar.timegm(eta.utctimetuple()) + eta.microsecond*1e-6 elif countdown is not None: try: countdown = float(countdown) except __HOLE__: raise InvalidTaskError('Countdown must be a number') except OverflowError: raise InvalidTaskError('Countdown out of range') else: return current_time() + countdown else: return current_time()
ValueError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/api/taskqueue/taskqueue.py/Task.__determine_eta_posix
4,115
@classmethod def fetch_async(cls, queue_or_queues, rpc=None): """Asynchronously get the queue details for multiple queues. Args: queue_or_queues: An iterable of Queue instances, or an iterable of strings corresponding to queue names, or a Queue instance or a string corresponding to a queue name. rpc: An optional UserRPC object. Returns: A UserRPC object, call get_result to complete the RPC and obtain the result. If an iterable (other than string) is provided as input, the result will be a list of of QueueStatistics objects, one for each queue in the order requested. Otherwise, if a single item was provided as input, then the result will be a single QueueStatistics object. Raises: TypeError: If queue_or_queues is not one of: Queue instance, string, an iterable containing only Queue instances or an iterable containing only strings. """ wants_list = True if isinstance(queue_or_queues, basestring): queue_or_queues = [queue_or_queues] wants_list = False try: queues_list = [queue for queue in queue_or_queues] except __HOLE__: queues_list = [queue_or_queues] wants_list = False contains_strs = any(isinstance(queue, basestring) for queue in queues_list) contains_queues = any(isinstance(queue, Queue) for queue in queues_list) if contains_strs and contains_queues: raise TypeError('queue_or_queues must contain either strings or Queue ' 'instances, not both.') if contains_strs: queues_list = [Queue(queue_name) for queue_name in queues_list] return cls._FetchMultipleQueues(queues_list, wants_list, rpc)
TypeError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/api/taskqueue/taskqueue.py/QueueStatistics.fetch_async
4,116
def delete_tasks_async(self, task, rpc=None): """Asynchronously deletes a Task or list of Tasks in this Queue. This function is identical to delete_tasks() except that it returns an asynchronous object. You can call get_result() on the return value to block on the call. Args: task: A Task instance or a list of Task instances that will be deleted from the Queue. rpc: An optional UserRPC object. Returns: A UserRPC object, call get_result to complete the RPC and obtain the Task or list of tasks passed into this call. Raises: BadTaskStateError: if the Task(s) to be deleted do not have task names or have already been deleted. DuplicateTaskNameError: if a Task is repeated in the request. """ try: tasks = list(iter(task)) except __HOLE__: tasks = [task] multiple = False else: multiple = True return self.__DeleteTasks(tasks, multiple, rpc)
TypeError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/api/taskqueue/taskqueue.py/Queue.delete_tasks_async
4,117
def add_async(self, task, transactional=False, rpc=None): """Asynchronously adds a Task or list of Tasks into this Queue. This function is identical to add() except that it returns an asynchronous object. You can call get_result() on the return value to block on the call. Args: task: A Task instance or a list of Task instances that will be added to the queue. transactional: If True, transactional Tasks will be added to the queue but cannot be run or leased until after the transaction succeeds. If the transaction fails then the Tasks will be removed from the queue (and therefore never run). If False, the added task(s) are available to run immediately; any enclosing transaction's success or failure is ignored. rpc: An optional UserRPC object. Returns: A UserRPC object; call get_result to complete the RPC and obtain the Task or list of Tasks that was supplied to this method. Successfully queued Tasks will have a valid queue name and task name after the call; such Task objects are marked as queued and cannot be added again. Note: Task objects returned from transactional adds are not notified or updated when the enclosing transaction succeeds or fails. Raises: BadTaskStateError: if the Task(s) has already been added to a queue. BadTransactionStateError: if the transactional argument is true but this call is being made outside of the context of a transaction. DuplicateTaskNameError: if a Task name is repeated in the request. InvalidTaskError: if both push and pull tasks exist in the task list. InvalidTaskNameError: if a Task name is provided but is not legal. TooManyTasksError: if task contains more than MAX_TASKS_PER_ADD tasks. TransactionalRequestTooLargeError: if transactional is True and the total size of the tasks and supporting request data exceeds MAX_TRANSACTIONAL_REQUEST_SIZE_BYTES. """ try: tasks = list(iter(task)) except __HOLE__: tasks = [task] multiple = False else: multiple = True has_push_task = False has_pull_task = False for task in tasks: if task.method == 'PULL': has_pull_task = True else: has_push_task = True if has_push_task and has_pull_task: raise InvalidTaskError( 'Can not add both push and pull tasks in a single call.') if has_push_task: fill_function = self.__FillAddPushTasksRequest else: fill_function = self.__FillAddPullTasksRequest return self.__AddTasks(tasks, transactional, fill_function, multiple, rpc)
TypeError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/api/taskqueue/taskqueue.py/Queue.add_async
4,118
@property def fieldnames(self): if self._fieldnames is not None: return self._fieldnames # Create a new reader just to figure out which row is the header args, kwds = self._readeropts data = csv.reader(*args, **kwds) rows = [] for i in range(self.max_header_row): try: rows.append(next(data)) except StopIteration: pass header_row, field_names = self.choose_header(rows) # Reset file and advance reader so it starts in the right spot self._file.seek(0) for i in range(header_row + 1): try: next(self.reader) except __HOLE__: pass self._fieldnames = field_names self._header_row = header_row return field_names
StopIteration
dataset/ETHPy150Open wq/wq.io/parsers/readers.py/SkipPreludeReader.fieldnames
4,119
def __call__(self, environ, start_response): def replacement_start_response(status, headers, exc_info=None): ''' Overrides the default response if the status is defined in the Pecan app error map configuration. ''' try: status_code = int(status.split(' ')[0]) except (ValueError, __HOLE__): # pragma: nocover raise Exception(( 'ErrorDocumentMiddleware received an invalid ' 'status %s' % status )) if status_code in self.error_map: def factory(app): return StatusPersist( app, status, self.error_map[status_code] ) raise ForwardRequestException(factory=factory) return start_response(status, headers, exc_info) app_iter = self.app(environ, replacement_start_response) return app_iter
TypeError
dataset/ETHPy150Open pecan/pecan/pecan/middleware/errordocument.py/ErrorDocumentMiddleware.__call__
4,120
def test(self): # And we must unpickle one x = dumps(C()) try: loads(x) except __HOLE__, e: self.fail('Unpickling raised an AttributeError: %s' % e)
AttributeError
dataset/ETHPy150Open enthought/traits/traits/tests/test_pickle_validated_dict.py/PickleValidatedDictTestCase.test
4,121
def get_last_input(data): """Attempts to get the deepest possible data value in the pipeline. Used when probing a selected point.""" tmp = inp = data while tmp: try: tmp = inp.input if tmp: inp = tmp except __HOLE__: tmp = None return inp ###################################################################### # `PickedData` class. ######################################################################
AttributeError
dataset/ETHPy150Open enthought/mayavi/tvtk/pyface/picker.py/get_last_input
4,122
def pick_cell (self, x, y): """ Picks the nearest cell. Returns a `PickedData` instance.""" try: self.cellpicker.pick(float(x), float(y), 0.0, self.renwin.renderer) except __HOLE__: # On old versions of VTK, the signature used to be different self.cellpicker.pick((float(x), float(y), 0.0), self.renwin.renderer) cp = self.cellpicker id = cp.cell_id picked_data = PickedData() coord = cp.pick_position picked_data.coordinate = coord if id > -1: data = cp.mapper.input.cell_data bounds = cp.mapper.input.bounds picked_data.valid = 1 picked_data.cell_id = id picked_data.data = data self._update_actor(coord, bounds) else: self.p_actor.visibility = 0 self.renwin.render() return picked_data
TypeError
dataset/ETHPy150Open enthought/mayavi/tvtk/pyface/picker.py/Picker.pick_cell
4,123
def pick_world(self, x, y): """ Picks a world point and probes for data there. Returns a `PickedData` instance.""" self.worldpicker.pick((float(x), float(y), 0.0), self.renwin.renderer) # Use the cell picker to get the data that needs to be probed. try: self.cellpicker.pick( (float(x), float(y), 0.0), self.renwin.renderer) except __HOLE__: self.cellpicker.pick( float(x), float(y), 0.0, self.renwin.renderer) wp = self.worldpicker cp = self.cellpicker coord = wp.pick_position self.probe_data.points = [list(coord)] picked_data = PickedData() picked_data.coordinate = coord if cp.mapper: data = get_last_input(cp.mapper.input) # Need to create the probe each time because otherwise it # does not seem to work properly. probe = tvtk.ProbeFilter() probe.source = data probe.input = self.probe_data probe.update() data = probe.output.point_data bounds = cp.mapper.input.bounds picked_data.valid = 1 picked_data.world_pick = 1 picked_data.point_id = 0 picked_data.data = data self._update_actor(coord, bounds) else: self.p_actor.visibility = 0 self.renwin.render() return picked_data
TypeError
dataset/ETHPy150Open enthought/mayavi/tvtk/pyface/picker.py/Picker.pick_world
4,124
def _setup_gui(self): """Pops up the GUI control widget.""" # Popup the GUI control. if self.ui is None: self.ui = self.edit_traits() # Note that we add actors to the renderer rather than to # renwin to prevent event notifications on actor # additions. self.renwin.renderer.add_actor(self.p_actor) elif self.auto_raise: try: self.ui.control.Raise() except __HOLE__: pass
AttributeError
dataset/ETHPy150Open enthought/mayavi/tvtk/pyface/picker.py/Picker._setup_gui
4,125
def verify_baseUrl(self, name, required, scheme_optional=False): """Verify value of variable |name| is a valid url. Args: name [string]: variable name. required [bool]: If True value cannot be empty. scheme_optional: True if the URL protocol scheme is not required. """ try: value = self.__bindings.get(name) except __HOLE__: if not required: return True self.__errors.append('Missing "{name}".'.format(name=name)) return False if self.is_reference(value): if not required: return True self.__errors.append('Missing "{name}".'.format(name=name)) return False # We don't really need a full URL since we're validating base urls, # (without query parameters and fragments), so the scheme will be optional. scheme_token = '[a-z0-9]+' host_token = _host_regex_token() port_token = '[1-9][0-9]*' path_token = '(?:[-\._+a-zA-Z0-9]|(?:%[0-9a-fA-F]{2}))+' url_re = re.compile('^' '({scheme}://){scheme_optional}' '({host})(:{port})?' '((?:/{path})*/?)' '$' .format( scheme=scheme_token, scheme_optional = '?' if scheme_optional else '', host=host_token, port=port_token, path=path_token )) match = url_re.match(value) return match != None
KeyError
dataset/ETHPy150Open spinnaker/spinnaker/pylib/spinnaker/validate_configuration.py/ValidateConfig.verify_baseUrl
4,126
def verify_host(self, name, required): """Verify value of variable |name| is a valid hostname. Args: name [string]: variable name. required [bool]: If True, the value of variable |name| cannot be empty. """ try: value = self.__bindings.get(name) except __HOLE__: if not required: return True self.__errors.append('Missing "{name}".'.format(name=name)) return False if self.is_reference(value): if not required: return True self.__errors.append('Missing "{name}".'.format(name=name)) return False host_token = _host_regex_token() host_regex = '^({host})$'.format(host=host_token) if not value: if not required: return True else: self.__errors.append( 'No host provided for "{name}".'.format(name=name)) return False if re.match(host_regex, value): return True self.__errors.append( 'name="{value}" does not look like {regex}'.format( value=value, regex=host_regex)) return False
KeyError
dataset/ETHPy150Open spinnaker/spinnaker/pylib/spinnaker/validate_configuration.py/ValidateConfig.verify_host
4,127
def parse_external_rspec(rspec): """Parse the given rspec and create dicts of the given switches. Parses the RSpec and creates a dict mapping switches to the ports they have. It also creates a list of (dpid, port, dpid, port) describing the links. @param rspec: The advertisement RSpec @type rspec: XML C{str} @return: tuple of a dict mapping datapath ID strings to list of port numbers and a list of (src dpid, src port num, dst dpid, dst port num, attrs) describing the links. @rtype: (C{dict} mapping C{str} to C{list} of C{int}, C{list} of (C{str}, C{int}, C{str}, C{int}, C{dict})) """ root = et.fromstring(rspec) switch_elems = root.findall(".//%s" % SWITCH_TAG) switches = {} for switch_elem in switch_elems: urn = switch_elem.get(URN) dpid = _urn_to_dpid(urn) switches[dpid] = [] port_elems = root.findall(".//%s" % PORT_TAG) for port_elem in port_elems: urn = port_elem.get(URN) dpid, port = _urn_to_port(urn) try: switches[dpid].append(port) except __HOLE__: raise Exception("No switch with datapath ID %s found" " for port with URN %s" % (dpid, urn)) link_elems = root.findall(".//%s" % LINK_TAG) links = [] for link_elem in link_elems: src_urn = link_elem.get(SRC_URN) dst_urn = link_elem.get(DST_URN) src_dpid, src_port = _urn_to_port(src_urn) dst_dpid, dst_port = _urn_to_port(dst_urn) links.append((src_dpid, src_port, dst_dpid, dst_port, {})) return switches, links
KeyError
dataset/ETHPy150Open fp7-ofelia/ocf/expedient/src/python/plugins/openflow/plugin/gapi/rspec.py/parse_external_rspec
4,128
def testGetViaHttpsKeyCert(self): # At this point I can only test # that the key and cert files are passed in # correctly to httplib. It would be nice to have # a real https endpoint to test against. http = httplib2.Http(timeout=2) http.add_certificate("akeyfile", "acertfile", "bitworking.org") try: (response, content) = http.request("https://bitworking.org", "GET") except __HOLE__: self.assertEqual(http.connections["https:bitworking.org"].key_file, "akeyfile") self.assertEqual(http.connections["https:bitworking.org"].cert_file, "acertfile") except IOError: # Skip on 3.2 pass try: (response, content) = http.request("https://notthere.bitworking.org", "GET") except httplib2.ServerNotFoundError: self.assertEqual(http.connections["https:notthere.bitworking.org"].key_file, None) self.assertEqual(http.connections["https:notthere.bitworking.org"].cert_file, None) except IOError: # Skip on 3.2 pass
AttributeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/httplib2-0.8/python3/httplib2test.py/HttpTest.testGetViaHttpsKeyCert
4,129
def _gevent_sendfile(fdout, fdin, offset, nbytes): while True: try: return o_sendfile(fdout, fdin, offset, nbytes) except __HOLE__ as e: if e.args[0] == errno.EAGAIN: wait_write(fdout) else: raise
OSError
dataset/ETHPy150Open benoitc/gunicorn/gunicorn/workers/ggevent.py/_gevent_sendfile
4,130
def handle_request(self, *args): try: super(GeventWorker, self).handle_request(*args) except gevent.GreenletExit: pass except __HOLE__: pass
SystemExit
dataset/ETHPy150Open benoitc/gunicorn/gunicorn/workers/ggevent.py/GeventWorker.handle_request
4,131
def __init__(self, *args, **kw): from django.conf import settings gettext_module.GNUTranslations.__init__(self, *args, **kw) # Starting with Python 2.4, there's a function to define # the output charset. Before 2.4, the output charset is # identical with the translation file charset. try: self.set_output_charset('utf-8') except __HOLE__: pass self.django_output_charset = 'utf-8' self.__language = '??'
AttributeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/utils/translation/trans_real.py/DjangoTranslation.__init__
4,132
def translation(language): """ Returns a translation object. This translation object will be constructed out of multiple GNUTranslations objects by merging their catalogs. It will construct a object for the requested language and add a fallback to the default language, if it's different from the requested language. """ global _translations t = _translations.get(language, None) if t is not None: return t from django.conf import settings globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale') if settings.SETTINGS_MODULE is not None: parts = settings.SETTINGS_MODULE.split('.') project = import_module(parts[0]) projectpath = os.path.join(os.path.dirname(project.__file__), 'locale') else: projectpath = None def _fetch(lang, fallback=None): global _translations loc = to_locale(lang) res = _translations.get(lang, None) if res is not None: return res def _translation(path): try: t = gettext_module.translation('django', path, [loc], DjangoTranslation) t.set_language(lang) return t except __HOLE__, e: return None res = _translation(globalpath) # We want to ensure that, for example, "en-gb" and "en-us" don't share # the same translation object (thus, merging en-us with a local update # doesn't affect en-gb), even though they will both use the core "en" # translation. So we have to subvert Python's internal gettext caching. base_lang = lambda x: x.split('-', 1)[0] if base_lang(lang) in [base_lang(trans) for trans in _translations]: res._info = res._info.copy() res._catalog = res._catalog.copy() def _merge(path): t = _translation(path) if t is not None: if res is None: return t else: res.merge(t) return res for localepath in settings.LOCALE_PATHS: if os.path.isdir(localepath): res = _merge(localepath) for appname in settings.INSTALLED_APPS: app = import_module(appname) apppath = os.path.join(os.path.dirname(app.__file__), 'locale') if os.path.isdir(apppath): res = _merge(apppath) if projectpath and os.path.isdir(projectpath): res = _merge(projectpath) if res is None: if fallback is not None: res = fallback else: return gettext_module.NullTranslations() _translations[lang] = res return res default_translation = _fetch(settings.LANGUAGE_CODE) current_translation = _fetch(language, fallback=default_translation) return current_translation
IOError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/utils/translation/trans_real.py/translation
4,133
def get_language(): """Returns the currently selected language.""" t = _active.get(currentThread(), None) if t is not None: try: return to_language(t.language()) except __HOLE__: pass # If we don't have a real translation object, assume it's the default language. from django.conf import settings return settings.LANGUAGE_CODE
AttributeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/utils/translation/trans_real.py/get_language
4,134
def filesizeformat(bytes, sep=' '): """ Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB, 102 B, 2.3 GB etc). Grabbed from Django (http://www.djangoproject.com), slightly modified. :param bytes: size in bytes (as integer) :param sep: string separator between number and abbreviation """ try: bytes = float(bytes) except (TypeError, __HOLE__, UnicodeDecodeError): return '0%sB' % sep if bytes < 1024: size = bytes template = '%.0f%sB' elif bytes < 1024 * 1024: size = bytes / 1024 template = '%.0f%sKB' elif bytes < 1024 * 1024 * 1024: size = bytes / 1024 / 1024 template = '%.1f%sMB' else: size = bytes / 1024 / 1024 / 1024 template = '%.2f%sGB' return template % (size, sep)
ValueError
dataset/ETHPy150Open codeinn/vcs/vcs/utils/filesize.py/filesizeformat
4,135
def _get_small_message_ids(self, message_ids): # Using existing message uids, get the sizes and # return only those that are under the size # limit safe_message_ids = [] status, data = self.server.uid( 'fetch', ','.join(message_ids), '(RFC822.SIZE)' ) for each_msg in data: each_msg = each_msg.decode() try: uid = each_msg.split(' ')[2] size = each_msg.split(' ')[4].rstrip(')') if int(size) <= int(self.max_message_size): safe_message_ids.append(uid) except __HOLE__ as e: logger.warning( "ValueError: %s working on %s" % (e, each_msg[0]) ) pass return safe_message_ids
ValueError
dataset/ETHPy150Open coddingtonbear/django-mailbox/django_mailbox/transports/imap.py/ImapTransport._get_small_message_ids
4,136
def get_message(self, condition=None): message_ids = self._get_all_message_ids() if not message_ids: return # Limit the uids to the small ones if we care about that if self.max_message_size: message_ids = self._get_small_message_ids(message_ids) if self.archive: typ, folders = self.server.list(pattern=self.archive) if folders[0] is None: # If the archive folder does not exist, create it self.server.create(self.archive) for uid in message_ids: try: typ, msg_contents = self.server.uid('fetch', uid, '(RFC822)') if not msg_contents: continue try: message = self.get_email_from_bytes(msg_contents[0][1]) except __HOLE__: # This happens if another thread/process deletes the # message between our generating the ID list and our # processing it here. continue if condition and not condition(message): continue yield message except MessageParseError: continue if self.archive: self.server.uid('copy', uid, self.archive) self.server.uid('store', uid, "+FLAGS", "(\\Deleted)") self.server.expunge() return
TypeError
dataset/ETHPy150Open coddingtonbear/django-mailbox/django_mailbox/transports/imap.py/ImapTransport.get_message
4,137
@classmethod def _new(cls, poly, index): """Construct new ``CRootOf`` object from raw data. """ obj = Expr.__new__(cls) obj.poly = PurePoly(poly) obj.index = index try: _reals_cache[obj.poly] = _reals_cache[poly] _complexes_cache[obj.poly] = _complexes_cache[poly] except __HOLE__: pass return obj
KeyError
dataset/ETHPy150Open sympy/sympy/sympy/polys/rootoftools.py/ComplexRootOf._new
4,138
def _eval_evalf(self, prec): """Evaluate this complex root to the given precision. """ with workprec(prec): g = self.poly.gen if not g.is_Symbol: d = Dummy('x') func = lambdify(d, self.expr.subs(g, d)) else: func = lambdify(g, self.expr) interval = self._get_interval() if not self.is_real: # For complex intervals, we need to keep refining until the # imaginary interval is disjunct with other roots, that is, # until both ends get refined. ay = interval.ay by = interval.by while interval.ay == ay or interval.by == by: interval = interval.refine() while True: if self.is_real: a = mpf(str(interval.a)) b = mpf(str(interval.b)) if a == b: root = a break x0 = mpf(str(interval.center)) else: ax = mpf(str(interval.ax)) bx = mpf(str(interval.bx)) ay = mpf(str(interval.ay)) by = mpf(str(interval.by)) if ax == bx and ay == by: # the sign of the imaginary part will be assigned # according to the desired index using the fact that # roots are sorted with negative imag parts coming # before positive (and all imag roots coming after real # roots) deg = self.poly.degree() i = self.index # a positive attribute after creation if (deg - i) % 2: if ay < 0: ay = -ay else: if ay > 0: ay = -ay root = mpc(ax, ay) break x0 = mpc(*map(str, interval.center)) try: root = findroot(func, x0) # If the (real or complex) root is not in the 'interval', # then keep refining the interval. This happens if findroot # accidentally finds a different root outside of this # interval because our initial estimate 'x0' was not close # enough. It is also possible that the secant method will # get trapped by a max/min in the interval; the root # verification by findroot will raise a ValueError in this # case and the interval will then be tightened -- and # eventually the root will be found. # # It is also possible that findroot will not have any # successful iterations to process (in which case it # will fail to initialize a variable that is tested # after the iterations and raise an UnboundLocalError). if self.is_real: if (a <= root <= b): break elif (ax <= root.real <= bx and ay <= root.imag <= by): break except (UnboundLocalError, __HOLE__): pass interval = interval.refine() return (Float._new(root.real._mpf_, prec) + I*Float._new(root.imag._mpf_, prec))
ValueError
dataset/ETHPy150Open sympy/sympy/sympy/polys/rootoftools.py/ComplexRootOf._eval_evalf
4,139
def __new__(cls, expr, func=None, x=None, auto=True, quadratic=False): """Construct a new ``RootSum`` instance of roots of a polynomial.""" coeff, poly = cls._transform(expr, x) if not poly.is_univariate: raise MultivariatePolynomialError( "only univariate polynomials are allowed") if func is None: func = Lambda(poly.gen, poly.gen) else: try: is_func = func.is_Function except __HOLE__: is_func = False if is_func and 1 in func.nargs: if not isinstance(func, Lambda): func = Lambda(poly.gen, func(poly.gen)) else: raise ValueError( "expected a univariate function, got %s" % func) var, expr = func.variables[0], func.expr if coeff is not S.One: expr = expr.subs(var, coeff*var) deg = poly.degree() if not expr.has(var): return deg*expr if expr.is_Add: add_const, expr = expr.as_independent(var) else: add_const = S.Zero if expr.is_Mul: mul_const, expr = expr.as_independent(var) else: mul_const = S.One func = Lambda(var, expr) rational = cls._is_func_rational(poly, func) (_, factors), terms = poly.factor_list(), [] for poly, k in factors: if poly.is_linear: term = func(roots_linear(poly)[0]) elif quadratic and poly.is_quadratic: term = sum(map(func, roots_quadratic(poly))) else: if not rational or not auto: term = cls._new(poly, func, auto) else: term = cls._rational_case(poly, func) terms.append(k*term) return mul_const*Add(*terms) + deg*add_const
AttributeError
dataset/ETHPy150Open sympy/sympy/sympy/polys/rootoftools.py/RootSum.__new__
4,140
def get_git_changeset(): """Returns a numeric identifier of the latest git changeset. The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format. This value isn't guaranteed to be unique, but collisions are very unlikely, so it's sufficient for generating the development version numbers. """ repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=repo_dir, universal_newlines=True) timestamp = git_log.communicate()[0] try: timestamp = datetime.datetime.utcfromtimestamp(int(timestamp)) except __HOLE__: return None return timestamp.strftime('%Y%m%d%H%M%S')
ValueError
dataset/ETHPy150Open beerfactory/hbmqtt/hbmqtt/version.py/get_git_changeset
4,141
def __init__(self, subj, filestore=default_filestore): self.subject = subj self._warning = None self._transforms = None self._surfaces = None self.filestore = filestore try: with open(os.path.join(filestore, subj, "warning.txt")) as fp: self._warning = fp.read() except __HOLE__: pass
IOError
dataset/ETHPy150Open gallantlab/pycortex/cortex/database.py/SubjectDB.__init__
4,142
def get_surfinfo(self, subject, type="curvature", recache=False, **kwargs): """Return auxillary surface information from the filestore. Surface info is defined as anatomical information specific to a subject in surface space. A Vertex class will be returned as necessary. Info not found in the filestore will be automatically generated. See documentation in cortex.surfinfo for auto-generation code Parameters ---------- subject: str Subject name for which to return info type: str Type of surface info returned, IE. curvature, distortion, sulcaldepth, etc. recache: bool Regenerate the information Returns ------- verts : Vertex class If the surface information has "left" and "right" entries, a Vertex class is returned - OR - npz : npzfile Otherwise, an npz object is returned. Remember to close it! """ opts = "" if len(kwargs) > 0: opts = "[%s]"%','.join(["%s=%s"%i for i in kwargs.items()]) try: self.auxfile.get_surf(subject, "fiducial") surfifile = os.path.join(self.get_cache(subject),"%s%s.npz"%(type, opts)) except (AttributeError, __HOLE__): surfiform = self.get_paths(subject)['surfinfo'] surfifile = surfiform.format(type=type, opts=opts) if not os.path.exists(os.path.join(self.filestore, subject, "surface-info")): os.makedirs(os.path.join(self.filestore, subject, "surface-info")) if not os.path.exists(surfifile) or recache: print ("Generating %s surface info..."%type) from . import surfinfo getattr(surfinfo, type)(surfifile, subject, **kwargs) npz = np.load(surfifile) if "left" in npz and "right" in npz: from .dataset import Vertex verts = np.hstack([npz['left'], npz['right']]) npz.close() return Vertex(verts, subject) return npz
IOError
dataset/ETHPy150Open gallantlab/pycortex/cortex/database.py/Database.get_surfinfo
4,143
def get_overlay(self, subject, otype='rois', **kwargs): from . import svgroi pts, polys = self.get_surf(subject, "flat", merge=True, nudge=True) if otype in ["rois", "cutouts", "sulci"] or isinstance(otype, (list,tuple)): # Assumes that all lists or tuples will only consist of "rois","cutouts",and "sulci"... # Prevents combining external files with sulci, e.g. svgfile = self.get_paths(subject)["rois"] if self.auxfile is not None: try: tf = self.auxfile.get_overlay(subject, otype) # kwargs?? svgfile = tf.name except (AttributeError, __HOLE__): # NOTE: This is better error handling, but does not account for # case in which self.auxfile is None - when is that?? I (ML) think # it only comes up with new svg layer variants in extra_layers branch... # svgfile = self.get_paths(subject)["rois"] # Layer type does not exist or has been temporarily removed pass if 'pts' in kwargs: pts = kwargs['pts'] del kwargs['pts'] return svgroi.get_roipack(svgfile, pts, polys, layer=otype, **kwargs) if otype == "external": layer = kwargs['layer'] del kwargs['layer'] svgfile = kwargs["svgfile"] del kwargs["svgfile"] if 'pts' in kwargs: pts = kwargs['pts'] del kwargs['pts'] return svgroi.get_roipack(svgfile, pts, polys, layer=layer,**kwargs) raise TypeError('Invalid overlay type')
IOError
dataset/ETHPy150Open gallantlab/pycortex/cortex/database.py/Database.get_overlay
4,144
def get_xfm(self, subject, name, xfmtype="coord"): """Retrieves a transform from the filestore Parameters ---------- subject : str Name of the subject name : str Name of the transform xfmtype : str, optional Type of transform to return. Defaults to coord. """ from .xfm import Transform if xfmtype == 'coord': try: return self.auxfile.get_xfm(subject, name) except (__HOLE__, IOError): pass if name == "identity": nib = self.get_anat(subject, 'raw') return Transform(np.linalg.inv(nib.get_affine()), nib) fname = os.path.join(self.filestore, subject, "transforms", name, "matrices.xfm") reference = os.path.join(self.filestore, subject, "transforms", name, "reference.nii.gz") xfmdict = json.load(open(fname)) return Transform(xfmdict[xfmtype], reference)
AttributeError
dataset/ETHPy150Open gallantlab/pycortex/cortex/database.py/Database.get_xfm
4,145
@_memo def get_surf(self, subject, type, hemisphere="both", merge=False, nudge=False): '''Return the surface pair for the given subject, surface type, and hemisphere. Parameters ---------- subject : str Name of the subject type : str Type of surface to return, probably in (fiducial, inflated, veryinflated, hyperinflated, superinflated, flat) hemisphere : "lh", "rh" Which hemisphere to return merge : bool Vstack the hemispheres, if requesting both nudge : bool Nudge the hemispheres apart from each other, for overlapping surfaces (inflated, etc) Returns ------- left, right : If request is for both hemispheres, otherwise: pts, polys, norms : ((p,3) array, (f,3) array, (p,3) array or None) For single hemisphere ''' try: return self.auxfile.get_surf(subject, type, hemisphere, merge=merge, nudge=nudge) except (AttributeError, __HOLE__): pass files = self.get_paths(subject)['surfs'] if hemisphere.lower() == "both": left, right = [ self.get_surf(subject, type, hemisphere=h) for h in ["lh", "rh"]] if type != "fiducial" and nudge: left[0][:,0] -= left[0].max(0)[0] right[0][:,0] -= right[0].min(0)[0] if merge: pts = np.vstack([left[0], right[0]]) polys = np.vstack([left[1], right[1]+len(left[0])]) return pts, polys return left, right elif hemisphere.lower() in ("lh", "left"): hemi = "lh" elif hemisphere.lower() in ("rh", "right"): hemi = "rh" else: raise TypeError("Not a valid hemisphere name") if type == 'fiducial' and 'fiducial' not in files: wpts, polys = self.get_surf(subject, 'wm', hemi) ppts, _ = self.get_surf(subject, 'pia', hemi) return (wpts + ppts) / 2, polys try: from . import formats return formats.read(os.path.splitext(files[type][hemi])[0]) except KeyError: raise IOError
IOError
dataset/ETHPy150Open gallantlab/pycortex/cortex/database.py/Database.get_surf
4,146
def get_mask(self, subject, xfmname, type='thick'): try: self.auxfile.get_mask(subject, xfmname, type) except (AttributeError, IOError): pass fname = self.get_paths(subject)['masks'].format(xfmname=xfmname, type=type) try: import nibabel nib = nibabel.load(fname) return nib.get_data().T != 0 except __HOLE__: print('Mask not found, generating...') from .utils import get_cortical_mask mask = get_cortical_mask(subject, xfmname, type) self.save_mask(subject, xfmname, type, mask) return mask
IOError
dataset/ETHPy150Open gallantlab/pycortex/cortex/database.py/Database.get_mask
4,147
def get_cache(self, subject):
    try:
        self.auxfile.get_surf(subject, "fiducial")
        #generate the hashed name of the filename and subject as the directory name
        import hashlib
        hashname = "pycx_%s"%hashlib.md5(self.auxfile.h5.filename).hexdigest()[-8:]
        cachedir = os.path.join(tempfile.gettempdir(), hashname, subject)
    except (__HOLE__, IOError):
        cachedir = os.path.join(self.filestore, subject, "cache")

    if not os.path.exists(cachedir):
        os.makedirs(cachedir)
    return cachedir
AttributeError
dataset/ETHPy150Open gallantlab/pycortex/cortex/database.py/Database.get_cache
4,148
def make_subj(self, subject):
    if os.path.exists(os.path.join(self.filestore, subject)):
        if raw_input("Are you sure you want to overwrite this existing subject? Type YES\n") == "YES":
            shutil.rmtree(os.path.join(self.filestore, subject))

    for dirname in ['transforms', 'anatomicals', 'cache', 'surfaces', 'surface-info','views']:
        try:
            path = os.path.join(self.filestore, subject, dirname)
            os.makedirs(path)
        except __HOLE__:
            print("Error making directory %s"%path)
OSError
dataset/ETHPy150Open gallantlab/pycortex/cortex/database.py/Database.make_subj
4,149
def _key(self, key):
    try:
        dialect, value_key = key.split("_", 1)
    except __HOLE__:
        raise KeyError(key)
    else:
        return dialect, value_key
ValueError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/sql/base.py/_DialectArgView._key
4,150
def __setitem__(self, key, value):
    try:
        dialect, value_key = self._key(key)
    except __HOLE__:
        raise exc.ArgumentError(
            "Keys must be of the form <dialectname>_<argname>")
    else:
        self.obj.dialect_options[dialect][value_key] = value
KeyError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/sql/base.py/_DialectArgView.__setitem__
4,151
def get(self, name, default=None):
    u"""Answer the value of *name*. Answer *default* of the key does not exist.
    Dict and list values are wrapped.

    >>> td = ADict()
    >>> print td.get("123")
    None
    >>> print td.get("123", 456)
    456
    >>> td["a"] = [1, 2, 3]
    >>> td["a"]
    <AList:[1, 2, 3]>
    """
    try:
        return self[name]
    except __HOLE__:
        return default
KeyError
dataset/ETHPy150Open petrvanblokland/Xierpa3/xierpa3/toolbox/storage/adict.py/ADict.get
4,152
def __getattr__(self, name):
    u"""Answer the named attribute. If the answered value is a dictionary, then create
    a new ADict (AttributeDict) as wrapper around that dictionary. This allows a chained
    attribute query. Otherwise answer the plain value.

    >>> td = ADict()
    >>> td["a"] = 123
    >>> td.a
    123
    >>> td.b = 125
    >>> td.b
    125
    >>> td["b"]
    125
    >>> td.c
    Traceback (most recent call last):
    ...
    AttributeError: 'c'
    """
    try:
        return self[name]
    except __HOLE__:
        raise AttributeError(repr(name))
KeyError
dataset/ETHPy150Open petrvanblokland/Xierpa3/xierpa3/toolbox/storage/adict.py/ADict.__getattr__
4,153
def __delattr__(self, key):
    r"""
    >>> td = ADict(dict(a=12))
    >>> del td.a
    >>> td
    <ADict:{}>
    >>> del td.aaaa
    Traceback (most recent call last):
    ...
    AttributeError: 'aaaa'
    """
    try:
        self.__delitem__(key)
    except __HOLE__:
        raise AttributeError(repr(key))
KeyError
dataset/ETHPy150Open petrvanblokland/Xierpa3/xierpa3/toolbox/storage/adict.py/ADict.__delattr__
4,154
def naive_transform( self, docs, vocabulary ):
    x = np.zeros(( len( docs ), len( vocabulary )))
    for row_i, doc in enumerate( docs ):
        for word in doc:
            x[row_i,:] *= self.alpha
            try:
                col_i = vocabulary[word]
            except __HOLE__:
                # not in vocabulary: a one-letter word or a word from a test set
                continue
            x[row_i, col_i] += 1
    return x
KeyError
dataset/ETHPy150Open zygmuntz/classifying-text/fofe/fofe.py/FofeVectorizer.naive_transform
4,155
def transform( self, docs, vocabulary ):
    # pre-compute alpha powers
    alpha_powers = { x: self.alpha ** x for x in range( 10000 ) }

    data = []
    i = []
    j = []

    for r, doc in enumerate( docs ):
        doc_len = len( doc )
        # row indices for the doc
        i += [ r for _ in range( doc_len ) ]
        for word_pos, word in enumerate( doc ):
            # column index for the word
            try:
                word_i = vocabulary[word]
                j.append( word_i )
            except __HOLE__:
                # not in vocabulary: a one-letter word or a word from a test set
                i.pop()
                continue
            # value at [i,j]; duplicates will be added up
            try:
                data.append( alpha_powers[ doc_len - word_pos - 1 ] )
            except KeyError:
                data.append( alpha ** ( doc_len - word_pos - 1 ))

    """
    print data
    print i
    print j
    """
    return coo_matrix(( data, ( i, j )), ( len( docs ), len( vocabulary )))
KeyError
dataset/ETHPy150Open zygmuntz/classifying-text/fofe/fofe.py/FofeVectorizer.transform
4,156
def check_render_pipe_str(pipestr, renderers):
    '''
    Check that all renderers specified in the pipe string are available.
    If so, return the list of render functions in the pipe as
    (render_func, arg_str) tuples; otherwise return [].
    '''
    parts = [r.strip() for r in pipestr.split('|')]
    # Note: currently, | is not allowed anywhere in the shebang line except
    #       as pipes between renderers.
    results = []
    try:
        if parts[0] == pipestr and pipestr in OLD_STYLE_RENDERERS:
            parts = OLD_STYLE_RENDERERS[pipestr].split('|')
        for part in parts:
            name, argline = (part + ' ').split(' ', 1)
            results.append((renderers[name], argline.strip()))
        return results
    except __HOLE__:
        log.error('The renderer "{0}" is not available'.format(pipestr))
        return []
KeyError
dataset/ETHPy150Open saltstack/salt/salt/template.py/check_render_pipe_str
4,157
def load(self):
    """Reads any tokens not already in memory from the specified file"""
    try:
        with open(self.filename) as f:
            result = yaml.load(f.read())
    except __HOLE__:
        raise IOError("Access token file %s couldn't be opened. Try running authenticate_twitter_cherrypy ?" % self.filename)

    if result:
        self.tokens.update(result)
IOError
dataset/ETHPy150Open domrout/python-twitter-wrapper/twitterwrapper/access_tokens.py/AccessTokenStore.load
4,158
def save(self):
    """Overwrites the filestore and saves all tokens"""
    try:
        with open(self.filename, "w") as f:
            yaml.dump(self.tokens, f, default_flow_style=False)
    except __HOLE__:
        raise IOError("Access token file %s couldn't be opened." % self.filename)
IOError
dataset/ETHPy150Open domrout/python-twitter-wrapper/twitterwrapper/access_tokens.py/AccessTokenStore.save
4,159
def verify_authorization(self, oauth_verifier, oauth_token = None):
    """Verifies the authentication given by a user after they've been to twitter.

    Adds the new token to the store and saves.

    Returns a complete set of auth data."""
    # Hack to maintain backwards compatibility when only one token is used.
    if oauth_token == None:
        try:
            oauth_token = self.request_tokens.keys()[0]
        except IndexError:
            raise Exception("No access token exists currently")

    try:
        # Get the actual token for this request
        request_token = self.request_tokens[oauth_token]
    except __HOLE__:
        raise Exception("Supplied access token has not been seen before")

    token = oauth2.Token(request_token['oauth_token'],
                         request_token['oauth_token_secret'])
    token.set_verifier(oauth_verifier)
    client = oauth2.Client(self.consumer, token)

    resp, content = client.request(self.access_token_url, "POST")
    access_token = dict(urlparse.parse_qsl(content))

    return {
        "screen_name": access_token['screen_name'],
        "access_token_key": access_token['oauth_token'],
        "access_token_secret": access_token['oauth_token_secret'],
        "consumer_key": self.oauth_consumer_token,
        "consumer_secret": self.oauth_consumer_secret
    }
KeyError
dataset/ETHPy150Open domrout/python-twitter-wrapper/twitterwrapper/access_tokens.py/AuthenticationProcess.verify_authorization
4,160
def read_batchfile(pythonpath, file_ending='.py'): """ This reads the contents of a batch-file. Filename is considered to be a python path to a batch file relative the directory specified in `settings.py`. file_ending specify which batchfile ending should be assumed (.ev or .py). The ending should not be included in the python path. Args: pythonpath (str): A dot-python path to a file. file_ending (str): The file ending of this file (.ev or .py) Returns: text (str): The text content of the batch file. Raises: IOError: If problems reading file. """ # open the file abspaths = [] for basepath in settings.BASE_BATCHPROCESS_PATHS: # note that pypath_to_realpath has already checked the file for existence if basepath.startswith("evennia"): basepath = basepath.split("evennia", 1)[-1] abspaths.extend(utils.pypath_to_realpath("%s.%s" % (basepath, pythonpath), file_ending)) if not abspaths: raise IOError text = None decoderr = [] for abspath in abspaths: # try different paths, until we get a match # we read the file directly into unicode. for file_encoding in ENCODINGS: # try different encodings, in order try: with codecs.open(abspath, 'r', encoding=file_encoding) as fobj: text = fobj.read() except (__HOLE__, UnicodeDecodeError) as e: # this means an encoding error; try another encoding decoderr.append(str(e)) continue break if not text and decoderr: raise UnicodeDecodeError("\n".join(decoderr)) return text #------------------------------------------------------------ # # Batch-command processor # #------------------------------------------------------------
ValueError
dataset/ETHPy150Open evennia/evennia/evennia/utils/batchprocessors.py/read_batchfile
4,161
def do_GET(self): dirs=self.path.split("/") if(len(dirs)>1): resource=dirs[1] if(resource == "addresses"): try: queryStr=parse_qs(urlparse(self.path).query) reader=str(queryStr["reader"][0]) self.send_response(200) self.send_header('Content-type',"application/json") self.end_headers() self.wfile.write(json.dumps(self.server.getAddresses())) except IOError as e: self.send_response(404) self.server.log("Error with processing readers request for addresses: "+str(e)) elif(resource == "states"): try: self.send_response(200) self.send_header('Content-type','application/json') self.end_headers() self.wfile.write(json.dumps(self.server.assetToPayload.values())) except __HOLE__ as e: self.send_response(404) self.server.log("Error with processing request for asset states: "+str(e)) else: fileName="web/view.html" contentType="text/html" if("style.css" in dirs): fileName="web/style.css" contentType="text/css" elif("coords.cfg" in dirs): fileName="web/coords.cfg" contentType="application/json" elif("blueprint.png" in dirs): fileName="web/blueprint.png" contentType="image/png" elif("coord.html" in dirs): fileName="web/coord.html" contentType="text/html" elif("logo.png" in dirs): fileName="web/logo.png" contentType="image/png" f = open(fileName) self.send_response(200) self.send_header('Content-type',contentType) self.end_headers() self.wfile.write(f.read()) f.close() else: self.send_response(404) self.server.log("Error with request. No resource specified")
IOError
dataset/ETHPy150Open SteveAbb/Vestigo/Vestigo Base/server.py/HTTPHandler.do_GET
4,162
def call(cmd):
    try:
        response = subcall(cmd,shell=True)
        print
        time.sleep(1)
        if response < 0:
            sys.exit(response)
    except __HOLE__, E:
        sys.exit(E)
OSError
dataset/ETHPy150Open springmeyer/tilelite/deploy.py/call
4,163
def LoadTextFile(self, fileName):
    textFile = None
    try:
        textFile = open(fileName, 'r')
    except __HOLE__:
        tkMessageBox.showerror(title='File Load Error',
                               message='Unable to load file '+`fileName`+' .')
    else:
        self.textView.insert(0.0,textFile.read())
IOError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/etc/textView.py/TextViewer.LoadTextFile
4,164
@classmethod
def parse(cls, buf):
    (type_, length) = struct.unpack_from(cls._PACK_STR, buf, 0)
    rest = buf[utils.round_up(length, 8):]
    try:
        subcls = cls._TYPES[type_]
    except __HOLE__:
        subcls = OFPPropUnknown
    prop = subcls.parser(buf)
    prop.type = type_
    prop.length = length
    return prop, rest
KeyError
dataset/ETHPy150Open osrg/ryu/ryu/ofproto/ofproto_v1_5_parser.py/OFPPropBase.parse
4,165
def __init__(self, *args, **kw):
    super(EditProfileForm, self).__init__(*args, **kw)
    # Put the first and last name at the top
    try:
        # in Django < 1.7
        new_order = self.fields.keyOrder[:-2]
        new_order.insert(0, 'first_name')
        new_order.insert(1, 'last_name')
        self.fields.keyOrder = new_order
    except __HOLE__:
        # in Django > 1.7
        new_order = [('first_name', self.fields['first_name']),
                     ('last_name', self.fields['last_name'])]
        new_order.extend(list(self.fields.items())[:-2])
        self.fields = OrderedDict(new_order)
AttributeError
dataset/ETHPy150Open bread-and-pepper/django-userena/userena/forms.py/EditProfileForm.__init__
4,166
def __call__(self, a): m = _get_backing_memmap(a) if m is not None: # a is already backed by a memmap file, let's reuse it directly return _reduce_memmap_backed(a, m) if (not a.dtype.hasobject and self._max_nbytes is not None and a.nbytes > self._max_nbytes): # check that the folder exists (lazily create the pool temp folder # if required) try: os.makedirs(self._temp_folder) os.chmod(self._temp_folder, FOLDER_PERMISSIONS) except __HOLE__ as e: if e.errno != errno.EEXIST: raise e # Find a unique, concurrent safe filename for writing the # content of this array only once. basename = "%d-%d-%s.pkl" % ( os.getpid(), id(threading.current_thread()), hash(a)) filename = os.path.join(self._temp_folder, basename) # In case the same array with the same content is passed several # times to the pool subprocess children, serialize it only once # XXX: implement an explicit reference counting scheme to make it # possible to delete temporary files as soon as the workers are # done processing this data. if not os.path.exists(filename): if self.verbose > 0: print("Memmaping (shape=%r, dtype=%s) to new file %s" % ( a.shape, a.dtype, filename)) for dumped_filename in dump(a, filename): os.chmod(dumped_filename, FILE_PERMISSIONS) if self._prewarm: # Warm up the data to avoid concurrent disk access in # multiple children processes load(filename, mmap_mode=self._mmap_mode).max() elif self.verbose > 1: print("Memmaping (shape=%s, dtype=%s) to old file %s" % ( a.shape, a.dtype, filename)) # The worker process will use joblib.load to memmap the data return (load, (filename, self._mmap_mode)) else: # do not convert a into memmap, let pickler do its usual copy with # the default system pickler if self.verbose > 1: print("Pickling array (shape=%r, dtype=%s)." % ( a.shape, a.dtype)) return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),)) ############################################################################### # Enable custom pickling in Pool queues
OSError
dataset/ETHPy150Open scikit-learn/scikit-learn/sklearn/externals/joblib/pool.py/ArrayMemmapReducer.__call__
4,167
def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6, mmap_mode='r', forward_reducers=None, backward_reducers=None, verbose=0, context_id=None, prewarm=False, **kwargs): if forward_reducers is None: forward_reducers = dict() if backward_reducers is None: backward_reducers = dict() if context_id is not None: warnings.warn('context_id is deprecated and ignored in joblib' ' 0.9.4 and will be removed in 0.11', DeprecationWarning) # Prepare a sub-folder name for the serialization of this particular # pool instance (do not create in advance to spare FS write access if # no array is to be dumped): use_shared_mem = False pool_folder_name = "joblib_memmaping_pool_%d_%d" % ( os.getpid(), id(self)) if temp_folder is None: temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None) if temp_folder is None: if os.path.exists(SYSTEM_SHARED_MEM_FS): try: temp_folder = SYSTEM_SHARED_MEM_FS pool_folder = os.path.join(temp_folder, pool_folder_name) if not os.path.exists(pool_folder): os.makedirs(pool_folder) use_shared_mem = True except __HOLE__: # Missing rights in the /dev/shm partition, # fallback to regular temp folder. temp_folder = None if temp_folder is None: # Fallback to the default tmp folder, typically /tmp temp_folder = tempfile.gettempdir() temp_folder = os.path.abspath(os.path.expanduser(temp_folder)) pool_folder = os.path.join(temp_folder, pool_folder_name) self._temp_folder = pool_folder # Register the garbage collector at program exit in case caller forgets # to call terminate explicitly: note we do not pass any reference to # self to ensure that this callback won't prevent garbage collection of # the pool instance and related file handler resources such as POSIX # semaphores and pipes atexit.register(lambda: delete_folder(pool_folder)) if np is not None: # Register smart numpy.ndarray reducers that detects memmap backed # arrays and that is alse able to dump to memmap large in-memory # arrays over the max_nbytes threshold if prewarm == "auto": prewarm = not use_shared_mem forward_reduce_ndarray = ArrayMemmapReducer( max_nbytes, pool_folder, mmap_mode, verbose, prewarm=prewarm) forward_reducers[np.ndarray] = forward_reduce_ndarray forward_reducers[np.memmap] = reduce_memmap # Communication from child process to the parent process always # pickles in-memory numpy.ndarray without dumping them as memmap # to avoid confusing the caller and make it tricky to collect the # temporary folder backward_reduce_ndarray = ArrayMemmapReducer( None, pool_folder, mmap_mode, verbose) backward_reducers[np.ndarray] = backward_reduce_ndarray backward_reducers[np.memmap] = reduce_memmap poolargs = dict( processes=processes, forward_reducers=forward_reducers, backward_reducers=backward_reducers) poolargs.update(kwargs) super(MemmapingPool, self).__init__(**poolargs)
IOError
dataset/ETHPy150Open scikit-learn/scikit-learn/sklearn/externals/joblib/pool.py/MemmapingPool.__init__
4,168
@memoize def new_dataset(expr, deltas, missing_values): """ Creates or returns a dataset from a pair of blaze expressions. Parameters ---------- expr : Expr The blaze expression representing the first known values. deltas : Expr The blaze expression representing the deltas to the data. missing_values : frozenset((name, value) pairs Association pairs column name and missing_value for that column. This needs to be a frozenset rather than a dict or tuple of tuples because we want a collection that's unordered but still hashable. Returns ------- ds : type A new dataset type. Notes ----- This function is memoized. repeated calls with the same inputs will return the same type. """ missing_values = dict(missing_values) columns = {} for name, type_ in expr.dshape.measure.fields: # Don't generate a column for sid or timestamp, since they're # implicitly the labels if the arrays that will be passed to pipeline # Terms. if name in (SID_FIELD_NAME, TS_FIELD_NAME): continue try: # TODO: This should support datetime and bool columns. if promote(type_, float64, promote_option=False) != float64: raise NotPipelineCompatible() if isinstance(type_, Option): type_ = type_.ty except NotPipelineCompatible: col = NonPipelineField(name, type_) except __HOLE__: col = NonNumpyField(name, type_) else: col = Column( type_.to_numpy_dtype(), missing_values.get(name, NotSpecified), ) columns[name] = col name = expr._name if name is None: name = next(_new_names) # unicode is a name error in py3 but the branch is only hit # when we are in python 2. if PY2 and isinstance(name, unicode): # noqa name = name.encode('utf-8') return type(name, (DataSet,), columns)
TypeError
dataset/ETHPy150Open quantopian/zipline/zipline/pipeline/loaders/blaze/core.py/new_dataset
4,169
def get_deltas(expr, deltas, no_deltas_rule):
    """Find the correct deltas for the expression.

    Parameters
    ----------
    expr : Expr
        The baseline expression.
    deltas : Expr, 'auto', or None
        The deltas argument. If this is 'auto', then the deltas table will
        be searched for by walking up the expression tree. If this cannot
        be reflected, then an action will be taken based on the
        ``no_deltas_rule``.
    no_deltas_rule : no_deltas_rule
        How to handle the case where deltas='auto' but no deltas could be
        found.

    Returns
    -------
    deltas : Expr or None
        The deltas table to use.
    """
    if isinstance(deltas, bz.Expr) or deltas != 'auto':
        return deltas

    try:
        return expr._child[(expr._name or '') + '_deltas']
    except (ValueError, __HOLE__):
        if no_deltas_rule == no_deltas_rules.raise_:
            raise ValueError(
                "no deltas table could be reflected for %s" % expr
            )
        elif no_deltas_rule == no_deltas_rules.warn:
            warnings.warn(NoDeltasWarning(expr))
    return None
AttributeError
dataset/ETHPy150Open quantopian/zipline/zipline/pipeline/loaders/blaze/core.py/get_deltas
4,170
def _load_dataset(self, dates, assets, mask, columns): try: (dataset,) = set(map(getdataset, columns)) except __HOLE__: raise AssertionError('all columns must come from the same dataset') expr, deltas, odo_kwargs = self[dataset] have_sids = SID_FIELD_NAME in expr.fields asset_idx = pd.Series(index=assets, data=np.arange(len(assets))) assets = list(map(int, assets)) # coerce from numpy.int64 added_query_fields = [AD_FIELD_NAME, TS_FIELD_NAME] + ( [SID_FIELD_NAME] if have_sids else [] ) data_query_time = self._data_query_time data_query_tz = self._data_query_tz lower_dt, upper_dt = normalize_data_query_bounds( dates[0], dates[-1], data_query_time, data_query_tz, ) def where(e): """Create the query to run against the resources. Parameters ---------- e : Expr The baseline or deltas expression. Returns ------- q : Expr The query to run. """ def lower_for_col(column): pred = e[TS_FIELD_NAME] <= lower_dt colname = column.name schema = e[colname].schema.measure if isinstance(schema, Option): pred &= e[colname].notnull() schema = schema.ty if schema in floating: pred &= ~e[colname].isnan() filtered = e[pred] lower = filtered[TS_FIELD_NAME].max() if have_sids: # If we have sids, then we need to take the earliest of the # greatest date that has a non-null value by sid. lower = bz.by( filtered[SID_FIELD_NAME], timestamp=lower, ).timestamp.min() return lower lower = odo( reduce( bz.least, map(lower_for_col, columns), ), pd.Timestamp, **odo_kwargs ) if lower is pd.NaT: lower = lower_dt return e[ (e[TS_FIELD_NAME] >= lower) & (e[TS_FIELD_NAME] <= upper_dt) ][added_query_fields + list(map(getname, columns))] def collect_expr(e): """Execute and merge all of the per-column subqueries. Parameters ---------- e : Expr The baseline or deltas expression. Returns ------- result : pd.DataFrame The resulting dataframe. Notes ----- This can return more data than needed. The in memory reindex will handle this. """ df = odo(where(e), pd.DataFrame, **odo_kwargs) df.sort(TS_FIELD_NAME, inplace=True) # sort for the groupby later return df materialized_expr = collect_expr(expr) materialized_deltas = ( collect_expr(deltas) if deltas is not None else pd.DataFrame( columns=added_query_fields + list(map(getname, columns)), ) ) # It's not guaranteed that assets returned by the engine will contain # all sids from the deltas table; filter out such mismatches here. if not materialized_deltas.empty and have_sids: materialized_deltas = materialized_deltas[ materialized_deltas[SID_FIELD_NAME].isin(assets) ] if data_query_time is not None: for m in (materialized_expr, materialized_deltas): m.loc[:, TS_FIELD_NAME] = m.loc[ :, TS_FIELD_NAME ].astype('datetime64[ns]') normalize_timestamp_to_query_time( m, data_query_time, data_query_tz, inplace=True, ts_field=TS_FIELD_NAME, ) # Inline the deltas that changed our most recently known value. # Also, we reindex by the dates to create a dense representation of # the data. 
sparse_output, non_novel_deltas = overwrite_novel_deltas( materialized_expr, materialized_deltas, dates, ) sparse_output.drop(AD_FIELD_NAME, axis=1, inplace=True) def last_in_date_group(df, reindex, have_sids=have_sids): idx = dates[dates.searchsorted( df[TS_FIELD_NAME].values.astype('datetime64[D]') )] if have_sids: idx = [idx, SID_FIELD_NAME] last_in_group = df.drop(TS_FIELD_NAME, axis=1).groupby( idx, sort=False, ).last() if have_sids: last_in_group = last_in_group.unstack() if reindex: if have_sids: cols = last_in_group.columns last_in_group = last_in_group.reindex( index=dates, columns=pd.MultiIndex.from_product( (cols.levels[0], assets), names=cols.names, ), ) else: last_in_group = last_in_group.reindex(dates) return last_in_group sparse_deltas = last_in_date_group(non_novel_deltas, reindex=False) dense_output = last_in_date_group(sparse_output, reindex=True) dense_output.ffill(inplace=True) if have_sids: adjustments_from_deltas = adjustments_from_deltas_with_sids column_view = identity else: # We use the column view to make an array per asset. column_view = compose( # We need to copy this because we need a concrete ndarray. # The `repeat_last_axis` call will give us a fancy strided # array which uses a buffer to represent `len(assets)` columns. # The engine puts nans at the indicies for which we do not have # sid information so that the nan-aware reductions still work. # A future change to the engine would be to add first class # support for macro econimic datasets. copy, partial(repeat_last_axis, count=len(assets)), ) adjustments_from_deltas = adjustments_from_deltas_no_sids for column_idx, column in enumerate(columns): column_name = column.name yield column, AdjustedArray( column_view( dense_output[column_name].values.astype(column.dtype), ), mask, adjustments_from_deltas( dates, sparse_output[TS_FIELD_NAME].values, column_idx, column_name, asset_idx, sparse_deltas, ), column.missing_value, )
ValueError
dataset/ETHPy150Open quantopian/zipline/zipline/pipeline/loaders/blaze/core.py/BlazeLoader._load_dataset
4,171
def validate_unique(self):
    try:
        self.instance.validate_unique()
    except __HOLE__, e:
        if 'full_name' in e.message_dict:
            e.message_dict['__all__'] = e.message_dict['full_name']
        self._update_errors(e.message_dict)
ValidationError
dataset/ETHPy150Open mozilla/inventory/core/site/forms.py/SiteForm.validate_unique
4,172
def main():
    d = VStrokerDevice.getDeviceList()
    if len(d) == 0:
        print "No devices found!"
        return 1
    print d
    v = VStrokerDevice()
    v.open(d[0])
    try:
        while True:
            l = v.getParsedData()
            if l is None:
                time.sleep(.004)
                continue
            print l
    except __HOLE__:
        return 0
KeyboardInterrupt
dataset/ETHPy150Open metafetish/buttplug/plugins/vstroker/vstroker.py/main
4,173
def test_validate(self):
    validator = Enum(['foo', 'bar', 'baz'])

    self.assertEqual('foo', validator.validate('foo'))
    self.assertEqual('bar', validator.validate('bar'))
    self.assertEqual('baz', validator.validate('baz'))

    try:
        validator.validate('foooo')
        self.fail('ValidationError not raised')
    except __HOLE__ as e:
        self.assertRegex(
            str(e),
            'Invalid value \'foooo\' \(str\): must be one of \{%s\}'
            % ', '.join(map(repr, ['foo', 'bar', 'baz']))
        )
ValidationError
dataset/ETHPy150Open sdispater/cleo/tests/validators/test_validators.py/EnumTestCase.test_validate
4,174
def test_validate(self):
    validator = Choice(['foo', 'bar', 'baz'])

    self.assertEqual('foo', validator.validate('foo'))
    self.assertEqual('bar', validator.validate('bar'))
    self.assertEqual('baz', validator.validate('baz'))

    try:
        validator.validate('foooo')
        self.fail('ValidationError not raised')
    except __HOLE__ as e:
        self.assertRegex(
            str(e),
            'Invalid value \'foooo\' \(str\): must be one of \{%s\}'
            % ', '.join(map(repr, ['foo', 'bar', 'baz']))
        )
ValidationError
dataset/ETHPy150Open sdispater/cleo/tests/validators/test_validators.py/ChoiceTestCase.test_validate
4,175
def dispatch_request(self, request): adapter = self.url_map.bind_to_environ(request.environ) try: endpoint_name, values = adapter.match() except WerkzeugNotFound: return error_response("Not Found", 404) if endpoint_name == 'spec': endpoint = SpecEndpoint(self.api.spec) elif endpoint_name == 'action': action_name = values.pop('action') if action_name not in self.api.spec['actions'].keys(): return error_response("Not Found", 404) endpoint = ActionEndpoint( self.api.spec, action_name, getattr(self.api.actions, action_name)) else: model_name = values.pop('model') if model_name not in self.api.spec['models'].keys(): return error_response("Not Found", 404) model_spec = self.api.spec['models'][model_name] if not model_spec['methods'][endpoint_name]: return error_response("Method Not Allowed", 405) model_obj = getattr(self.api.models, model_name) endpoints = { 'get_by_id': GetByIdEndpoint, 'create': CreateEndpoint, 'update': UpdateEndpoint, 'delete': DeleteEndpoint, 'get_list': GetListEndpoint, } args = { "api_spec": self.api.spec, "model_name": model_name, "func": getattr(model_obj, endpoint_name), } endpoint = endpoints[endpoint_name](**args) try: return self.view(endpoint, request, **values) except __HOLE__ as err: return error_response(err.message, err.code)
HTTPError
dataset/ETHPy150Open cosmic-api/cosmic.py/cosmic/http.py/Server.dispatch_request
4,176
def view(self, endpoint, request, **url_args):
    try:
        func_input = self.parse_request(endpoint, request, **url_args)
    except __HOLE__ as err:
        return error_response(str(err), 400)
    func_output = endpoint.handler(**func_input)
    return self.build_response(endpoint,
                               func_input=func_input,
                               func_output=func_output)
ValidationError
dataset/ETHPy150Open cosmic-api/cosmic.py/cosmic/http.py/Server.view
4,177
def get_payload_from_http_message(req):
    bytes = req.data
    if not bytes:
        return None
    if req.mimetype != "application/json":
        raise SpecError('Content-Type must be "application/json" got "%s" instead' % req.mimetype)
    charset = req.mimetype_params.get("charset", "utf-8")
    if charset.lower() != "utf-8":
        raise SpecError('Content-Type charset must be "utf-8" got %s instead' % charset)
    try:
        data = bytes.decode('utf-8')
    except __HOLE__:
        raise SpecError("Unicode Decode Error")
    try:
        return Box(json.loads(data))
    except ValueError:
        raise SpecError("Invalid JSON")
UnicodeDecodeError
dataset/ETHPy150Open cosmic-api/cosmic.py/cosmic/http.py/get_payload_from_http_message
4,178
def parse_response(self, res):
    is_empty = res.text == ""
    if ((self.response_must_be_empty == True and not is_empty) or
            (is_empty and self.response_can_be_empty == False)):
        raise SpecError("Invalid response")

    r = {
        'code': res.status_code,
        'headers': res.headers
    }

    try:
        r['json'] = string_to_json(res.text)
    except __HOLE__:
        raise SpecError("Unparseable response")

    if r['code'] not in self.acceptable_response_codes:
        message = None
        if 'json' in r and r['json'] and type(r['json'].datum) == dict and 'error' in r['json'].datum:
            message = r['json'].datum['error']
        raise RemoteHTTPError(code=r['code'], message=message)

    return r
ValueError
dataset/ETHPy150Open cosmic-api/cosmic.py/cosmic/http.py/Endpoint.parse_response
4,179
def main():
    try:
        get_ipython
    except __HOLE__:
        pass
    else:
        exit("Running ipython inside ipython isn't supported. :(")

    options, basic_auth, oauth = get_config()

    if basic_auth:
        basic_auth = (basic_auth['username'], basic_auth['password'])

    if oauth.get('oauth_dance') is True:
        oauth = oauth_dance(
            options['server'], oauth['consumer_key'], oauth['key_cert'],
            oauth['print_tokens'], options['verify'])
    elif not all((oauth.get('access_token'), oauth.get('access_token_secret'),
                  oauth.get('consumer_key'), oauth.get('key_cert'))):
        oauth = None

    jira = JIRA(options=options, basic_auth=basic_auth, oauth=oauth)

    from IPython.frontend.terminal.embed import InteractiveShellEmbed

    ipshell = InteractiveShellEmbed(
        banner1='<JIRA Shell ' + __version__ + ' (' + jira.client_info() + ')>')
    ipshell("*** JIRA shell active; client is in 'jira'."
            ' Press Ctrl-D to exit.')
NameError
dataset/ETHPy150Open pycontribs/jira/jira/jirashell.py/main
4,180
def query_terms(model, path): """ Yields QueryTerms of given path starting from given model. - model can be either a regular model or a translatable model """ bits = path.split('__') field = None for depth, bit in enumerate(bits): # STEP 1 -- Resolve the field if bit == 'pk': # handle 'pk' alias bit = model._meta.pk.name try: if django.VERSION >= (1, 8): try: # is field on the shared model? field = model._meta.get_field.real(bit) translated = False except FieldDoesNotExist: # nope, get field from translations model field = model._meta.translations_model._meta.get_field(bit) translated = True except AttributeError: # current model is a standard model field = model._meta.get_field(bit) translated = False direct = ( not field.auto_created or getattr(field, 'db_column', None) or getattr(field, 'attname', None) ) else: # older versions do not retrieve reverse/m2m with get_field, we must use the obsolete api try: field, _, direct, _ = model._meta.get_field_by_name.real(bit) translated = False except FieldDoesNotExist: field, _, direct, _ = model._meta.translations_model._meta.get_field_by_name(bit) translated = True except __HOLE__: field, _, direct, _ = model._meta.get_field_by_name(bit) translated = False except FieldDoesNotExist: break # STEP 2 -- Find out the target of the relation, if it is one if direct: # field is on model if field.rel: # field is a foreign key, follow it target = field.rel.to._meta.concrete_model else: # field is a regular field target = None else: # field is a m2m or reverse fk, follow it target = (field.related_model._meta.concrete_model if django.VERSION >= (1, 8) else field.model._meta.concrete_model) yield QueryTerm( depth=depth, term=bit, model=model, field=field, translated=translated, target=target, many=not direct ) # Onto next iteration if target is None: depth += 1 # we hit a regular field, mark it as yielded then break break # through to lookup/transform flushing model = target else: return # all bits were recognized as fields, job done # STEP 3 -- Flush lookup/transform bits - do not handle invalid stuff, Django will do it for depth, bit in enumerate(bits[depth:], depth): yield QueryTerm( depth=depth, term=bit, model=model, field=None, translated=None, target=None, many=False )
AttributeError
dataset/ETHPy150Open KristianOellegaard/django-hvad/hvad/query.py/query_terms
4,181
def where_node_children(node):
    ''' Recursively visit all children of a where node, yielding each field in turn.
        - node: the node to visit
    '''
    todo = [node]
    get_field_name = ((lambda n: n.lhs.target.name) if django.VERSION >= (1, 7) else
                      (lambda n: n[0].field.name))
    while todo:
        node = todo.pop()
        for child in node.children:
            try:
                field_name = get_field_name(child)
            except (__HOLE__, AttributeError):
                pass
            else:
                yield child, field_name
            if isinstance(child, WhereNode):
                todo.append(child)

#===============================================================================
# Query manipulations
TypeError
dataset/ETHPy150Open KristianOellegaard/django-hvad/hvad/query.py/where_node_children
4,182
@strFields(t = None, v = "") @checkAuth(write=False) def troveInfo(self, auth, t, v): t = unquote(t) leaves = {} for serverName in self.serverNameList: newLeaves = self.repos.getTroveVersionList(serverName, {t: [None]}) leaves.update(newLeaves) if t not in leaves: return self._write("error", error = '%s was not found on this server.' %t) versionList = sorted(leaves[t].keys(), reverse = True) if not v: reqVer = versionList[0] else: try: reqVer = versions.VersionFromString(v) except (versions.ParseError, __HOLE__): try: reqVer = versions.ThawVersion(v) except: return self._write("error", error = "Invalid version: %s" %v) try: query = [(t, reqVer, x) for x in leaves[t][reqVer]] except KeyError: return self._write("error", error = "Version %s of %s was not found on this server." %(reqVer, t)) troves = self.repos.getTroves(query, withFiles = False) mdata = self.repos.getMetadata([t, reqVer.branch()], reqVer.branch().label()) if t in mdata: mdata = mdata[t] return self._write("trove_info", troveName = t, troves = troves, versionList = versionList, reqVer = reqVer, metadata = mdata)
ValueError
dataset/ETHPy150Open sassoftware/conary/conary/web/repos_web.py/ReposWeb.troveInfo
4,183
@strFields(t = None, v = None, f = "")
@checkAuth(write=False)
def files(self, auth, t, v, f):
    try:
        v = versions.VersionFromString(v)
    except (versions.ParseError, __HOLE__):
        v = versions.ThawVersion(v)
    f = deps.ThawFlavor(f)
    parentTrove = self.repos.getTrove(t, v, f, withFiles = False)
    # non-source group troves only show contained troves
    if trove.troveIsGroup(t):
        troves = sorted(parentTrove.iterTroveList(strongRefs=True))
        return self._write("group_contents", troveName = t, troves = troves)
    fileIters = []
    for n, v, f in self.repos.walkTroveSet(parentTrove, withFiles = False):
        files = self.repos.iterFilesInTrove(n, v, f, withFiles = True, sortByPath = True)
        fileIters.append(files)
    return self._write("files", troveName = t,
                       fileIters = itertools.chain(*fileIters))
ValueError
dataset/ETHPy150Open sassoftware/conary/conary/web/repos_web.py/ReposWeb.files
4,184
def heapiter(heap):
    # An iterator returning a heap's elements, smallest-first.
    try:
        while 1:
            yield heappop(heap)
    except __HOLE__:
        pass
IndexError
dataset/ETHPy150Open babble/babble/include/jython/Lib/test/test_heapq.py/heapiter
4,185
def test_push_pop(self):
    # 1) Push 256 random numbers and pop them off, verifying all's OK.
    heap = []
    data = []
    self.check_invariant(heap)
    for i in range(256):
        item = random.random()
        data.append(item)
        heappush(heap, item)
        self.check_invariant(heap)
    results = []
    while heap:
        item = heappop(heap)
        self.check_invariant(heap)
        results.append(item)
    data_sorted = data[:]
    data_sorted.sort()
    self.assertEqual(data_sorted, results)
    # 2) Check that the invariant holds for a sorted array
    self.check_invariant(results)

    self.assertRaises(TypeError, heappush, [])
    try:
        self.assertRaises(TypeError, heappush, None, None)
        self.assertRaises(TypeError, heappop, None)
    except __HOLE__:
        pass
AttributeError
dataset/ETHPy150Open babble/babble/include/jython/Lib/test/test_heapq.py/TestHeap.test_push_pop
4,186
def sort_flavor_list(request, flavors):
    """Utility method to sort a list of flavors.

    By default, returns the available flavors, sorted by RAM usage
    (ascending). Override these behaviours with a
    CREATE_INSTANCE_FLAVOR_SORT dict in local_settings.py.
    """
    def get_key(flavor, sort_key):
        try:
            return getattr(flavor, sort_key)
        except __HOLE__:
            LOG.warning('Could not find sort key "%s". Using the default '
                        '"ram" instead.', sort_key)
            return getattr(flavor, 'ram')
    try:
        flavor_sort = getattr(settings, 'CREATE_INSTANCE_FLAVOR_SORT', {})
        sort_key = flavor_sort.get('key', 'ram')
        rev = flavor_sort.get('reverse', False)
        if not callable(sort_key):
            key = lambda flavor: get_key(flavor, sort_key)
        else:
            key = sort_key
        flavor_list = [(flavor.id, '%s' % flavor.name)
                       for flavor in sorted(flavors, key=key, reverse=rev)]
        return flavor_list
    except Exception:
        exceptions.handle(request, _('Unable to sort instance flavors.'))
        return []
AttributeError
dataset/ETHPy150Open openstack/horizon/openstack_dashboard/dashboards/project/instances/utils.py/sort_flavor_list
4,187
def encode_kw11(p):
    if not type(p) is dict:
        return {}
    ret = p.copy()
    removes = []
    for k, v in iteritems(ret):
        try:
            int(k)
        except __HOLE__:
            pass
        else:
            removes.append(k)
    for k in removes:
        ret.pop(k)
    return ret
ValueError
dataset/ETHPy150Open cenobites/flask-jsonrpc/flask_jsonrpc/site.py/encode_kw11
4,188
def encode_arg11(p):
    if type(p) is list:
        return p
    elif not type(p) is dict:
        return []
    else:
        pos = []
        d = encode_kw(p)
        for k, v in iteritems(d):
            try:
                pos.append(int(k))
            except __HOLE__:
                pass
        pos = list(set(pos))
        pos.sort()
        return [d[text_type(i)] for i in pos]
ValueError
dataset/ETHPy150Open cenobites/flask-jsonrpc/flask_jsonrpc/site.py/encode_arg11
4,189
def validate_params(method, D):
    if type(D['params']) == Object:
        keys = method.json_arg_types.keys()
        if len(keys) != len(D['params']):
            raise InvalidParamsError('Not enough params provided for {0}' \
                .format(method.json_sig))
        for k in keys:
            if not k in D['params']:
                raise InvalidParamsError('{0} is not a valid parameter for {1}' \
                    .format(k, method.json_sig))
            if not Any.kind(D['params'][k]) == method.json_arg_types[k]:
                raise InvalidParamsError('{0} is not the correct type {1} for {2}' \
                    .format(type(D['params'][k]), method.json_arg_types[k], method.json_sig))
    elif type(D['params']) == Array:
        arg_types = list(method.json_arg_types.values())
        try:
            for i, arg in enumerate(D['params']):
                if not Any.kind(arg) == arg_types[i]:
                    raise InvalidParamsError('{0} is not the correct type {1} for {2}' \
                        .format(type(arg), arg_types[i], method.json_sig))
        except __HOLE__:
            raise InvalidParamsError('Too many params provided for {0}'.format(method.json_sig))
        else:
            if len(D['params']) != len(arg_types):
                raise InvalidParamsError('Not enough params provided for {0}'.format(method.json_sig))
IndexError
dataset/ETHPy150Open cenobites/flask-jsonrpc/flask_jsonrpc/site.py/validate_params
4,190
def response_obj(self, request, D, version_hint=JSONRPC_VERSION_DEFAULT): version = version_hint response = self.empty_response(version=version) apply_version = { '2.0': self.apply_version_2_0, '1.1': self.apply_version_1_1, '1.0': self.apply_version_1_0 } try: try: # determine if an object is iterable? iter(D) except __HOLE__ as e: raise InvalidRequestError(getattr(e, 'message', e.args[0] if len(e.args) > 0 else None)) # version: validate if 'jsonrpc' in D: if text_type(D['jsonrpc']) not in apply_version: raise InvalidRequestError('JSON-RPC version {0} not supported.'.format(D['jsonrpc'])) version = request.jsonrpc_version = response['jsonrpc'] = text_type(D['jsonrpc']) elif 'version' in D: if text_type(D['version']) not in apply_version: raise InvalidRequestError('JSON-RPC version {0} not supported.'.format(D['version'])) version = request.jsonrpc_version = response['version'] = text_type(D['version']) else: version = request.jsonrpc_version = JSONRPC_VERSION_DEFAULT # params: An Array or Object, that holds the actual parameter values # for the invocation of the procedure. Can be omitted if empty. if 'params' not in D or not D['params']: D['params'] = [] if 'method' not in D or 'params' not in D: raise InvalidParamsError('Request requires str:"method" and list:"params"') if D['method'] not in self.urls: raise MethodNotFoundError('Method not found. Available methods: {0}' \ .format('\n'.join(list(self.urls.keys())))) method = self.urls[text_type(D['method'])] if getattr(method, 'json_validate', False): validate_params(method, D) if 'id' in D and D['id'] is not None: # regular request response['id'] = D['id'] if version in ('1.1', '2.0'): response.pop('error', None) else: # notification return None, 204 R = apply_version[version](method, D['params']) if 'id' not in D or ('id' in D and D['id'] is None): # notification return None, 204 if isinstance(R, Response): if R.status_code == 200: return R, R.status_code if R.status_code == 401: raise InvalidCredentialsError(R.status) raise OtherError(R.status, R.status_code) try: # New in Flask version 0.10. encoder = current_app.json_encoder() except AttributeError: encoder = json.JSONEncoder() # type of `R` should be one of these or... if not sum([isinstance(R, e) for e in \ string_types + integer_types + \ (float, complex, dict, list, tuple, set, frozenset, NoneType, bool)]): try: rs = encoder.default(R) # ...or something this thing supports except TypeError as exc: raise TypeError('Return type not supported, for {0!r}'.format(R)) response['result'] = R status = 200 except Error as e: response['error'] = e.json_rpc_format if version in ('1.1', '2.0'): response.pop('result', None) status = e.status except HTTPException as e: other_error = OtherError(e) response['error'] = other_error.json_rpc_format response['error']['code'] = e.code if version in ('1.1', '2.0'): response.pop('result', None) status = e.code except Exception as e: other_error = OtherError(e) response['error'] = other_error.json_rpc_format status = other_error.status if version in ('1.1', '2.0'): response.pop('result', None) # Exactly one of result or error MUST be specified. It's not # allowed to specify both or none. if version in ('1.1', '2.0') and 'result' in response: response.pop('error', None) return response, status
TypeError
dataset/ETHPy150Open cenobites/flask-jsonrpc/flask_jsonrpc/site.py/JSONRPCSite.response_obj
4,191
def verify(self):
    for key, value in KEYWORDS.items():
        if value: # mandatory keyword
            if key not in self._keywords:
                raise ValueError, "%s: mandatory keyword '%s' not present" % (self.__class__.__name__, key,)
    try:
        bs = self._keywords["bug_severity"]
    except __HOLE__:
        pass
    else:
        if bs not in SEVERITY_VALUES:
            raise ValueError, "%s: invalid severity '%s'" % (self.__class__.__name__, bs,)
    try:
        pv = self._keywords["priority"]
    except KeyError:
        pass
    else:
        if pv not in PRIORITY_VALUES:
            raise ValueError, "%s: invalid priority '%s'" % (self.__class__.__name__, pv,)
KeyError
dataset/ETHPy150Open kdart/pycopia/QA/pycopia/bugzillaclient.py/BugzillaBody.verify
4,192
def _installSignalHandlersAgain(self):
    """
    wx sometimes removes our own signal handlers, so re-add them.
    """
    try:
        # make _handleSignals happy:
        import signal
        signal.signal(signal.SIGINT, signal.default_int_handler)
    except __HOLE__:
        return
    self._handleSignals()
ImportError
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/internet/wxreactor.py/WxReactor._installSignalHandlersAgain
4,193
def get_api_results(self, response):
    if response.error:
        return self.send_error()
    try:
        return json.loads(response.body)
    except __HOLE__:
        return self.send_error(500)
ValueError
dataset/ETHPy150Open YelpArchive/pushmanager/pushmanager/core/requesthandler.py/RequestHandler.get_api_results
4,194
def Link(self, name):
    """Update the repo metadata to use a different manifest.
    """
    self.Override(name)

    try:
        if os.path.exists(self._manifestFile):
            os.remove(self._manifestFile)
        os.symlink('manifests/%s' % name, self._manifestFile)
    except __HOLE__, e:
        raise ManifestParseError('cannot link manifest %s' % name)
OSError
dataset/ETHPy150Open android/tools_repo/manifest_xml.py/XmlManifest.Link
4,195
def _ParseManifest(self, is_root_file): root = xml.dom.minidom.parse(self._manifestFile) if not root or not root.childNodes: raise ManifestParseError, \ "no root node in %s" % \ self._manifestFile config = root.childNodes[0] if config.nodeName != 'manifest': raise ManifestParseError, \ "no <manifest> in %s" % \ self._manifestFile for node in config.childNodes: if node.nodeName == 'remove-project': name = self._reqatt(node, 'name') try: del self._projects[name] except __HOLE__: raise ManifestParseError, \ 'project %s not found' % \ (name) for node in config.childNodes: if node.nodeName == 'remote': remote = self._ParseRemote(node) if self._remotes.get(remote.name): raise ManifestParseError, \ 'duplicate remote %s in %s' % \ (remote.name, self._manifestFile) self._remotes[remote.name] = remote for node in config.childNodes: if node.nodeName == 'default': if self._default is not None: raise ManifestParseError, \ 'duplicate default in %s' % \ (self._manifestFile) self._default = self._ParseDefault(node) if self._default is None: self._default = _Default() for node in config.childNodes: if node.nodeName == 'notice': if self._notice is not None: raise ManifestParseError, \ 'duplicate notice in %s' % \ (self.manifestFile) self._notice = self._ParseNotice(node) for node in config.childNodes: if node.nodeName == 'manifest-server': url = self._reqatt(node, 'url') if self._manifest_server is not None: raise ManifestParseError, \ 'duplicate manifest-server in %s' % \ (self.manifestFile) self._manifest_server = url for node in config.childNodes: if node.nodeName == 'project': project = self._ParseProject(node) if self._projects.get(project.name): raise ManifestParseError, \ 'duplicate project %s in %s' % \ (project.name, self._manifestFile) self._projects[project.name] = project
KeyError
dataset/ETHPy150Open android/tools_repo/manifest_xml.py/XmlManifest._ParseManifest
4,196
def read(self, index='PRIMARY', mode=None, where=None, limit=None, **kwargs): if not self.open: raise RuntimeError("This handler isn't open yet") index_op, index_value = self._parse_index_value(kwargs) if index_op is not None and mode is not None: raise ValueError("You cannot use an index operator and mode " "together in a handler read") elif index_op is None and mode is None: # Default mode = 'first' sql = ["HANDLER {} READ".format(self._handler_name)] params = () # Caller's responsibility to ensure the index name is correct sql.append("`{}`".format(index)) if index_op is not None: sql.append(index_op) if isinstance(index_value, tuple): sql.append("(") sql.append(",".join("%s" for x in index_value)) sql.append(")") params += index_value else: sql.append("(%s)") params += (index_value,) if index_op is None: try: sql.append(self._read_modes[mode]) except __HOLE__: raise ValueError( "'mode' must be one of: {}" .format(",".join(self._read_modes.keys())) ) if where is None: # Use default if self._where: sql.append(self._where) params += self._params else: # 'where' is another queryset to use the clause from if isinstance(where, tuple): # Allow parsing in a pre-extracted where clause + params - # as iter() does where, where_params = where else: where, where_params = self._extract_where(where) sql.append(where) params += where_params if limit is not None: sql.append("LIMIT %s") params += (limit,) return self._model.objects.using(self.db).raw(" ".join(sql), params)
KeyError
dataset/ETHPy150Open adamchainz/django-mysql/django_mysql/models/handler.py/Handler.read
4,197
def _parse_index_value(self, kwargs):
    """
    Parse the HANDLER-supported subset of django's __ expression syntax
    """
    if len(kwargs) == 0:
        return None, None
    elif len(kwargs) > 1:
        raise ValueError("You can't pass more than one value expression, "
                         "you passed {}".format(",".join(kwargs.keys())))

    name, value = list(kwargs.items())[0]

    if not name.startswith('value'):
        raise ValueError("The keyword arg {} is not valid for this "
                         "function".format(name))

    if name == 'value':
        return ('=', value)

    if not name.startswith('value__'):
        raise ValueError("The keyword arg {} is not valid for this "
                         "function".format(name))

    operator = name[name.find('__') + 2:]
    try:
        return (self._operator_values[operator], value)
    except __HOLE__:
        raise ValueError(
            "The operator {op} is not valid for index value matching. "
            "Valid operators are {valid}"
            .format(
                op=operator,
                valid=",".join(self._operator_values.keys())
            )
        )
KeyError
dataset/ETHPy150Open adamchainz/django-mysql/django_mysql/models/handler.py/Handler._parse_index_value
4,198
def build_hmac_signature(self, http_request, oauth_params, consumer_secret, token_secret):
    raw = build_signature_base_string(http_request, oauth_params)
    key = None
    hashed = None
    if token_secret:
        key = '%s&%s' % (escape(consumer_secret), escape(token_secret))
    else:
        key = '%s&' % escape(consumer_secret)
    try:
        import hashlib
        hashed = hmac.new(key, raw, hashlib.sha1)
    except __HOLE__:
        import sha
        hashed = hmac.new(key, raw, sha)
    # Calculate the digest base 64.
    return binascii.b2a_base64(hashed.digest())[:-1]

#?
ImportError
dataset/ETHPy150Open kuri65536/python-for-android/python-build/python-libs/gdata/build/lib/gdata/experimental_oauth.py/build_hmac_signature
4,199
def _parse_specs(specs, Ks, dim, ns): ''' Set up the different functions we need to call. Returns: - a dict mapping base estimator functions to _FuncInfo objects. If the function needs_alpha, then the alphas attribute is an array of alpha values and pos is a corresponding array of indices. Otherwise, alphas is None and pos is a list containing a single index. Indices are >= 0 if they correspond to something in a spec, and negative if they're just used for a meta estimator but not directly requested. - an OrderedDict mapping functions to _MetaFuncInfo objects. alphas and pos are like for _FuncInfo; deps is a list of indices which should be passed to the estimator. Note that these might be other meta functions; this list is guaranteed to be in an order such that all dependencies are resolved before calling that function. If no such order is possible, raise ValueError. - the number of meta-only results # TODO: update doctests for _parse_specs >>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9']) ({<function alpha_div at 0x10954f848>: _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3])}, OrderedDict([ (<function hellinger at 0x10954fc80>, _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])), (<function renyi at 0x10954fcf8>, _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3])) ]), 3) >>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9', 'l2']) ({<function alpha_div at 0x10954f848>: _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3]), <function linear at 0x10954f758>: _FuncInfo(alphas=None, pos=[-4]) }, OrderedDict([ (<function hellinger at 0x10954fc80>, _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])), (<function l2 at 0x10954fde8>, _MetaFuncInfo(alphas=None, pos=[3], deps=[-4])), (<function renyi at 0x10954fcf8>, _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3])) ]), 4) >>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9', 'l2', 'linear']) ({<function alpha_div at 0x10954f848>: _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3]), <function linear at 0x10954f758>: _FuncInfo(alphas=None, pos=[4]) }, OrderedDict([ (<function hellinger at 0x10954fc80>, _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])), (<function l2 at 0x10954fde8>, _MetaFuncInfo(alphas=None, pos=[3], deps=[4])), (<function renyi at 0x10954fcf8>, _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3])) ]), 3) ''' funcs = {} metas = {} meta_deps = defaultdict(set) def add_func(func, alpha=None, pos=None): needs_alpha = getattr(func, 'needs_alpha', False) is_meta = hasattr(func, 'needs_results') d = metas if is_meta else funcs if func not in d: if needs_alpha: args = {'alphas': [alpha], 'pos': [pos]} else: args = {'alphas': None, 'pos': [pos]} if not is_meta: d[func] = _FuncInfo(**args) else: d[func] = _MetaFuncInfo(deps=[], **args) for req in func.needs_results: if callable(req.alpha): req_alpha = req.alpha(alpha) else: req_alpha = req.alpha add_func(req.func, alpha=req_alpha) meta_deps[func].add(req.func) meta_deps[req.func] # make sure required func is in there else: # already have an entry for the func # need to give it this pos, if it's not None # and also make sure that the alpha is present info = d[func] if not needs_alpha: if pos is not None: if info.pos != [None]: msg = "{} passed more than once" raise ValueError(msg.format(func_name)) info.pos[0] = pos else: # needs alpha try: idx = info.alphas.index(alpha) except ValueError: # this is a new alpha value we haven't seen yet info.alphas.append(alpha) info.pos.append(pos) if is_meta: for req in func.needs_results: if 
callable(req.alpha): req_alpha = req.alpha(alpha) else: req_alpha = req.alpha add_func(req.func, alpha=req_alpha) else: # repeated alpha value if pos is not None: if info.pos[idx] is not None: msg = "{} with alpha {} passed more than once" raise ValueError(msg.format(func_name, alpha)) info.pos[idx] = pos # add functions for each spec for i, spec in enumerate(specs): func_name, alpha = (spec.split(':', 1) + [None])[:2] if alpha is not None: alpha = float(alpha) try: func = func_mapping[func_name] except __HOLE__: msg = "'{}' is not a known function type" raise ValueError(msg.format(func_name)) needs_alpha = getattr(func, 'needs_alpha', False) if needs_alpha and alpha is None: msg = "{} needs alpha but not passed in spec '{}'" raise ValueError(msg.format(func_name, spec)) elif not needs_alpha and alpha is not None: msg = "{} doesn't need alpha but is passed in spec '{}'" raise ValueError(msg.format(func_name, spec)) add_func(func, alpha, i) # number things that are dependencies only meta_counter = itertools.count(-1, step=-1) for info in itertools.chain(itervalues(funcs), itervalues(metas)): for i, pos in enumerate(info.pos): if pos is None: info.pos[i] = next(meta_counter) # fill in the dependencies for metas for func, info in iteritems(metas): deps = info.deps assert deps == [] for req in func.needs_results: f = req.func req_info = (metas if hasattr(f, 'needs_results') else funcs)[f] if req.alpha is not None: if callable(req.alpha): req_alpha = req.alpha(info.alphas) else: req_alpha = req.alpha find_alpha = np.vectorize(req_info.alphas.index, otypes=[int]) pos = np.asarray(req_info.pos)[find_alpha(req_alpha)] if np.isscalar(pos): deps.append(pos[()]) else: deps.extend(pos) else: pos, = req_info.pos deps.append(pos) # topological sort of metas meta_order = topological_sort(meta_deps) metas_ordered = OrderedDict( (f, metas[f]) for f in meta_order if hasattr(f, 'needs_results')) # replace functions with partials of args def replace_func(func, info): needs_alpha = getattr(func, 'needs_alpha', False) new = None args = (Ks, dim) if needs_alpha: args = (info.alphas,) + args if hasattr(func, 'chooser_fn'): args += (ns,) if (getattr(func, 'needs_all_ks', False) and getattr(func.chooser_fn, 'returns_ks', False)): new, k = func.chooser_fn(*args) new.k_needed = k else: new = func.chooser_fn(*args) else: new = partial(func, *args) for attr in dir(func): if not (attr.startswith('__') or attr.startswith('func_')): setattr(new, attr, getattr(func, attr)) return new rep_funcs = dict( (replace_func(f, info), info) for f, info in iteritems(funcs)) rep_metas_ordered = OrderedDict( (replace_func(f, info), info) for f, info in iteritems(metas_ordered)) return rep_funcs, rep_metas_ordered, -next(meta_counter) - 1
KeyError
dataset/ETHPy150Open dougalsutherland/py-sdm/sdm/np_divs.py/_parse_specs