Column schema for this dump (name, type, observed range):

    Unnamed: 0    int64           0 .. 10k
    function      stringlengths   79 .. 138k characters
    label         stringclasses   20 values
    info          stringlengths   42 .. 261 characters
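Each record below gives a running row index, a Python function in which one exception class has been masked with the placeholder __HOLE__, a label naming the masked exception class, and an info path locating the function in the ETHPy150Open corpus. The following is a minimal sketch of how a dump with this schema could be loaded and the masks restored; the filename holes.csv and the restored column name are assumptions for illustration, not part of the dataset:

    import pandas as pd

    # Hypothetical filename; any CSV export of the four columns above works.
    df = pd.read_csv("holes.csv")

    # Each 'function' cell should contain exactly one masked exception site.
    assert (df["function"].str.count("__HOLE__") == 1).all()

    # Substitute the label back in to recover the original source text.
    df["restored"] = df.apply(
        lambda row: row["function"].replace("__HOLE__", row["label"]), axis=1
    )

    # 'label' is drawn from a closed set of 20 exception classes.
    print(df["label"].value_counts())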
3,800
def __init__(self, typemap=None, namespace=None, nsmap=None, makeelement=None):
    if namespace is not None:
        self._namespace = '{' + namespace + '}'
    else:
        self._namespace = None

    if nsmap:
        self._nsmap = dict(nsmap)
    else:
        self._nsmap = None

    if makeelement is not None:
        assert callable(makeelement)
        self._makeelement = makeelement
    else:
        self._makeelement = ET.Element

    # initialize type map for this element factory
    if typemap:
        typemap = typemap.copy()
    else:
        typemap = {}

    def add_text(elem, item):
        try:
            elem[-1].tail = (elem[-1].tail or "") + item
        except __HOLE__:
            elem.text = (elem.text or "") + item
    if str not in typemap:
        typemap[str] = add_text
    if unicode not in typemap:
        typemap[unicode] = add_text

    def add_dict(elem, item):
        attrib = elem.attrib
        for k, v in item.items():
            if isinstance(v, basestring):
                attrib[k] = v
            else:
                attrib[k] = typemap[type(v)](None, v)
    if dict not in typemap:
        typemap[dict] = add_dict

    self._typemap = typemap
IndexError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/lxml-3.3.6/src/lxml/builder.py/ElementMaker.__init__
3,801
def centerOn(self, name):
    """ if set, keeps camera to the given ODE object name. """
    try:
        self.getRenderer().setCenterObj(self.root.namedChild(name).getODEObject())
    except __HOLE__:
        # name not found, unset centerObj
        print(("Warning: Cannot center on " + name))
        self.centerObj = None
KeyError
dataset/ETHPy150Open pybrain/pybrain/pybrain/rl/environments/ode/environment.py/ODEEnvironment.centerOn
3,802
def loadXODE(self, filename, reload=False):
    """ loads an XODE file (xml format) and parses it. """
    f = open(filename)
    self._currentXODEfile = filename
    p = xode.parser.Parser()
    self.root = p.parseFile(f)
    f.close()
    try:
        # filter all xode "world" objects from root, take only the first one
        world = filter(lambda x: isinstance(x, xode.parser.World), self.root.getChildren())[0]
    except IndexError:
        # malicious format, no world tag found
        print(("no <world> tag found in " + filename + ". quitting."))
        sys.exit()
    self.world = world.getODEObject()
    self._setWorldParameters()

    try:
        # filter all xode "space" objects from world, take only the first one
        space = filter(lambda x: isinstance(x, xode.parser.Space), world.getChildren())[0]
    except __HOLE__:
        # malicious format, no space tag found
        print(("no <space> tag found in " + filename + ". quitting."))
        sys.exit()
    self.space = space.getODEObject()

    # load bodies and geoms for painting
    self.body_geom = []
    self._parseBodies(self.root)

    if self.verbosity > 0:
        print("-------[body/mass list]-----")
        for (body, _) in self.body_geom:
            try:
                print((body.name, body.getMass()))
            except AttributeError:
                print("<Nobody>")

    # now parse the additional parameters at the end of the xode file
    self.loadConfig(filename, reload)
IndexError
dataset/ETHPy150Open pybrain/pybrain/pybrain/rl/environments/ode/environment.py/ODEEnvironment.loadXODE
3,803
def loadConfig(self, filename, reload=False):
    # parameters are given in (our own brand of) config-file syntax
    self.config = ConfigGrabber(filename, sectionId="<!--odeenvironment parameters", delim=("<", ">"))

    # <passpairs>
    self.passpairs = []
    for passpairstring in self.config.getValue("passpairs")[:]:
        self.passpairs.append(eval(passpairstring))
    if self.verbosity > 0:
        print("-------[pass tuples]--------")
        print((self.passpairs))
        print("----------------------------")

    # <centerOn>
    # set focus of camera to the first object specified in the section, if any
    if self.render:
        try:
            self.centerOn(self.config.getValue("centerOn")[0])
        except __HOLE__:
            pass

    # <affixToEnvironment>
    for jointName in self.config.getValue("affixToEnvironment")[:]:
        try:
            # find first object with that name
            obj = self.root.namedChild(jointName).getODEObject()
        except IndexError:
            print(("ERROR: Could not affix object '" + jointName + "' to environment!"))
            sys.exit(1)
        if isinstance(obj, ode.Joint):
            # if it is a joint, use this joint to fix to environment
            obj.attach(obj.getBody(0), ode.environment)
        elif isinstance(obj, ode.Body):
            # if it is a body, create new joint and fix body to environment
            j = ode.FixedJoint(self.world)
            j.attach(obj, ode.environment)
            j.setFixed()

    # <colors>
    for coldefstring in self.config.getValue("colors")[:]:
        # ('name', (0.3,0.4,0.5))
        objname, coldef = eval(coldefstring)
        for (body, _) in self.body_geom:
            if hasattr(body, 'name'):
                if objname == body.name:
                    body.color = coldef
                    break

    if not reload:
        # add the JointSensor as default
        self.sensors = []
        ## self.addSensor(self._jointSensor)

        # <sensors>
        # expects a list of strings, each of which is the executable command to create a sensor object
        # example: DistToPointSensor('legSensor', (0.0, 0.0, 5.0))
        sens = self.config.getValue("sensors")[:]
        for s in sens:
            try:
                self.addSensor(eval('sensors.' + s))
            except AttributeError:
                print((dir(sensors)))
                warnings.warn("Sensor name with name " + s + " not found. skipped.")
    else:
        for s in self.sensors:
            s._connect(self)
        for a in self.actuators:
            a._connect(self)
IndexError
dataset/ETHPy150Open pybrain/pybrain/pybrain/rl/environments/ode/environment.py/ODEEnvironment.loadConfig
3,804
def _parseBodies(self, node):
    """ parses through the xode tree recursively and finds all bodies and geoms for drawing. """

    # body (with nested geom)
    if isinstance(node, xode.body.Body):
        body = node.getODEObject()
        body.name = node.getName()
        try:
            # filter all xode geom objects and take the first one
            xgeom = filter(lambda x: isinstance(x, xode.geom.Geom), node.getChildren())[0]
        except __HOLE__:
            return()  # no geom object found, skip this node
        # get the real ode object
        geom = xgeom.getODEObject()
        # if geom doesn't have own name, use the name of its body
        geom.name = node.getName()
        self.body_geom.append((body, geom))

    # geom on its own without body
    elif isinstance(node, xode.geom.Geom):
        try:
            node.getFirstAncestor(ode.Body)
        except xode.node.AncestorNotFoundError:
            body = None
            geom = node.getODEObject()
            geom.name = node.getName()
            self.body_geom.append((body, geom))

    # special cases for joints: universal, fixed, amotor
    elif isinstance(node, xode.joint.Joint):
        joint = node.getODEObject()

        if type(joint) == ode.UniversalJoint:
            # insert an additional AMotor joint to read the angles from and to add torques
            # amotor = ode.AMotor(self.world)
            # amotor.attach(joint.getBody(0), joint.getBody(1))
            # amotor.setNumAxes(3)
            # amotor.setAxis(0, 0, joint.getAxis2())
            # amotor.setAxis(2, 0, joint.getAxis1())
            # amotor.setMode(ode.AMotorEuler)
            # xode_amotor = xode.joint.Joint(node.getName() + '[amotor]', node.getParent())
            # xode_amotor.setODEObject(amotor)
            # node.getParent().addChild(xode_amotor, None)
            pass
        if type(joint) == ode.AMotor:
            # do the euler angle calculations automatically (ref. ode manual)
            joint.setMode(ode.AMotorEuler)
        if type(joint) == ode.FixedJoint:
            # prevent fixed joints from bouncing to center of first body
            joint.setFixed()

    # recursive call for all child nodes
    for c in node.getChildren():
        self._parseBodies(c)
IndexError
dataset/ETHPy150Open pybrain/pybrain/pybrain/rl/environments/ode/environment.py/ODEEnvironment._parseBodies
3,805
def getSensorByName(self, name):
    try:
        idx = self.getSensorNames().index(name)
    except __HOLE__:
        warnings.warn('sensor ' + name + ' is not in sensor list.')
        return []
    return self.sensors[idx].getValues()
ValueError
dataset/ETHPy150Open pybrain/pybrain/pybrain/rl/environments/ode/environment.py/ODEEnvironment.getSensorByName
3,806
def build(self):
    # If there is a docker file or url hand off to Docker builder
    if 'buildspec' in self.config:
        if self.config['buildspec']:
            if 'dockerfile' in self.config['buildspec']:
                self._build(dockerfile=self.config['buildspec']['dockerfile'])
            elif 'url' in self.config['buildspec']:
                self._build(url=self.config['buildspec']['url'])
        else:
            raise exceptions.TemplateError("Template: " + self.name + " Buildspec specified but no dockerfile or url found.")
    else:
        # verify the base image and pull it if necessary
        try:
            base = self.config['base_image']
            self.backend.inspect_image(base)
        except HTTPError:
            # Attempt to pull the image.
            self.log.info('Attempting to pull base: %s', base)
            result = self.backend.pull_image(base)
            if 'error' in result:
                self.log.error('No base image could be pulled under the name: %s', base)
                raise exceptions.TemplateError("No base image could be pulled under the name: " + base)
        except __HOLE__:
            raise exceptions.TemplateError("Template: " + self.name + "No base image specified.")

        # There doesn't seem to be a way to currently remove tags so we'll generate a new image.
        # More consistent for all cases this way too but it does feel kinda wrong.
        dockerfile = """
        FROM %s
        MAINTAINER %s
        """ % (base, self._mid())
        self._build(dockerfile=dockerfile)

    return True

# Launches an instance of the template in a new container
KeyError
dataset/ETHPy150Open toscanini/maestro/maestro/template.py/Template.build
3,807
def _apply_patch(source, patch_text, forwards, name):
    # Cached ?
    try:
        return _patching_cache.retrieve(source, patch_text, forwards)
    except __HOLE__:
        pass

    # Write out files
    tempdir = mkdtemp(prefix='patchy')
    try:
        source_path = os.path.join(tempdir, name + '.py')
        with open(source_path, 'w') as source_file:
            source_file.write(source)

        patch_path = os.path.join(tempdir, name + '.patch')
        with open(patch_path, 'w') as patch_file:
            patch_file.write(patch_text)
            if not patch_text.endswith('\n'):
                patch_file.write('\n')

        # Call `patch` command
        command = ['patch']
        if not forwards:
            command.append('--reverse')
        command.extend([source_path, patch_path])
        proc = subprocess.Popen(
            command,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE
        )
        stdout, stderr = proc.communicate()

        if proc.returncode != 0:
            msg = "Could not {action} the patch {prep} '{name}'.".format(
                action=("apply" if forwards else "unapply"),
                prep=("to" if forwards else "from"),
                name=name
            )
            if stdout or stderr:
                msg += " The message from `patch` was:\n{}\n{}".format(
                    stdout.decode('utf-8'),
                    stderr.decode('utf-8')
                )
            msg += (
                "\nThe code to patch was:\n{}\nThe patch was:\n{}"
                .format(source, patch_text)
            )
            raise ValueError(msg)

        with open(source_path, 'r') as source_file:
            new_source = source_file.read()
    finally:
        shutil.rmtree(tempdir)

    _patching_cache.store(source, patch_text, forwards, new_source)
    return new_source
KeyError
dataset/ETHPy150Open adamchainz/patchy/patchy/api.py/_apply_patch
3,808
def _get_source(func):
    real_func = _get_real_func(func)
    try:
        return _source_map[real_func]
    except __HOLE__:
        source = inspect.getsource(func)
        source = dedent(source)
        return source
KeyError
dataset/ETHPy150Open adamchainz/patchy/patchy/api.py/_get_source
3,809
def _Run(self, argv):
    result = 0
    name = None
    glob = []

    for i in range(len(argv)):
        if not argv[i].startswith('-'):
            name = argv[i]
            if i > 0:
                glob = argv[:i]
            argv = argv[i + 1:]
            break
    if not name:
        glob = argv
        name = 'help'
        argv = []
    gopts, _gargs = global_options.parse_args(glob)

    if gopts.trace:
        SetTrace()
    if gopts.show_version:
        if name == 'help':
            name = 'version'
        else:
            print('fatal: invalid usage of --version', file=sys.stderr)
            return 1

    SetDefaultColoring(gopts.color)

    try:
        cmd = self.commands[name]
    except __HOLE__:
        print("repo: '%s' is not a repo command. See 'repo help'." % name,
              file=sys.stderr)
        return 1

    cmd.repodir = self.repodir
    cmd.manifest = XmlManifest(cmd.repodir)
    cmd.gitc_manifest = None
    gitc_client_name = gitc_utils.parse_clientdir(os.getcwd())
    if gitc_client_name:
        cmd.gitc_manifest = GitcManifest(cmd.repodir, gitc_client_name)
        cmd.manifest.isGitcClient = True

    Editor.globalConfig = cmd.manifest.globalConfig

    if not isinstance(cmd, MirrorSafeCommand) and cmd.manifest.IsMirror:
        print("fatal: '%s' requires a working directory" % name,
              file=sys.stderr)
        return 1

    if isinstance(cmd, GitcAvailableCommand) and not gitc_utils.get_gitc_manifest_dir():
        print("fatal: '%s' requires GITC to be available" % name,
              file=sys.stderr)
        return 1

    if isinstance(cmd, GitcClientCommand) and not gitc_client_name:
        print("fatal: '%s' requires a GITC client" % name,
              file=sys.stderr)
        return 1

    try:
        copts, cargs = cmd.OptionParser.parse_args(argv)
        copts = cmd.ReadEnvironmentOptions(copts)
    except NoManifestException as e:
        print('error: in `%s`: %s' % (' '.join([name] + argv), str(e)),
              file=sys.stderr)
        print('error: manifest missing or unreadable -- please run init',
              file=sys.stderr)
        return 1

    if not gopts.no_pager and not isinstance(cmd, InteractiveCommand):
        config = cmd.manifest.globalConfig
        if gopts.pager:
            use_pager = True
        else:
            use_pager = config.GetBoolean('pager.%s' % name)
            if use_pager is None:
                use_pager = cmd.WantPager(copts)
        if use_pager:
            # RunPager(cmd)
            portable.RunPager(cmd)
        else:
            portable.NoPager(cmd)

    start = time.time()
    try:
        result = cmd.Execute(copts, cargs)
    except (DownloadError, ManifestInvalidRevisionError,
            NoManifestException) as e:
        print('error: in `%s`: %s' % (' '.join([name] + argv), str(e)),
              file=sys.stderr)
        if isinstance(e, NoManifestException):
            print('error: manifest missing or unreadable -- please run init',
                  file=sys.stderr)
        result = 1
    except NoSuchProjectError as e:
        if e.name:
            print('error: project %s not found' % e.name, file=sys.stderr)
        else:
            print('error: no project in current directory', file=sys.stderr)
        result = 1
    except InvalidProjectGroupsError as e:
        if e.name:
            print('error: project group must be enabled for project %s' % e.name,
                  file=sys.stderr)
        else:
            print('error: project group must be enabled for the project in the current directory',
                  file=sys.stderr)
        result = 1
    finally:
        elapsed = time.time() - start
        hours, remainder = divmod(elapsed, 3600)
        minutes, seconds = divmod(remainder, 60)
        if gopts.time:
            if hours == 0:
                print('real\t%dm%.3fs' % (minutes, seconds), file=sys.stderr)
            else:
                print('real\t%dh%dm%.3fs' % (hours, minutes, seconds),
                      file=sys.stderr)

    return result
KeyError
dataset/ETHPy150Open esrlabs/git-repo/main.py/_Repo._Run
3,810
def _AddPasswordFromUserInput(handler, msg, req):
    # If repo could not find auth info from netrc, try to get it from user input
    url = req.get_full_url()
    user, password = handler.passwd.find_user_password(None, url)
    if user is None:
        print(msg)
        try:
            user = input('User: ')
            password = getpass.getpass()
        except __HOLE__:
            return
        handler.passwd.add_password(None, url, user, password)
KeyboardInterrupt
dataset/ETHPy150Open esrlabs/git-repo/main.py/_AddPasswordFromUserInput
3,811
def init_http():
    handlers = [_UserAgentHandler()]

    mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    try:
        n = netrc.netrc()
        for host in n.hosts:
            p = n.hosts[host]
            mgr.add_password(p[1], 'http://%s/' % host, p[0], p[2])
            mgr.add_password(p[1], 'https://%s/' % host, p[0], p[2])
    except netrc.NetrcParseError:
        pass
    except __HOLE__:
        pass
    handlers.append(_BasicAuthHandler(mgr))
    handlers.append(_DigestAuthHandler(mgr))
    if kerberos:
        handlers.append(_KerberosAuthHandler())

    if 'http_proxy' in os.environ:
        url = os.environ['http_proxy']
        handlers.append(urllib.request.ProxyHandler({'http': url, 'https': url}))
    if 'REPO_CURL_VERBOSE' in os.environ:
        handlers.append(urllib.request.HTTPHandler(debuglevel=1))
        handlers.append(urllib.request.HTTPSHandler(debuglevel=1))
    urllib.request.install_opener(urllib.request.build_opener(*handlers))
IOError
dataset/ETHPy150Open esrlabs/git-repo/main.py/init_http
3,812
def _Main(argv):
    result = 0

    opt = optparse.OptionParser(usage="repo wrapperinfo -- ...")
    opt.add_option("--repo-dir", dest="repodir",
                   help="path to .repo/")
    opt.add_option("--wrapper-version", dest="wrapper_version",
                   help="version of the wrapper script")
    opt.add_option("--wrapper-path", dest="wrapper_path",
                   help="location of the wrapper script")
    _PruneOptions(argv, opt)
    opt, argv = opt.parse_args(argv)

    _CheckWrapperVersion(opt.wrapper_version, opt.wrapper_path)
    _CheckRepoDir(opt.repodir)

    Version.wrapper_version = opt.wrapper_version
    Version.wrapper_path = opt.wrapper_path

    repo = _Repo(opt.repodir)
    try:
        try:
            init_ssh()
            init_http()
            result = repo._Run(argv) or 0
        finally:
            close_ssh()
    except __HOLE__:
        print('aborted by user', file=sys.stderr)
        result = 1
    except ManifestParseError as mpe:
        print('fatal: %s' % mpe, file=sys.stderr)
        result = 1
    except RepoChangedException as rce:
        # If repo changed, re-exec ourselves.
        #
        argv = list(sys.argv)
        argv.extend(rce.extra_args)
        try:
            # os.execv(__file__, argv)
            result = subprocess.call([sys.executable] + argv)
        except OSError as e:
            print('fatal: cannot restart repo after upgrade', file=sys.stderr)
            print('fatal: %s' % e, file=sys.stderr)
            result = 128

    portable.WaitForProcess()
    sys.exit(result)
KeyboardInterrupt
dataset/ETHPy150Open esrlabs/git-repo/main.py/_Main
3,813
def connect(self, name, fn):
    """
    Connect a function ``fn`` to the template hook ``name``.

    An example hook could look like this::

        function my_hook(sender, **kwargs):
            # Get the request from context
            request = kwargs['context']['request']
            kwargs['content'].append("Hello, {0}!".format(request.user))

        registry.connect('hookname', name)

    If the given hook name does not exist at runtime, it is created
    dynamically.
    """
    with _registry_lock:
        try:
            signal = self._registry[name]
        except __HOLE__:
            signal = self.register(name)
        signal.connect(fn)
KeyError
dataset/ETHPy150Open weluse/django-templatehooks/templatehooks/registry.py/HookRegistry.connect
3,814
def get_content(self, name, context):
    """
    Get the content of a template hook.

    Used internally by the hook templatetag.

    If the referenced hook name has not been manually registered and there
    are no hooks attached to it, a warning is issued.
    """
    try:
        signal = self._registry[name]
    except __HOLE__:
        message = ("There are no connected functions for the hook '%s'." % name)
        warnings.warn(message, RuntimeWarning)
        return u''
    content = []
    signal.send(sender=self, context=context, content=content)
    return u'\n'.join(content)
KeyError
dataset/ETHPy150Open weluse/django-templatehooks/templatehooks/registry.py/HookRegistry.get_content
3,815
def on_button_release(self, vtk_picker, event):
    """ If the mouse has not moved, pick with our pickers. """
    if self._mouse_no_mvt:
        x, y = vtk_picker.GetEventPosition()
        for picker in self._active_pickers.values():
            try:
                picker.pick((x, y, 0), self.scene.scene.renderer)
            except __HOLE__:
                picker.pick(x, y, 0, self.scene.scene.renderer)
    self._mouse_no_mvt = 0
TypeError
dataset/ETHPy150Open enthought/mayavi/mayavi/core/mouse_pick_dispatcher.py/MousePickDispatcher.on_button_release
3,816
@cache_page(60 * 30)
def detail(request, slug=None, year=None, month=None, day=None, id=None):
    """Show a particular episode."""
    # strip = get_object_or_404(ComicStrip, slug=slug)
    try:
        id = int(id)
        if year:
            year = int(year)
        if month:
            month = int(month)
        if day:
            day = int(day)
    except __HOLE__:
        raise Http404

    # Some old URLs only used the episode ID, even though we now consider the
    # date-based form as canonical.
    if not year:
        episode = get_object_or_404(ComicEpisode, strip__slug=slug, id=id)
    else:
        episode = get_object_or_404(ComicEpisode, strip__slug=slug,
                                    pub_date__year=year, pub_date__month=month,
                                    pub_date__day=day, id=id)

    try:
        previous = episode.get_previous_by_pub_date(strip__slug=slug)
    except ComicEpisode.DoesNotExist:
        previous = None

    try:
        next = episode.get_next_by_pub_date(strip__slug=slug)
    except ComicEpisode.DoesNotExist:
        next = None

    page = {
        'episode': episode,
        'next': next,
        'previous': previous
    }

    return render_to_response('comics/detail.html', page,
                              context_instance=RequestContext(request))
ValueError
dataset/ETHPy150Open albatrossandco/brubeck_cms/brubeck/comics/views.py/detail
3,817
def deserialize_upload(value, url):
    """
    Restore file and name and storage from serialized value and the upload url.
    """
    result = {'name': None, 'storage': None}
    try:
        result = signing.loads(value, salt=url)
    except signing.BadSignature:
        # TODO: Log invalid signature
        pass
    else:
        try:
            result['storage'] = get_storage_class(result['storage'])
        except (ImproperlyConfigured, __HOLE__):
            # TODO: Log invalid class
            result = {'name': None, 'storage': None}
    return result
ImportError
dataset/ETHPy150Open caktus/django-sticky-uploads/stickyuploads/utils.py/deserialize_upload
3,818
@dec.slow
def test_inline(self):
    #TODO: THIS NEEDS TO MOVE TO THE INLINE TEST SUITE
    a = Foo()
    a.b = 12345
    code = """
           throw_error(PyExc_AttributeError,"bummer");
           """
    try:
        before = sys.getrefcount(a)
        inline_tools.inline(code,['a'])
    except __HOLE__:
        after = sys.getrefcount(a)
        try:
            inline_tools.inline(code,['a'])
        except:
            after2 = sys.getrefcount(a)
        debug_print("after and after2 should be equal in the following")
        debug_print('before, after, after2:', before, after, after2)
AttributeError
dataset/ETHPy150Open scipy/scipy/scipy/weave/tests/test_scxx_object.py/TestObjectHasattr.test_inline
3,819
@dec.slow
def test_noargs_with_args_not_instantiated(self):
    # calling a function that doesn't take args with args should fail.
    # Note: difference between this test add ``test_noargs_with_args``
    # below is that here Foo is not instantiated.
    def Foo():
        return "blah"

    code = """
           py::tuple args(2);
           args[0] = 1;
           args[1] = "hello";
           return_val = Foo.call(args);
           """
    try:
        first = sys.getrefcount(Foo)
        inline_tools.inline(code,['Foo'])
    except __HOLE__:
        second = sys.getrefcount(Foo)
        try:
            inline_tools.inline(code,['Foo'])
        except TypeError:
            third = sys.getrefcount(Foo)

    # first should == second, but the weird refcount error
    assert_equal(second,third)
TypeError
dataset/ETHPy150Open scipy/scipy/scipy/weave/tests/test_scxx_object.py/TestObjectCall.test_noargs_with_args_not_instantiated
3,820
@dec.slow
def test_noargs_with_args(self):
    # calling a function that doesn't take args with args should fail.
    a = Foo()
    code = """
           py::tuple args(2);
           args[0] = 1;
           args[1] = "hello";
           return_val = a.mcall("bar",args);
           """
    try:
        first = sys.getrefcount(a)
        inline_tools.inline(code,['a'])
    except __HOLE__:
        second = sys.getrefcount(a)
        try:
            inline_tools.inline(code,['a'])
        except TypeError:
            third = sys.getrefcount(a)

    # first should == second, but the weird refcount error
    assert_equal(second,third)
TypeError
dataset/ETHPy150Open scipy/scipy/scipy/weave/tests/test_scxx_object.py/TestObjectMcall.test_noargs_with_args
3,821
def authenticate(self, request):
    try:
        payload = jwt.decode(
            jwe.decrypt(request.body, settings.JWE_SECRET),
            settings.JWT_SECRET,
            options={'verify_exp': False},
            algorithm='HS256'
        )
    except (jwt.InvalidTokenError, __HOLE__):
        raise AuthenticationFailed

    # The JWT `data` payload is expected in the following structure.
    #
    # {"provider": {
    #     "idp": "https://login.circle.edu/idp/shibboleth",
    #     "id": "CIR",
    #     "user": {
    #         "middleNames": "",
    #         "familyName": "",
    #         "givenName": "",
    #         "fullname": "Circle User",
    #         "suffix": "",
    #         "username": "[email protected]"
    #     }
    # }}
    data = json.loads(payload['data'])
    provider = data['provider']
    institution = Institution.load(provider['id'])
    if not institution:
        raise AuthenticationFailed('Invalid institution id specified "{}"'.format(provider['id']))

    username = provider['user']['username']
    fullname = provider['user']['fullname']

    user, created = get_or_create_user(fullname, username)

    if created:
        user.given_name = provider['user'].get('givenName')
        user.middle_names = provider['user'].get('middleNames')
        user.family_name = provider['user'].get('familyName')
        user.suffix = provider['user'].get('suffix')
        user.date_last_login = datetime.utcnow()
        user.save()

        # User must be saved in order to have a valid _id
        user.register(username)
        send_mail(
            to_addr=user.username,
            mail=WELCOME_OSF4I,
            mimetype='html',
            user=user
        )

    if institution not in user.affiliated_institutions:
        user.affiliated_institutions.append(institution)
        user.save()

    return user, None
TypeError
dataset/ETHPy150Open CenterForOpenScience/osf.io/api/institutions/authentication.py/InstitutionAuthentication.authenticate
3,822
@jit.unroll_safe
def UNPACK_SEQUENCE(self, space, bytecode, frame, pc, n_items):
    w_obj = frame.pop()
    items_w = space.listview(w_obj)
    for i in xrange(n_items - 1, -1, -1):
        try:
            w_obj = items_w[i]
        except __HOLE__:
            w_obj = space.w_nil
        frame.push(w_obj)
IndexError
dataset/ETHPy150Open topazproject/topaz/topaz/interpreter.py/Interpreter.UNPACK_SEQUENCE
3,823
def __init__(self, *args, **config):
    super(ResourceWatcher, self).__init__(*args, **config)
    self.watcher = config.get("watcher", None)
    self.service = config.get("service", None)
    if self.service is not None:
        warnings.warn("ResourceWatcher.service is deprecated "
                      "please use ResourceWatcher.watcher instead.",
                      category=DeprecationWarning)
        if self.watcher is None:
            self.watcher = self.service
    if self.watcher is None:
        self.statsd.stop()
        self.loop.close()
        raise NotImplementedError('watcher is mandatory for now.')

    self.max_cpu = float(config.get("max_cpu", 90))  # in %
    self.max_mem = config.get("max_mem")

    if self.max_mem is None:
        self.max_mem = 90.
        self._max_percent = True
    else:
        try:
            self.max_mem = float(self.max_mem)  # float -> %
            self._max_percent = True
        except __HOLE__:
            self.max_mem = human2bytes(self.max_mem)  # int -> absolute
            self._max_percent = False

    self.min_cpu = config.get("min_cpu")
    if self.min_cpu is not None:
        self.min_cpu = float(self.min_cpu)  # in %

    self.min_mem = config.get("min_mem")
    if self.min_mem is not None:
        try:
            self.min_mem = float(self.min_mem)  # float -> %
            self._min_percent = True
        except ValueError:
            self.min_mem = human2bytes(self.min_mem)  # int -> absolute
            self._min_percent = True

    self.health_threshold = float(config.get("health_threshold", 75))  # in %
    self.max_count = int(config.get("max_count", 3))

    self.process_children = to_bool(config.get("process_children", '0'))
    self.child_signal = int(config.get("child_signal", signal.SIGTERM))

    self._count_over_cpu = {}
    self._count_over_mem = {}
    self._count_under_cpu = {}
    self._count_under_mem = {}
    self._count_health = {}
ValueError
dataset/ETHPy150Open circus-tent/circus/circus/plugins/resource_watcher.py/ResourceWatcher.__init__
3,824
def list_updated ( self, values ):
    """ Handles updates to the list of legal checklist values.
    """
    sv = self.string_value
    if (len( values ) > 0) and isinstance( values[0], basestring ):
        values = [ ( x, sv( x, capitalize ) ) for x in values ]
    self.values = valid_values = [ x[0] for x in values ]
    self.names = [ x[1] for x in values ]

    # Make sure the current value is still legal:
    modified = False
    cur_value = parse_value( self.value )
    for i in range( len( cur_value ) - 1, -1, -1 ):
        if cur_value[i] not in valid_values:
            try:
                del cur_value[i]
                modified = True
            except __HOLE__ as e:
                logger.warn('Unable to remove non-current value [%s] from '
                            'values %s', cur_value[i], values)
    if modified:
        if isinstance( self.value, basestring ):
            cur_value = ','.join( cur_value )
        self.value = cur_value

    self.rebuild_editor()

#---------------------------------------------------------------------------
#  Rebuilds the editor after its definition is modified:
#---------------------------------------------------------------------------
TypeError
dataset/ETHPy150Open enthought/traitsui/traitsui/wx/check_list_editor.py/SimpleEditor.list_updated
3,825
@staticmethod
def convert(filename):
    try:
        from hashlib import md5
    except __HOLE__:
        from md5 import md5
    _f, ext = os.path.splitext(filename)
    f = md5(
        md5("%f%s%f%s" % (time.time(), id({}), random.random(), os.getpid())).hexdigest(),
    ).hexdigest()
    return f + ext
ImportError
dataset/ETHPy150Open limodou/uliweb/uliweb/contrib/upload/__init__.py/MD5FilenameConverter.convert
3,826
def normalize_column_type(l, normal_type=None, blanks_as_nulls=True):
    """
    Attempts to normalize a list (column) of string values to booleans,
    integers, floats, dates, times, datetimes, or strings. NAs and missing
    values are converted to empty strings. Empty strings are converted to
    nulls in the case of non-string types. For string types (unicode),
    empty strings are converted to nulls unless blanks_as_nulls is false.

    Optional accepts a "normal_type" argument which specifies a type that
    the values must conform to (rather than inferring). Will raise
    InvalidValueForTypeException if a value is not coercable.

    Returns a tuple of (type, normal_values).
    """
    # Optimizations
    lower = six.text_type.lower
    replace = six.text_type.replace

    # Convert "NA", "N/A", etc. to null types.
    for i, x in enumerate(l):
        if x is not None and lower(x) in NULL_VALUES:
            l[i] = ''

    # Are they null?
    if not normal_type or normal_type == NoneType:
        try:
            for i, x in enumerate(l):
                if x != '' and x is not None:
                    raise ValueError('Not null')
            return NoneType, [None] * len(l)
        except ValueError:
            if normal_type:
                raise InvalidValueForTypeException(i, x, normal_type)

    # Are they boolean?
    if not normal_type or normal_type == bool:
        try:
            normal_values = []
            append = normal_values.append
            for i, x in enumerate(l):
                if x == '' or x is None:
                    append(None)
                elif x.lower() in TRUE_VALUES:
                    append(True)
                elif x.lower() in FALSE_VALUES:
                    append(False)
                else:
                    raise ValueError('Not boolean')
            return bool, normal_values
        except __HOLE__:
            if normal_type:
                raise InvalidValueForTypeException(i, x, normal_type)

    # Are they integers?
    if not normal_type or normal_type == int:
        try:
            normal_values = []
            append = normal_values.append
            for i, x in enumerate(l):
                if x == '' or x is None:
                    append(None)
                    continue
                int_x = int(replace(x, ',', ''))
                if x[0] == '0' and int(x) != 0:
                    raise TypeError('Integer is padded with 0s, so treat it as a string instead.')
                append(int_x)
            return int, normal_values
        except TypeError:
            if normal_type == int:
                raise InvalidValueForTypeException(i, x, int)
            if blanks_as_nulls:
                return six.text_type, [x if x != '' else None for x in l]
            else:
                return six.text_type, l
        except ValueError:
            if normal_type:
                raise InvalidValueForTypeException(i, x, normal_type)

    # Are they floats?
    if not normal_type or normal_type == float:
        try:
            normal_values = []
            append = normal_values.append
            for i, x in enumerate(l):
                if x == '' or x is None:
                    append(None)
                    continue
                float_x = float(replace(x, ',', ''))
                append(float_x)
            return float, normal_values
        except ValueError:
            if normal_type:
                raise InvalidValueForTypeException(i, x, normal_type)

    # Are they datetimes?
    if not normal_type or normal_type in [datetime.time, datetime.date, datetime.datetime]:
        try:
            normal_values = []
            append = normal_values.append
            normal_types_set = set()
            add = normal_types_set.add
            for i, x in enumerate(l):
                if x == '' or x is None:
                    append(None)
                    add(NoneType)
                    continue
                d = parse(x, default=DEFAULT_DATETIME)
                # Is it only a time?
                if d.date() == NULL_DATE:
                    if normal_type and normal_type != datetime.time:
                        raise InvalidValueForTypeException(i, x, normal_type)
                    d = d.time()
                    add(datetime.time)
                # Is it only a date?
                elif d.time() == NULL_TIME:
                    if normal_type and normal_type not in [datetime.date, datetime.datetime]:
                        raise InvalidValueForTypeException(i, x, normal_type)
                    d = d.date()
                    add(datetime.date)
                # It must be a date and time
                else:
                    if normal_type and normal_type != datetime.datetime:
                        raise InvalidValueForTypeException(i, x, normal_type)
                    add(datetime.datetime)
                append(d)

            # This case can only happen if normal_type was specified and the
            # column contained all nulls
            if normal_type and normal_types_set == set([NoneType]):
                return normal_type, normal_values

            normal_types_set.discard(NoneType)

            # If a mix of dates and datetimes, up-convert dates to datetimes
            if normal_types_set == set([datetime.datetime, datetime.date]) or (normal_types_set == set([datetime.date]) and normal_type is datetime.datetime):
                for i, v in enumerate(normal_values):
                    if v.__class__ == datetime.date:
                        normal_values[i] = datetime.datetime.combine(v, NULL_TIME)
                if datetime.datetime in normal_types_set:
                    normal_types_set.discard(datetime.date)
            # Datetimes and times don't mix -- fallback to using strings
            elif normal_types_set == set([datetime.datetime, datetime.time]) or (normal_types_set == set([datetime.time]) and normal_type is datetime.datetime):
                raise ValueError('Cant\'t coherently mix datetimes and times in a single column.')
            # Dates and times don't mix -- fallback to using strings
            elif normal_types_set == set([datetime.date, datetime.time]) or (normal_types_set == set([datetime.time]) and normal_type is datetime.date) or (normal_types_set == set([datetime.date]) and normal_type is datetime.time):
                raise ValueError('Can\'t coherently mix dates and times in a single column.')

            return normal_types_set.pop(), normal_values
        except ValueError:
            if normal_type:
                raise InvalidValueForTypeException(i, x, normal_type)
        except OverflowError:
            if normal_type:
                raise InvalidValueForTypeException(i, x, normal_type)
        except TypeError:
            if normal_type:
                raise InvalidValueForTypeException(i, x, normal_type)

    # Don't know what they are, so they must just be strings
    if blanks_as_nulls:
        return six.text_type, [x if x != '' else None for x in l]
    else:
        return six.text_type, l
ValueError
dataset/ETHPy150Open wireservice/csvkit/csvkit/typeinference.py/normalize_column_type
3,827
def _open_local_shell(self):
    imported_objects = self.get_imported_objects()
    try:
        import IPython
    except __HOLE__:
        IPython = None
    if IPython:
        IPython.start_ipython(
            argv=[],
            user_ns=imported_objects,
            banner1='Welcome to the lymph shell'
        )
    else:
        import code
        code.interact(local=imported_objects)
ImportError
dataset/ETHPy150Open deliveryhero/lymph/lymph/cli/shell.py/ShellCommand._open_local_shell
3,828
def _get_backdoor_endpoint(self, service_fullname):
    try:
        name, identity_prefix = service_fullname.split(':')
    except __HOLE__:
        sys.exit("Malformed argument it should be in the format 'name:identity'")
    service = self.client.container.lookup(name)
    instance = service.get_instance(identity_prefix)
    if instance is None:
        sys.exit('Unkown instance %s' % service_fullname)
    return instance.backdoor_endpoint
ValueError
dataset/ETHPy150Open deliveryhero/lymph/lymph/cli/shell.py/ShellCommand._get_backdoor_endpoint
3,829
def _calculate(self, data):
    x = data.pop('x')

    try:
        float(x.iloc[0])
    except:
        try:
            # try to use it as a pandas.tslib.Timestamp
            x = [ts.toordinal() for ts in x]
        except:
            raise GgplotError("stat_density(): aesthetic x mapping " +
                              "needs to be convertable to float!")

    # TODO: Implement weight
    try:
        weight = data.pop('weight')
    except __HOLE__:
        weight = np.ones(len(x))

    # TODO: Get "full" range of densities
    # i.e tail off to zero like ggplot2? But there is nothing
    # wrong with the current state.
    kde = gaussian_kde(x)
    bottom = np.min(x)
    top = np.max(x)
    step = (top - bottom) / 1000.0

    x = np.arange(bottom, top, step)
    y = kde.evaluate(x)
    new_data = pd.DataFrame({'x': x, 'y': y})

    # Copy the other aesthetics into the new dataframe
    n = len(x)
    for ae in data:
        new_data[ae] = make_iterable_ntimes(data[ae].iloc[0], n)
    return new_data
KeyError
dataset/ETHPy150Open yhat/ggplot/ggplot/stats/stat_density.py/stat_density._calculate
3,830
def multiplicity(p, n):
    """
    Find the greatest integer m such that p**m divides n.

    Examples
    ========

    >>> from sympy.ntheory import multiplicity
    >>> from sympy.core.numbers import Rational as R
    >>> [multiplicity(5, n) for n in [8, 5, 25, 125, 250]]
    [0, 1, 2, 3, 3]
    >>> multiplicity(3, R(1, 9))
    -2

    """
    try:
        p, n = as_int(p), as_int(n)
    except ValueError:
        if all(isinstance(i, (SYMPY_INTS, Rational)) for i in (p, n)):
            try:
                p = Rational(p)
                n = Rational(n)
                if p.q == 1:
                    if n.p == 1:
                        return -multiplicity(p.p, n.q)
                    return S.Zero
                elif p.p == 1:
                    return multiplicity(p.q, n.q)
                else:
                    like = min(
                        multiplicity(p.p, n.p),
                        multiplicity(p.q, n.q))
                    cross = min(
                        multiplicity(p.q, n.p),
                        multiplicity(p.p, n.q))
                    return like - cross
            except __HOLE__:
                pass
        raise ValueError('expecting ints or fractions, got %s and %s' % (p, n))

    if n == 0:
        raise ValueError('no such integer exists: multiplicity of %s is not-defined' % (n))

    if p == 2:
        return trailing(n)
    if p < 2:
        raise ValueError('p must be an integer, 2 or larger, but got %s' % p)
    if p == n:
        return 1

    m = 0
    n, rem = divmod(n, p)
    while not rem:
        m += 1
        if m > 5:
            # The multiplicity could be very large. Better
            # to increment in powers of two
            e = 2
            while 1:
                ppow = p**e
                if ppow < n:
                    nnew, rem = divmod(n, ppow)
                    if not rem:
                        m += e
                        e *= 2
                        n = nnew
                        continue
                return m + multiplicity(p, n)
        n, rem = divmod(n, p)
    return m
AttributeError
dataset/ETHPy150Open sympy/sympy/sympy/ntheory/factor_.py/multiplicity
3,831
def factorint(n, limit=None, use_trial=True, use_rho=True, use_pm1=True,
              verbose=False, visual=None):
    r"""
    Given a positive integer ``n``, ``factorint(n)`` returns a dict containing
    the prime factors of ``n`` as keys and their respective multiplicities
    as values. For example:

    >>> from sympy.ntheory import factorint
    >>> factorint(2000)    # 2000 = (2**4) * (5**3)
    {2: 4, 5: 3}
    >>> factorint(65537)   # This number is prime
    {65537: 1}

    For input less than 2, factorint behaves as follows:

        - ``factorint(1)`` returns the empty factorization, ``{}``
        - ``factorint(0)`` returns ``{0:1}``
        - ``factorint(-n)`` adds ``-1:1`` to the factors and then factors ``n``

    Partial Factorization:

    If ``limit`` (> 3) is specified, the search is stopped after performing
    trial division up to (and including) the limit (or taking a
    corresponding number of rho/p-1 steps). This is useful if one has a large
    number and only is interested in finding small factors (if any). Note
    that setting a limit does not prevent larger factors from being found
    early; it simply means that the largest factor may be composite. Since
    checking for perfect power is relatively cheap, it is done regardless of
    the limit setting.

    This number, for example, has two small factors and a huge
    semi-prime factor that cannot be reduced easily:

    >>> from sympy.ntheory import isprime
    >>> from sympy.core.compatibility import long
    >>> a = 1407633717262338957430697921446883
    >>> f = factorint(a, limit=10000)
    >>> f == {991: 1, long(202916782076162456022877024859): 1, 7: 1}
    True
    >>> isprime(max(f))
    False

    This number has a small factor and a residual perfect power whose
    base is greater than the limit:

    >>> factorint(3*101**7, limit=5)
    {3: 1, 101: 7}

    Visual Factorization:

    If ``visual`` is set to ``True``, then it will return a visual
    factorization of the integer. For example:

    >>> from sympy import pprint
    >>> pprint(factorint(4200, visual=True))
     3    1  2  1
    2 *3 *5 *7

    Note that this is achieved by using the evaluate=False flag in Mul
    and Pow. If you do other manipulations with an expression where
    evaluate=False, it may evaluate. Therefore, you should use the
    visual option only for visualization, and use the normal dictionary
    returned by visual=False if you want to perform operations on the
    factors.

    You can easily switch between the two forms by sending them back to
    factorint:

    >>> from sympy import Mul, Pow
    >>> regular = factorint(1764); regular
    {2: 2, 3: 2, 7: 2}
    >>> pprint(factorint(regular))
     2  2  2
    2 *3 *7

    >>> visual = factorint(1764, visual=True); pprint(visual)
     2  2  2
    2 *3 *7

    >>> print(factorint(visual))
    {2: 2, 3: 2, 7: 2}

    If you want to send a number to be factored in a partially factored form
    you can do so with a dictionary or unevaluated expression:

    >>> factorint(factorint({4: 2, 12: 3})) # twice to toggle to dict form
    {2: 10, 3: 3}
    >>> factorint(Mul(4, 12, evaluate=False))
    {2: 4, 3: 1}

    The table of the output logic is:

        ====== ====== ======= =======
                       Visual
        ------ ----------------------
        Input  True   False   other
        ====== ====== ======= =======
        dict    mul    dict    mul
        n       mul    dict    dict
        mul     mul    dict    dict
        ====== ====== ======= =======

    Notes
    =====

    Algorithm:

    The function switches between multiple algorithms. Trial division
    quickly finds small factors (of the order 1-5 digits), and finds
    all large factors if given enough time. The Pollard rho and p-1
    algorithms are used to find large factors ahead of time; they
    will often find factors of the order of 10 digits within a few
    seconds:

    >>> factors = factorint(12345678910111213141516)
    >>> for base, exp in sorted(factors.items()):
    ...     print('%s %s' % (base, exp))
    ...
    2 2
    2507191691 1
    1231026625769 1

    Any of these methods can optionally be disabled with the following
    boolean parameters:

        - ``use_trial``: Toggle use of trial division
        - ``use_rho``: Toggle use of Pollard's rho method
        - ``use_pm1``: Toggle use of Pollard's p-1 method

    ``factorint`` also periodically checks if the remaining part is
    a prime number or a perfect power, and in those cases stops.

    If ``verbose`` is set to ``True``, detailed progress is printed.

    See Also
    ========

    smoothness, smoothness_p, divisors
    """
    factordict = {}
    if visual and not isinstance(n, Mul) and not isinstance(n, dict):
        factordict = factorint(n, limit=limit, use_trial=use_trial,
                               use_rho=use_rho, use_pm1=use_pm1,
                               verbose=verbose, visual=False)
    elif isinstance(n, Mul):
        factordict = dict([(int(k), int(v)) for k, v in
                           list(n.as_powers_dict().items())])
    elif isinstance(n, dict):
        factordict = n
    if factordict and (isinstance(n, Mul) or isinstance(n, dict)):
        # check it
        for k in list(factordict.keys()):
            if isprime(k):
                continue
            e = factordict.pop(k)
            d = factorint(k, limit=limit, use_trial=use_trial, use_rho=use_rho,
                          use_pm1=use_pm1, verbose=verbose, visual=False)
            for k, v in d.items():
                if k in factordict:
                    factordict[k] += v*e
                else:
                    factordict[k] = v*e
    if visual or (type(n) is dict and
                  visual is not True and
                  visual is not False):
        if factordict == {}:
            return S.One
        if -1 in factordict:
            factordict.pop(-1)
            args = [S.NegativeOne]
        else:
            args = []
        args.extend([Pow(*i, evaluate=False)
                     for i in sorted(factordict.items())])
        return Mul(*args, evaluate=False)
    elif isinstance(n, dict) or isinstance(n, Mul):
        return factordict

    assert use_trial or use_rho or use_pm1

    n = as_int(n)
    if limit:
        limit = int(limit)

    # special cases
    if n < 0:
        factors = factorint(
            -n, limit=limit, use_trial=use_trial, use_rho=use_rho,
            use_pm1=use_pm1, verbose=verbose, visual=False)
        factors[-1] = 1
        return factors

    if limit and limit < 2:
        if n == 1:
            return {}
        return {n: 1}
    elif n < 10:
        # doing this we are assured of getting a limit > 2
        # when we have to compute it later
        return [{0: 1}, {}, {2: 1}, {3: 1}, {2: 2}, {5: 1},
                {2: 1, 3: 1}, {7: 1}, {2: 3}, {3: 2}][n]

    factors = {}

    # do simplistic factorization
    if verbose:
        sn = str(n)
        if len(sn) > 50:
            print('Factoring %s' % sn[:5] + \
                  '..(%i other digits)..' % (len(sn) - 10) + sn[-5:])
        else:
            print('Factoring', n)

    if use_trial:
        # this is the preliminary factorization for small factors
        small = 2**15
        fail_max = 600
        small = min(small, limit or small)
        if verbose:
            print(trial_int_msg % (2, small, fail_max))
        n, next_p = _factorint_small(factors, n, small, fail_max)
    else:
        next_p = 2
    if factors and verbose:
        for k in sorted(factors):
            print(factor_msg % (k, factors[k]))
    if next_p == 0:
        if n > 1:
            factors[int(n)] = 1
        if verbose:
            print(complete_msg)
        return factors

    # continue with more advanced factorization methods

    # first check if the simplistic run didn't finish
    # because of the limit and check for a perfect
    # power before exiting
    try:
        if limit and next_p > limit:
            if verbose:
                print('Exceeded limit:', limit)

            _check_termination(factors, n, limit, use_trial, use_rho, use_pm1,
                               verbose)

            if n > 1:
                factors[int(n)] = 1
            return factors
        else:
            # Before quitting (or continuing on)...

            # ...do a Fermat test since it's so easy and we need the
            # square root anyway. Finding 2 factors is easy if they are
            # "close enough." This is the big root equivalent of dividing by
            # 2, 3, 5.
            sqrt_n = integer_nthroot(n, 2)[0]
            a = sqrt_n + 1
            a2 = a**2
            b2 = a2 - n
            for i in range(3):
                b, fermat = integer_nthroot(b2, 2)
                if fermat:
                    break
                b2 += 2*a + 1  # equiv to (a+1)**2 - n
                a += 1
            if fermat:
                if verbose:
                    print(fermat_msg)
                if limit:
                    limit -= 1
                for r in [a - b, a + b]:
                    facs = factorint(r, limit=limit, use_trial=use_trial,
                                     use_rho=use_rho, use_pm1=use_pm1,
                                     verbose=verbose)
                    factors.update(facs)
                raise StopIteration

            # ...see if factorization can be terminated
            _check_termination(factors, n, limit, use_trial, use_rho, use_pm1,
                               verbose)

    except StopIteration:
        if verbose:
            print(complete_msg)
        return factors

    # these are the limits for trial division which will
    # be attempted in parallel with pollard methods
    low, high = next_p, 2*next_p

    limit = limit or sqrt_n
    # add 1 to make sure limit is reached in primerange calls
    limit += 1

    while 1:
        try:
            high_ = high
            if limit < high_:
                high_ = limit

            # Trial division
            if use_trial:
                if verbose:
                    print(trial_msg % (low, high_))
                ps = sieve.primerange(low, high_)
                n, found_trial = _trial(factors, n, ps, verbose)
                if found_trial:
                    _check_termination(factors, n, limit, use_trial, use_rho,
                                       use_pm1, verbose)
            else:
                found_trial = False

            if high > limit:
                if verbose:
                    print('Exceeded limit:', limit)
                if n > 1:
                    factors[int(n)] = 1
                raise StopIteration

            # Only used advanced methods when no small factors were found
            if not found_trial:
                if (use_pm1 or use_rho):
                    high_root = max(int(math.log(high_**0.7)), low, 3)

                    # Pollard p-1
                    if use_pm1:
                        if verbose:
                            print(pm1_msg % (high_root, high_))
                        c = pollard_pm1(n, B=high_root, seed=high_)
                        if c:
                            # factor it and let _trial do the update
                            ps = factorint(c, limit=limit - 1,
                                           use_trial=use_trial,
                                           use_rho=use_rho,
                                           use_pm1=use_pm1,
                                           verbose=verbose)
                            n, _ = _trial(factors, n, ps, verbose=False)
                            _check_termination(factors, n, limit, use_trial,
                                               use_rho, use_pm1, verbose)

                    # Pollard rho
                    if use_rho:
                        max_steps = high_root
                        if verbose:
                            print(rho_msg % (1, max_steps, high_))
                        c = pollard_rho(n, retries=1, max_steps=max_steps,
                                        seed=high_)
                        if c:
                            # factor it and let _trial do the update
                            ps = factorint(c, limit=limit - 1,
                                           use_trial=use_trial,
                                           use_rho=use_rho,
                                           use_pm1=use_pm1,
                                           verbose=verbose)
                            n, _ = _trial(factors, n, ps, verbose=False)
                            _check_termination(factors, n, limit, use_trial,
                                               use_rho, use_pm1, verbose)

        except __HOLE__:
            if verbose:
                print(complete_msg)
            return factors

        low, high = high, high*2
StopIteration
dataset/ETHPy150Open sympy/sympy/sympy/ntheory/factor_.py/factorint
3,832
def __str__(self):
    if self.msg:
        return self.msg
    try:
        return str(self.__dict__)
    except (NameError, __HOLE__, KeyError), e:
        return 'Unprintable exception %s: %s' \
               % (self.__class__.__name__, str(e))
ValueError
dataset/ETHPy150Open benoitc/restkit/restkit/errors.py/ResourceError.__str__
3,833
def get_value(self, report, row):
    val = self.mapfunc(report, self.key, row)
    if self.format is not None:
        val = self.format % val
    elif val is None:
        val = ""
    if type(val) != str:
        try:
            val = val.encode('utf-8')
        except __HOLE__:
            val = str(val)
    if self.truncate and len(val) > self.truncate:
        val = val[0:self.truncate] + "..."
    return val
AttributeError
dataset/ETHPy150Open berrange/gerrymander/gerrymander/reports.py/ReportOutputColumn.get_value
3,834
def error_for_errno(space, errno):
    try:
        name = _errno_for_oserror_map[errno]
    except __HOLE__:
        w_type = space.w_SystemCallError
    else:
        w_type = space.find_const(space.find_const(space.w_object, "Errno"), name)
    return space.error(
        w_type,
        os.strerror(errno),
        [space.newint(errno)]
    )
KeyError
dataset/ETHPy150Open topazproject/topaz/topaz/error.py/error_for_errno
3,835
def watcher(task, *args, **kwargs):
    while True:
        run('clear')
        kwargs['warn'] = True
        task(*args, **kwargs)
        try:
            run(
                'inotifywait -q -e create -e modify -e delete '
                '--exclude ".*\.(pyc|sw.)" -r docs/ mopidy/ tests/')
        except __HOLE__:
            sys.exit()
KeyboardInterrupt
dataset/ETHPy150Open mopidy/mopidy/tasks.py/watcher
3,836
def choose(rects, lines = [], gauges = [None], trace=''):
    def induce(r):
        if trace == 'induce':
            pdb.set_trace()
        uncommons = r.get_uncommons()
        if len(uncommons) < 2:
            return
        irs = []
        for s in rects:
            if s.dir != r.dir:
                continue
            pss = []
            uncs = s.get_uncommons(pickednos)
            lnos = s.lnos & ~pickednos
            assert len(uncs) == len(lnos)
            for unc, lno in zip(uncs, lnos):
                if unc in uncommons:
                    pss.append(lno)
            if len(pss) == len(uncommons):
                pslnos = immbitset(pss)
                pss = [lines[lno] for lno in pss]
                if s.dir == -1:
                    c = InducedRightRect
                else:
                    c = InducedLeftRect
                ir = c(s, pss, pslnos)
                if trace == 'indap':
                    pdb.set_trace()
                irs.append(ir)
        if irs:
            #pdb.set_trace()
            news.extend(irs)

    def overlap(r):
        # if 'overlap' in trace: pdb.set_trace()
        rlnos = r.lnos
        tonews = []
        for s in rects:
            if s is r:
                continue
            if s.dir != r.dir:
                continue
            slnos = s.lnos
            if not (slnos & rlnos):
                continue
            slnos &= ~ pickednos
            if not slnos:
                # remove
                continue
            scom = s.common_part
            if not scom:
                continue
            for t in rects:
                if t is s:
                    continue
                if t.dir == s.dir:
                    continue
                tlnos = t.lnos & ~pickednos
                if (tlnos & rlnos):
                    continue
                olnos = tlnos & slnos
                if not olnos:
                    continue
                if slnos == tlnos:
                    continue
                tcom = t.common_part
                if not tcom:
                    continue
                c = cmp_gauged(scom, tcom, gauges)
                if c > 0:
                    continue
                if trace == 'obreak':
                    pdb.set_trace
                break
            else:
                # s is ok
                tonews.append(s)
                rects.remove(s)
        if len(tonews) > 1:
            pdb.set_trace()
        news.extend(tonews)

    def picknext():
        while 1:
            if news:
                if trace == 'news':
                    pdb.set_trace()
                r = news[0]
                del news[0]
            else:
                r = None
            for s in list(rects):
                slnos = s.lnos - pickednos
                if not slnos:
                    rects.remove(s)
                    continue
                sn = len(slnos) - 1
                sw = s.width
                if r is not None:
                    if not sw:
                        break
                    if not sn:
                        continue
                    if rwn:
                        rmemo = r.gainmemo
                        smemo = s.gainmemo
                        c = 0
                        for gauge in gauges:
                            try:
                                gr = rmemo[gauge]
                            except KeyError:
                                gr = sum_gauge(gauge, r.common_part)
                                rmemo[gauge] = gr
                            gr *= rn
                            try:
                                gs = smemo[gauge]
                            except __HOLE__:
                                gs = sum_gauge(gauge, s.common_part)
                                smemo[gauge] = gs
                            gs *= sn
                            c = gr - gs
                            if c:
                                break
                        if c >= 0:
                            continue
                r = s
                rlnos = slnos
                if not sw:
                    break
                rn = sn
                rw = sw
                rwn = sn * sw
            if r is not None:
                rects.remove(r)
            if r is not None:
                r.reducelines(pickednos)
                if r.lnos:
                    return r

    def cmpinit(x, y):
        wx = x.width
        wy = y.width
        c = wy - wx
        if c:
            return c
        c = y.dir - x.dir
        if c:
            return c
        c = cmp(x.lnos[0], y.lnos[0])
        return c

    if gauges[0] == None:
        gauges = gauges[1:]
    lnobyid = dict([(id(line), i) for i, line in enumerate(lines)])
    orects = rects
    rects = list(orects)
    for r in rects:
        r.init2(lnobyid, lines)
    rects.sort(cmpinit)
    allnos = immbitrange(len(lines))
    pickednos = mutbitset()
    pickedrects = []
    news = []
    while pickednos != allnos:
        r = picknext()
        pickednos |= r.lnos
        pickedrects.append(r)
        induce(r)
        if trace == 'induced':
            pdb.set_trace()
        overlap(r)
        if trace == 'chosorted':
            pdb.set_trace()
    if trace == 'choosen':
        pdb.set_trace()
    return pickedrects
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/etc/RE_Rect.py/choose
3,837
def validate_json_file(file):
    try:
        data = generics_utils.check_json_syntax(file)
        if data is None:
            return
        #check manadatory fields
        if "stack" in data:
            stack=check_mandatory_stack(data["stack"])
            if stack is None:
                return
        #else:
        #    print "No stack section find in the template file"
        #    return
        if "builders" in data:
            check_mandatory_builders(data["builders"])
        return data
    except __HOLE__ as e:
        printer.out("JSON parsing error: "+str(e), printer.ERROR)
        printer.out("Syntax of template file ["+file+"]: FAILED")
    except IOError as e:
        printer.out("unknown error template json file", printer.ERROR)

#manage uforge exception
ValueError
dataset/ETHPy150Open usharesoft/hammr/src/hammr/utils/hammr_utils.py/validate_json_file
3,838
def request(self, method, url, headers, post_data=None):
    kwargs = {}

    if self._verify_ssl_certs:
        kwargs['verify'] = os.path.join(
            os.path.dirname(__file__), 'data/ca-certificates.crt')
    else:
        kwargs['verify'] = False

    try:
        try:
            result = requests.request(method,
                                      url,
                                      headers=headers,
                                      data=post_data,
                                      timeout=80,
                                      **kwargs)
        except __HOLE__, e:
            raise TypeError(
                'Warning: It looks like your installed version of the '
                '"requests" library is not compatible with Shippo\'s '
                'usage thereof. (HINT: The most likely cause is that '
                'your "requests" library is out of date. You can fix '
                'that by running "pip install -U requests".) The '
                'underlying error was: %s' % (e,))

        # This causes the content to actually be read, which could cause
        # e.g. a socket timeout. TODO: The other fetch methods probably
        # are succeptible to the same and should be updated.
        content = result.content
        status_code = result.status_code
    except Exception, e:
        # Would catch just requests.exceptions.RequestException, but can
        # also raise ValueError, RuntimeError, etc.
        self._handle_request_error(e)

    return content, status_code
NotImplementedError
dataset/ETHPy150Open goshippo/shippo-python-client/shippo/http_client.py/RequestsClient.request
3,839
def run(self, config, args):
    appname = self.default_appname(config, args)
    server, procfile = config.get('server', 'procfile')

    if not args['<jobs>'] and not args['--from-file']:
        # unload from a procfile
        if not args["--no-input"]:
            if not self.confirm("Do you want to reload %r?" % appname):
                return

        apps = server.sessions()
        if appname not in apps:
            raise RuntimeError("%r not found" % appname)

        # unload the complete app
        server.jobs_walk(lambda s, job: self._reload(s, job, appname))
        print("==> app %r reloaded" % appname)
    elif args['--from-file']:
        # unload from a JSON config file
        fname = args['--from-file']

        # load configs
        configs = self.load_jsonconfig(fname)

        # finally unload all jobs from the give config
        for conf in configs:
            try:
                job_name = conf.pop('name')
            except __HOLE__:
                raise ValueError("invalid job config")

            # parse job name and eventually extract the appname
            appname, name = self.parse_name(job_name,
                                            self.default_appname(config, args))
            # always force the appname if specified
            if args['--app']:
                appname = args['--app']

            # unload the job
            pname = "%s.%s" % (appname, name)
            if not args["--no-input"]:
                if not self.confirm("Do you want to reload %r?" % pname):
                    continue
            try:
                server.reload(name, appname)
                print("job %r reloaded" % pname)
            except GafferNotFound:
                sys.stderr.write("%r not found in %r\n" % (name, appname))
                sys.stderr.flush()
    else:
        # unload all jobs given on the command line. it can be either a
        # job specified in the Procfile or any job in the gafferd node.
        for job_name in args['<jobs>']:
            appname, name = self.parse_name(job_name, appname)
            if (self.use_procfile(config, appname) and
                    name not in procfile.cfg):
                print("Ignore %r" % name)
                continue

            if not args["--no-input"]:
                if not self.confirm("Do you want to reload %r?" % job_name):
                    continue

            # unload the job
            try:
                server.reload(name, appname)
                print("job %r reloaded" % job_name)
            except GafferNotFound:
                sys.stderr.write("%r not found in %r\n" % (job_name, appname))
                sys.stderr.flush()
KeyError
dataset/ETHPy150Open benoitc/gaffer/gaffer/cli/commands/reload.py/Reload.run
3,840
def handle(self, doc_types, *args, **options):
    input = raw_input('\n'.join([
        '\n\nReally delete documents of the following types: {}?',
        'This operation is not reversible. Enter a number N to delete the first '
        'N found, or type "delete all" to delete everything.',
        '',
    ]).format(doc_types))
    if input == 'delete all':
        remaining = None
    else:
        try:
            remaining = int(input)
        except __HOLE__:
            print 'aborting'
            sys.exit()

    doc_types = doc_types.split(',')
    deleted = 0

    # unfortunately the only couch view we have for this needs to go by domain
    # will be a bit slow
    domain_names = Domain.get_all_names()
    for doc_type in doc_types:
        db = get_db_by_doc_type(doc_type)
        if not db:
            print "Cannot find db for {}, skipping".format(doc_type)
            continue

        for domain in domain_names:
            docs = [row['doc'] for row in db.view(
                'by_domain_doc_type_date/view',
                startkey=[domain, doc_type],
                endkey=[domain, doc_type, {}],
                reduce=False,
                include_docs=True,
            )][:remaining]
            if docs:
                count = len(docs)
                print 'deleting {} {}s from {}'.format(count, doc_type, domain)
                db.delete_docs(docs)
                deleted += count
                if remaining is not None:
                    remaining -= count
                    if remaining <= 0:
                        return
    print 'successfully deleted {} documents'.format(deleted)
ValueError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/cleanup/management/commands/purge_docs.py/Command.handle
3,841
def __init__(self, prototype, elements):
    """
    Constructor, needs the prototype and the elements of the clustering.
    TODO: change it by (elements, [prototype]). Prototype must be
    calculated on demand and use bookkeeping.
    """
    self.set_elements(elements)
    self.id = ""
    try:
        self.set_prototype(prototype)
    except __HOLE__:
        raise
TypeError
dataset/ETHPy150Open victor-gil-sepulveda/pyProCT/pyproct/clustering/cluster.py/Cluster.__init__
3,842
def expanduser(path):
    """Expand ~ and ~user constructs.

    If user or $HOME is unknown, do nothing."""
    if path[:1] != '~':
        return path
    i, n = 1, len(path)
    while i < n and path[i] not in '/\\':
        i = i + 1

    if 'HOME' in os.environ:
        userhome = os.environ['HOME']
    elif 'USERPROFILE' in os.environ:
        userhome = os.environ['USERPROFILE']
    elif not 'HOMEPATH' in os.environ:
        return path
    else:
        try:
            drive = os.environ['HOMEDRIVE']
        except __HOLE__:
            drive = ''
        userhome = join(drive, os.environ['HOMEPATH'])

    if i != 1:  # ~user
        userhome = join(dirname(userhome), path[1:i])

    return userhome + path[i:]


# Expand paths containing shell variable substitutions.
# The following rules apply:
#       - no expansion within single quotes
#       - '$$' is translated into '$'
#       - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
#       - ${varname} is accepted.
#       - $varname is accepted.
#       - %varname% is accepted.
#       - varnames can be made out of letters, digits and the characters '_-'
#         (though is not verifed in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
KeyError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/ntpath.py/expanduser
3,843
def expandvars(path):
    """Expand shell variables of the forms $var, ${var} and %var%.

    Unknown variables are left unchanged."""
    if '$' not in path and '%' not in path:
        return path
    import string
    varchars = string.ascii_letters + string.digits + '_-'
    res = ''
    index = 0
    pathlen = len(path)
    while index < pathlen:
        c = path[index]
        if c == '\'':   # no expansion within single quotes
            path = path[index + 1:]
            pathlen = len(path)
            try:
                index = path.index('\'')
                res = res + '\'' + path[:index + 1]
            except __HOLE__:
                res = res + path
                index = pathlen - 1
        elif c == '%':  # variable or '%'
            if path[index + 1:index + 2] == '%':
                res = res + c
                index = index + 1
            else:
                path = path[index+1:]
                pathlen = len(path)
                try:
                    index = path.index('%')
                except ValueError:
                    res = res + '%' + path
                    index = pathlen - 1
                else:
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        res = res + '%' + var + '%'
        elif c == '$':  # variable or '$$'
            if path[index + 1:index + 2] == '$':
                res = res + c
                index = index + 1
            elif path[index + 1:index + 2] == '{':
                path = path[index+2:]
                pathlen = len(path)
                try:
                    index = path.index('}')
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        res = res + '${' + var + '}'
                except ValueError:
                    res = res + '${' + path
                    index = pathlen - 1
            else:
                var = ''
                index = index + 1
                c = path[index:index + 1]
                while c != '' and c in varchars:
                    var = var + c
                    index = index + 1
                    c = path[index:index + 1]
                if var in os.environ:
                    res = res + os.environ[var]
                else:
                    res = res + '$' + var
                if c != '':
                    index = index - 1
        else:
            res = res + c
        index = index + 1
    return res


# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
ValueError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/ntpath.py/expandvars
3,844
def net(self, irc, msg, args): """takes no arguments Returns some interesting network-related statistics. """ try: elapsed = time.time() - self.connected[irc.getRealIrc()] timeElapsed = utils.timeElapsed(elapsed) except __HOLE__: timeElapsed = _('an indeterminate amount of time') irc.reply(format(_('I have received %s messages for a total of %S. ' 'I have sent %s messages for a total of %S. ' 'I have been connected to %s for %s.'), self.recvdMsgs, self.recvdBytes, self.sentMsgs, self.sentBytes, irc.server, timeElapsed))
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Status/plugin.py/Status.net
3,845
@internationalizeDocstring def cpu(self, irc, msg, args): """takes no arguments Returns some interesting CPU-related statistics on the bot. """ (user, system, childUser, childSystem, elapsed) = os.times() now = time.time() target = msg.args[0] timeRunning = now - world.startedAt if self.registryValue('cpu.children', target) and \ user+system < timeRunning+1: # Fudge for FPU inaccuracies. children = _('My children have taken %.2f seconds of user time ' 'and %.2f seconds of system time ' 'for a total of %.2f seconds of CPU time.') % \ (childUser, childSystem, childUser+childSystem) else: children = '' activeThreads = threading.activeCount() response = _('I have taken %.2f seconds of user time and %.2f seconds ' 'of system time, for a total of %.2f seconds of CPU ' 'time. %s') % (user, system, user + system, children) if self.registryValue('cpu.threads', target): response += format('I have spawned %n; I currently have %i still ' 'running.', (world.threadsSpawned, 'thread'), activeThreads) if self.registryValue('cpu.memory', target): mem = None pid = os.getpid() plat = sys.platform try: if plat.startswith('linux') or plat.startswith('sunos') or \ plat.startswith('freebsd') or plat.startswith('openbsd') or \ plat.startswith('darwin'): cmd = 'ps -o rss -p %s' % pid try: inst = subprocess.Popen(cmd.split(), close_fds=True, stdin=open(os.devnull), stdout=subprocess.PIPE, stderr=subprocess.PIPE) except __HOLE__: irc.error(_('Unable to run ps command.'), Raise=True) (out, foo) = inst.communicate() inst.wait() mem = int(out.splitlines()[1]) elif sys.platform.startswith('netbsd'): mem = int(os.stat('/proc/%s/mem' % pid)[7]) if mem: response += format(_(' I\'m taking up %S of memory.'), mem*1024) else: response += _(' I\'m taking up an unknown amount of memory.') except Exception: self.log.exception('Uncaught exception in cpu.memory:') irc.reply(utils.str.normalizeWhitespace(response))
OSError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Status/plugin.py/Status.cpu
3,846
def get_version(self): try: return str(self.cookie_set.all()[0].get_version()) except __HOLE__: return ""
IndexError
dataset/ETHPy150Open bmihelac/django-cookie-consent/cookie_consent/models.py/CookieGroup.get_version
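Here the hole is IndexError: taking [0] of an empty sequence raises it, and a sliced Django queryset behaves the same way as the plain list in this illustrative sketch:

def first_version(versions):
    try:
        return str(versions[0])
    except IndexError:  # empty sequence, mirroring an empty queryset
        return ""

print(repr(first_version([3, 2])))  # '3'
print(repr(first_version([])))      # ''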
3,847
def _cleanup_response_queue(self, message): """Stop tracking the response queue either because we're done receiving responses, or we've timed out. """ try: del self.response_queues[message.uuid] except __HOLE__: # Ignore if queue is gone already somehow. pass
KeyError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/cells/messaging.py/MessageRunner._cleanup_response_queue
3,848
def deserialize_remote_exception(data, allowed_remote_exmods): failure = jsonutils.loads(str(data)) trace = failure.get('tb', []) message = failure.get('message', "") + "\n" + "\n".join(trace) name = failure.get('class') module = failure.get('module') # NOTE(ameade): We DO NOT want to allow just any module to be imported, in # order to prevent arbitrary code execution. if module != 'exceptions' and module not in allowed_remote_exmods: return messaging.RemoteError(name, failure.get('message'), trace) try: mod = importutils.import_module(module) klass = getattr(mod, name) if not issubclass(klass, Exception): raise TypeError("Can only deserialize Exceptions") failure = klass(*failure.get('args', []), **failure.get('kwargs', {})) except (AttributeError, TypeError, ImportError): return messaging.RemoteError(name, failure.get('message'), trace) ex_type = type(failure) str_override = lambda self: message new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,), {'__str__': str_override, '__unicode__': str_override}) new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX) try: # NOTE(ameade): Dynamically create a new exception type and swap it in # as the new type for the exception. This only works on user defined # Exceptions and not core python exceptions. This is important because # we cannot necessarily change an exception message so we must override # the __str__ method. failure.__class__ = new_ex_type except __HOLE__: # NOTE(ameade): If a core exception then just add the traceback to the # first exception argument. failure.args = (message,) + failure.args[1:] return failure
TypeError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/cells/messaging.py/deserialize_remote_exception
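The hole above is TypeError: CPython only allows __class__ assignment on instances of heap (user-defined) types, so retagging a built-in exception fails — exactly the fallback the record's NOTE comment describes. A reduced sketch of that pattern; retag and the '[remote] ' prefix are illustrative, not the record's names:

class AppError(Exception):
    pass

def retag(exc, suffix='_Remote'):
    # Build a same-layout subclass and try to swap it in.
    new_type = type(type(exc).__name__ + suffix, (type(exc),), {})
    try:
        exc.__class__ = new_type        # works for user-defined exceptions
    except TypeError:                   # built-ins are not heap types
        exc.args = ('[remote] ' + str(exc),) + exc.args[1:]
    return exc

print(type(retag(AppError('boom'))).__name__)  # AppError_Remote
print(retag(ValueError('boom')).args)          # ('[remote] boom',)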
3,849
def run(self): self.socket = self.context.socket(zmq.REP) self.socket.connect(self.uri) try: while True: cmd, uid, args = protocol.msg.extract_request( self.socket.recv() ) try: if cmd in self.db_commands: db = None else: db = self.databases.get(uid) except Exception: resp = protocol.msg.format_response( uid, status=protocol.status.NO_DB) else: if db: resp = self.handle_cmd(db, self.commands, cmd, args) else: resp = self.handle_cmd( None, self.db_commands, cmd, args ) if isinstance(resp, Multipart): self.socket.send_multipart(resp) else: self.socket.send(resp) except zmq.ZMQError: pass except __HOLE__: pass finally: self.socket.close(linger=0)
RuntimeError
dataset/ETHPy150Open onitu/onitu/onitu/escalator/server/worker.py/Worker.run
3,850
def handle_cmd(self, db, commands, cmd, args): cb = commands.get(cmd) if cb: try: resp = cb(db, *args) if db is not None else cb(*args) except __HOLE__ as e: self.logger.warning("Invalid arguments: {}", e) resp = protocol.msg.format_response( cmd, status=protocol.status.INVALID_ARGS) else: self.logger.warning("Command not found: {}", cmd) resp = protocol.msg.format_response( cmd, status=protocol.status.CMD_NOT_FOUND) return resp
TypeError
dataset/ETHPy150Open onitu/onitu/onitu/escalator/server/worker.py/Worker.handle_cmd
3,851
def extract_views_from_urlpatterns(urlpatterns, base='', namespace=None, ignored_modules=None): """ Return a list of views from a list of urlpatterns. Each object in the returned list is a tuple: (view_func, regex, name) """ ignored_modules = ignored_modules if ignored_modules else [] views = [] for p in urlpatterns: if isinstance(p, RegexURLPattern): # Handle correct single URL patterns try: if namespace: name = '{0}:{1}'.format(namespace, p.name) else: name = p.name if hasattr(p.callback, '__module__'): if p.callback.__module__.split('.')[0] not in ignored_modules: views.append((p.callback, base + p.regex.pattern, name)) else: views.append((p.callback, base + p.regex.pattern, name)) except ViewDoesNotExist: continue elif isinstance(p, RegexURLResolver): # Handle include() definitions try: patterns = p.url_patterns except ImportError: continue views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern, namespace=(namespace or p.namespace), ignored_modules=ignored_modules)) elif hasattr(p, '_get_callback'): # Handle string like 'foo.views.view_name' or just function view try: views.append((p._get_callback(), base + p.regex.pattern, p.name)) except ViewDoesNotExist: continue elif hasattr(p, 'url_patterns') or hasattr(p, '_get_url_patterns'): # Handle url_patterns objects try: patterns = p.url_patterns except __HOLE__: continue views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern, namespace=namespace, ignored_modules=ignored_modules)) else: raise TypeError("%s does not appear to be a urlpattern object" % p) return views
ImportError
dataset/ETHPy150Open potatolondon/djangae/djangae/contrib/security/commands_utils.py/extract_views_from_urlpatterns
3,852
def analyse_commit(self, commit, ref_id, repo_id): message = commit.message author_name = commit.author.name author_email = commit.author.email committer_name = commit.committer.name committer_email = commit.committer.email size = commit.size sha = commit.hexsha authored_date = datetime.fromtimestamp(commit.authored_date).strftime("%Y-%m-%d %H:%M:%S") committed_date = datetime.fromtimestamp(commit.committed_date).strftime("%Y-%m-%d %H:%M:%S") #insert author author_id = self.insert_developer(author_name, author_email) committer_id = self.insert_developer(committer_name, committer_email) #insert commit self.insert_commit(repo_id, sha, message, author_id, committer_id, authored_date, committed_date, size) commit_id = self.select_commit(sha) #insert parents of the commit self.insert_commit_parents(commit.parents, commit_id, sha, repo_id) #insert commits in reference self.insert_commit_in_reference(repo_id, commit_id, ref_id) try: if self.querier.commit_has_no_parents(commit): for diff in self.querier.get_diffs_no_parent_commit(commit): file_path = diff[0] ext = self.get_ext(file_path) self.insert_file(repo_id, file_path, ext, ref_id) file_id = self.select_file_id(repo_id, file_path, ref_id) patch_content = re.sub(r'^(\w|\W)*\n@@', '@@', diff[1]) stats = self.querier.get_stats_for_file(commit, file_path) status = self.querier.get_status(stats, diff) #insert file modification last_file_modification = self.insert_file_modification(commit_id, file_id, status, stats[0], stats[1], stats[2], patch_content) line_details = self.querier.get_line_details(patch_content, ext) for line_detail in line_details: self.insert_line_details(last_file_modification, line_detail) else: for diff in self.querier.get_diffs(commit): if self.querier.is_renamed(diff): if diff.rename_from: file_previous = diff.rename_from else: file_previous = diff.diff.split('\n')[1].replace('rename from ', '') ext_current = self.get_ext(file_previous) if diff.rename_to: file_current = diff.rename_to else: file_current = diff.diff.split('\n')[2].replace('rename to ', '') #insert new file self.insert_file(repo_id, file_current, ext_current, ref_id) #get id new file current_file_id = self.select_file_id(repo_id, file_current, ref_id) #retrieve the id of the previous file previous_file_id = self.select_file_id(repo_id, file_previous, ref_id) if not previous_file_id: self.logger.warning("Git2Db: previous file id not found. 
commit message " + commit.message) if current_file_id == previous_file_id: self.logger.warning("Git2Db: previous file id is equal to current file id (" + str(current_file_id) + ") " + commit.message) self.insert_file_renamed(repo_id, current_file_id, previous_file_id) self.insert_file_modification(commit_id, current_file_id, "renamed", 0, 0, 0, None) else: #insert file #if the file does not have a path, it won't be inserted try: file_path = diff.a_blob.path ext = self.get_ext(file_path) stats = self.querier.get_stats_for_file(commit, file_path) status = self.querier.get_status(stats, diff) self.insert_file(repo_id, file_path, ext, ref_id) file_id = self.select_file_id(repo_id, file_path, ref_id) #insert file modification (additions, deletions) patch_content = re.sub(r'^(\w|\W)*\n@@', '@@', diff.diff) last_file_modification = self.insert_file_modification(commit_id, file_id, status, stats[0], stats[1], stats[2], patch_content) line_details = self.querier.get_line_details(patch_content, ext) for line_detail in line_details: self.insert_line_details(last_file_modification, line_detail) except: self.logger.warning("Git2Db: GitPython null file path " + str(sha) + " - " + str(message)) except __HOLE__ as e: self.logger.error("Git2Db: GitPython just failed on commit " + str(sha) + " - " + str(message) + ". Details: " + str(e)) finally: return
AttributeError
dataset/ETHPy150Open SOM-Research/Gitana/git2db.py/Git2Db.analyse_commit
3,853
def _check_backend():
    from ..utils import _check_pyface_backend
    try:
        from pyface.api import warning
    except __HOLE__:
        warning = None

    backend, status = _check_pyface_backend()
    if status == 0:
        return
    elif status == 1:
        msg = ("The currently selected Pyface backend %s has not been "
               "extensively tested. We recommend using qt4 which can be "
               "enabled by installing the pyside package. If you proceed with "
               "the current backend please let the developers know your "
               "experience." % backend)
    elif status == 2:
        msg = ("The currently selected Pyface backend %s has known issues. We "
               "recommend using qt4 which can be enabled by installing the "
               "pyside package." % backend)
    warning(None, msg, "Pyface Backend Warning")
ImportError
dataset/ETHPy150Open mne-tools/mne-python/mne/gui/_backend.py/_check_backend
3,854
def getTexcoordToImgMapping(mesh): #get a list of all texture coordinate sets all_texcoords = {} for geom in mesh.geometries: for prim_index, prim in enumerate(geom.primitives): inputs = prim.getInputList().getList() texindex = 0 for offset, semantic, srcid, setid in inputs: if semantic == 'TEXCOORD': try: setid = int(setid) except (__HOLE__, TypeError): setid = 0 texset = TexcoordSet(geom.id, prim_index, texindex, setid) texindex += 1 all_texcoords[texset] = [] #create a mapping between each texcoordset and the images they get bound to by traversing scenes for scene in mesh.scenes: for boundobj in itertools.chain(scene.objects('geometry'), scene.objects('controller')): if isinstance(boundobj, collada.geometry.BoundGeometry): boundgeom = boundobj else: boundgeom = boundobj.geometry geom_id = boundgeom.original.id for prim_index, boundprim in enumerate(boundgeom.primitives()): if boundprim.material is not None: effect = boundprim.material.effect inputmap = boundprim.inputmap for prop in itertools.chain(effect.supported, ['bumpmap']): propval = getattr(effect, prop) if type(propval) is collada.material.Map: if propval.texcoord in inputmap: cimg = propval.sampler.surface.image semantic, setid = inputmap[propval.texcoord] if not setid: setid = 0 else: try: setid = int(setid) except (ValueError, TypeError): setid = 0 if semantic == 'TEXCOORD': texset = TexcoordSet(geom_id, prim_index, -1, setid) if texset in all_texcoords: if cimg.path not in all_texcoords[texset]: all_texcoords[texset].append(cimg.path) #remove any texture coordinates that dont get mapped to textures all_texcoords = dict( (texset, imglist) for texset, imglist in all_texcoords.iteritems() if len(imglist) > 0 ) return all_texcoords
ValueError
dataset/ETHPy150Open pycollada/meshtool/meshtool/filters/atlas_filters/make_atlases.py/getTexcoordToImgMapping
3,855
def do_longs(opts, opt, longopts, args): try: i = opt.index('=') except __HOLE__: optarg = None else: opt, optarg = opt[:i], opt[i + 1:] has_arg, opt = long_has_args(opt, longopts) if has_arg: if optarg is None: if not args: raise GetoptError('option --%s requires argument' % opt, opt) optarg, args = args[0], args[1:] elif optarg: raise GetoptError('option --%s must not have an argument' % opt, opt) opts.append(('--' + opt, optarg or '')) return opts, args # Return: # has_arg? # full option name
ValueError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/common/diagnostic/pydevDebug/runfiles.py/do_longs
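str.index is what raises the labeled ValueError when '=' is absent — unlike str.find, which would return -1. A minimal sketch of the record's option-splitting idiom with illustrative inputs:

def split_opt(opt):
    try:
        i = opt.index('=')
    except ValueError:        # no '=' present: flag-style option
        return opt, None
    return opt[:i], opt[i + 1:]

print(split_opt('verbose'))   # ('verbose', None)
print(split_opt('level=3'))   # ('level', '3')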
3,856
def filter_tests(self, test_objs): """ based on a filter name, only return those tests that have the test case names that match """ test_suite = [] for test_obj in test_objs: if isinstance(test_obj, unittest.TestSuite): if test_obj._tests: test_obj._tests = self.filter_tests(test_obj._tests) if test_obj._tests: test_suite.append(test_obj) elif isinstance(test_obj, unittest.TestCase): test_cases = [] for tc in test_objs: try: testMethodName = tc._TestCase__testMethodName except __HOLE__: #changed in python 2.5 testMethodName = tc._testMethodName if self.__match(self.test_filter, testMethodName) and self.__match_tests(self.tests, tc, testMethodName): test_cases.append(tc) return test_cases return test_suite
AttributeError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/common/diagnostic/pydevDebug/runfiles.py/PydevTestRunner.filter_tests
3,857
def run(self, cmd, code): """Attempt to parse code as JSON, return '' if it succeeds, the error message if it fails.""" # Use ST's loose parser for its setting files. strict = os.path.splitext(self.filename)[1] not in self.extensions try: if strict: self.__class__.regex = self.strict_regex json.loads(code) else: self.__class__.regex = self.loose_regex sublime.decode_value(code) return '' except __HOLE__ as err: return str(err)
ValueError
dataset/ETHPy150Open SublimeLinter/SublimeLinter-json/linter.py/JSON.run
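json.loads signals malformed input with ValueError (json.JSONDecodeError, a ValueError subclass, on Python 3.5+), which fills the hole in the record above. A standalone sketch of the lint pattern, returning '' on success as the record does:

import json

def lint_json(code):
    try:
        json.loads(code)
        return ''
    except ValueError as err:
        return str(err)

print(repr(lint_json('{"a": 1}')))   # ''
print(lint_json('{"a": 1,}'))        # parse error with line/column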
3,858
def update_user(self, user, attributes, attribute_mapping, force_save=False): """Update a user with a set of attributes and returns the updated user. By default it uses a mapping defined in the settings constant SAML_ATTRIBUTE_MAPPING. For each attribute, if the user object has that field defined it will be set, otherwise it will try to set it in the profile object. """ if not attribute_mapping: return user try: profile = user.get_profile() except __HOLE__: profile = None except SiteProfileNotAvailable: profile = None # Django 1.5 custom model assumed except AttributeError: profile = user user_modified = False profile_modified = False for saml_attr, django_attrs in attribute_mapping.items(): try: for attr in django_attrs: if hasattr(user, attr): modified = self._set_attribute( user, attr, attributes[saml_attr][0]) user_modified = user_modified or modified elif profile is not None and hasattr(profile, attr): modified = self._set_attribute( profile, attr, attributes[saml_attr][0]) profile_modified = profile_modified or modified except KeyError: # the saml attribute is missing pass logger.debug('Sending the pre_save signal') signal_modified = any( [response for receiver, response in pre_user_save.send_robust(sender=user, attributes=attributes, user_modified=user_modified)] ) if user_modified or signal_modified or force_save: user.save() if (profile is not None and (profile_modified or signal_modified or force_save)): profile.save() return user
ObjectDoesNotExist
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/djangosaml2-0.13.0/djangosaml2/backends.py/Saml2Backend.update_user
3,859
@use_bootstrap3 @retry_resource(3) def view_generic(request, domain, app_id=None, module_id=None, form_id=None, copy_app_form=None): """ This is the main view for the app. All other views redirect to here. """ if form_id and not module_id: return bail(request, domain, app_id) app = module = form = None try: if app_id: app = get_app(domain, app_id) if module_id: try: module = app.get_module(module_id) except ModuleNotFoundException: raise Http404() if not module.unique_id: module.get_or_create_unique_id() app.save() if form_id: try: form = module.get_form(form_id) except __HOLE__: raise Http404() except ModuleNotFoundException: return bail(request, domain, app_id) if app and app.application_version == '1.0': _assert = soft_assert(to=['droberts' + '@' + 'dimagi.com']) _assert(False, 'App version 1.0', {'domain': domain, 'app_id': app_id}) return render(request, 'app_manager/no_longer_supported.html', { 'domain': domain, 'app': app, }) context = get_apps_base_context(request, domain, app) if app and app.copy_of: # don't fail hard. return HttpResponseRedirect(reverse( "corehq.apps.app_manager.views.view_app", args=[domain, app.copy_of] )) # grandfather in people who set commcare sense earlier if app and 'use_commcare_sense' in app: if app['use_commcare_sense']: if 'features' not in app.profile: app.profile['features'] = {} app.profile['features']['sense'] = 'true' del app['use_commcare_sense'] app.save() context.update({ 'module': module, 'form': form, }) lang = context['lang'] if app and not module and hasattr(app, 'translations'): context.update({"translations": app.translations.get(lang, {})}) if form: template, form_context = get_form_view_context_and_template( request, domain, form, context['langs'] ) context.update({ 'case_properties': get_all_case_properties(app), 'usercase_properties': get_usercase_properties(app), }) context.update(form_context) elif module: template = get_module_template(module) # make sure all modules have unique ids app.ensure_module_unique_ids(should_save=True) module_context = get_module_view_context(app, module, lang) context.update(module_context) elif app: template = "app_manager/app_view.html" context.update(get_app_view_context(request, app)) else: from corehq.apps.dashboard.views import NewUserDashboardView template = NewUserDashboardView.template_name context.update({'templates': NewUserDashboardView.templates(domain)}) # update multimedia context for forms and modules. 
menu_host = form or module if menu_host: default_file_name = 'module%s' % module_id if form_id: default_file_name = '%s_form%s' % (default_file_name, form_id) specific_media = { 'menu': { 'menu_refs': app.get_menu_media( module, module_id, form=form, form_index=form_id, to_language=lang ), 'default_file_name': '{name}_{lang}'.format(name=default_file_name, lang=lang), } } if module and module.uses_media(): def _make_name(suffix): return "{default_name}_{suffix}_{lang}".format( default_name=default_file_name, suffix=suffix, lang=lang, ) specific_media['case_list_form'] = { 'menu_refs': app.get_case_list_form_media(module, module_id, to_language=lang), 'default_file_name': _make_name('case_list_form'), } specific_media['case_list_menu_item'] = { 'menu_refs': app.get_case_list_menu_item_media(module, module_id, to_language=lang), 'default_file_name': _make_name('case_list_menu_item'), } specific_media['case_list_lookup'] = { 'menu_refs': app.get_case_list_lookup_image(module, module_id), 'default_file_name': '{}_case_list_lookup'.format(default_file_name), } if hasattr(module, 'product_details'): specific_media['product_list_lookup'] = { 'menu_refs': app.get_case_list_lookup_image(module, module_id, type='product'), 'default_file_name': '{}_product_list_lookup'.format(default_file_name), } context.update({ 'multimedia': { "references": app.get_references(), "object_map": app.get_object_map(), 'upload_managers': { 'icon': MultimediaImageUploadController( "hqimage", reverse(ProcessImageFileUploadView.name, args=[app.domain, app.get_id]) ), 'audio': MultimediaAudioUploadController( "hqaudio", reverse(ProcessAudioFileUploadView.name, args=[app.domain, app.get_id]) ), }, } }) context['multimedia'].update(specific_media) error = request.GET.get('error', '') context.update({ 'error': error, 'app': app, }) # Pass form for Copy Application to template domain_names = [d.name for d in Domain.active_for_user(request.couch_user)] domain_names.sort() context.update({ 'copy_app_form': copy_app_form if copy_app_form is not None else CopyApplicationForm(app_id), 'domain_names': domain_names, }) context['latest_commcare_version'] = get_commcare_versions(request.user)[-1] if app and app.doc_type == 'Application' and has_privilege(request, privileges.COMMCARE_LOGO_UPLOADER): uploader_slugs = ANDROID_LOGO_PROPERTY_MAPPING.keys() from corehq.apps.hqmedia.controller import MultimediaLogoUploadController from corehq.apps.hqmedia.views import ProcessLogoFileUploadView context.update({ "sessionid": request.COOKIES.get('sessionid'), 'uploaders': [ MultimediaLogoUploadController( slug, reverse( ProcessLogoFileUploadView.name, args=[domain, app_id, slug], ) ) for slug in uploader_slugs ], "refs": { slug: ApplicationMediaReference( app.logo_refs.get(slug, {}).get("path", slug), media_class=CommCareImage, module_id=app.logo_refs.get(slug, {}).get("m_id"), ).as_dict() for slug in uploader_slugs }, "media_info": { slug: app.logo_refs.get(slug) for slug in uploader_slugs if app.logo_refs.get(slug) }, }) response = render(request, template, context) response.set_cookie('lang', encode_if_unicode(lang)) return response
IndexError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/app_manager/views/view_generic.py/view_generic
3,860
def CMDreindex(parser): """Begins to reindex the quickopen database""" (options, args) = parser.parse_args() db = open_db(options) try: db.begin_reindex() print "Reindexing has begun." except __HOLE__: print "%s." % DBStatus.not_running_string()
IOError
dataset/ETHPy150Open natduca/quickopen/src/quickopen.py/CMDreindex
3,861
def CMDedit(parser): """Searches for <query> then opens it in $EDITOR""" parser.add_option('--current-filename', dest='current_filename', action='store', default=None, help="Hints quickopen about the current buffer to improve search relevance.") parser.add_option('--open-filenames', dest='open_filenames', action='store', default=[], help="Hints quickopen about the filenames currently open to improve search relevance.") (options, args) = parser.parse_args() if len(args) > 1: parser.error('Expected: <query> or nothing') if not os.getenv('EDITOR'): parser.error('$EDITOR must be set in environment') db = open_db(options) if not db.has_index: print "Database is not fully indexed. Wait a bit or try quickopen status" return 255 def edit(filenames, canceled): if canceled: return 255 args = shlex.split(os.getenv('EDITOR')) args.extend(filenames) proc = subprocess.Popen(args, shell=False) try: return proc.wait() except __HOLE__: proc.kill() return 255 search_args = {} if options.current_filename: search_args["current_filename"] = options.current_filename if options.open_filenames: search_args["open_filenames"] = split_open_filenames(options.open_filenames) if len(args): initial_filter = args[0] else: initial_filter = None from src import open_dialog from src import message_loop def edit_at_quit(filenames, canceled): def do_edit(): edit(filenames, canceled) message_loop.add_quit_handler(do_edit) open_dialog.run(options, db, initial_filter, edit_at_quit) # will not return on osx.
KeyboardInterrupt
dataset/ETHPy150Open natduca/quickopen/src/quickopen.py/CMDedit
3,862
def run(verbosity=1,doctest=False,numpy=True): """Run NetworkX tests. Parameters ---------- verbosity: integer, optional Level of detail in test reports. Higher numbers provide more detail. doctest: bool, optional True to run doctests in code modules numpy: bool, optional True to test modules dependent on numpy """ try: import nose except __HOLE__: raise ImportError(\ "The nose package is needed to run the NetworkX tests.") sys.stderr.write("Running NetworkX tests:") nx_install_dir=path.join(path.dirname(__file__), path.pardir) # stop if running from source directory if getcwd() == path.abspath(path.join(nx_install_dir,path.pardir)): raise RuntimeError("Can't run tests from source directory.\n" "Run 'nosetests' from the command line.") argv=[' ','--verbosity=%d'%verbosity, '-w',nx_install_dir, '-exe'] if doctest: argv.extend(['--with-doctest','--doctest-extension=txt']) if not numpy: argv.extend(['-A not numpy']) nose.run(argv=argv)
ImportError
dataset/ETHPy150Open networkx/networkx/networkx/tests/test.py/run
3,863
def push_data(self, item, key, data): if self.postprocessor is not None: result = self.postprocessor(self.path, key, data) if result is None: return item key, data = result if item is None: item = self.dict_constructor() try: value = item[key] if isinstance(value, list): value.append(data) else: item[key] = [value, data] except __HOLE__: item[key] = data return item
KeyError
dataset/ETHPy150Open haukurk/flask-restapi-recipe/restapi/utils/conversion/xmltodict.py/_DictSAXHandler.push_data
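The KeyError here drives xmltodict's scalar-to-list promotion: the first occurrence of a key raises KeyError and stores a scalar, and repeats promote the value to a list. A trimmed sketch of that mechanism without the postprocessor hook:

def push(item, key, data):
    try:
        value = item[key]                 # raises KeyError on first sight
        if isinstance(value, list):
            value.append(data)
        else:
            item[key] = [value, data]     # promote scalar to list
    except KeyError:
        item[key] = data
    return item

item = {}
for v in ('1', '2', '3'):
    push(item, 'b', v)
print(item)  # {'b': ['1', '2', '3']}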
3,864
def parse(xml_input, encoding=None, expat=expat, process_namespaces=False, namespace_separator=':', **kwargs): """Parse the given XML input and convert it into a dictionary. `xml_input` can either be a `string` or a file-like object. If `xml_attribs` is `True`, element attributes are put in the dictionary among regular child elements, using `@` as a prefix to avoid collisions. If set to `False`, they are just ignored. Simple example:: >>> import xmltodict >>> doc = xmltodict.parse(\"\"\" ... <a prop="x"> ... <b>1</b> ... <b>2</b> ... </a> ... \"\"\") >>> doc['a']['@prop'] u'x' >>> doc['a']['b'] [u'1', u'2'] If `item_depth` is `0`, the function returns a dictionary for the root element (default behavior). Otherwise, it calls `item_callback` every time an item at the specified depth is found and returns `None` in the end (streaming mode). The callback function receives two parameters: the `path` from the document root to the item (name-attribs pairs), and the `item` (dict). If the callback's return value is false-ish, parsing will be stopped with the :class:`ParsingInterrupted` exception. Streaming example:: >>> def handle(path, item): ... print 'path:%s item:%s' % (path, item) ... return True ... >>> xmltodict.parse(\"\"\" ... <a prop="x"> ... <b>1</b> ... <b>2</b> ... </a>\"\"\", item_depth=2, item_callback=handle) path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:1 path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:2 The optional argument `postprocessor` is a function that takes `path`, `key` and `value` as positional arguments and returns a new `(key, value)` pair where both `key` and `value` may have changed. Usage example:: >>> def postprocessor(path, key, value): ... try: ... return key + ':int', int(value) ... except (ValueError, TypeError): ... return key, value >>> xmltodict.parse('<a><b>1</b><b>2</b><b>x</b></a>', ... postprocessor=postprocessor) OrderedDict([(u'a', OrderedDict([(u'b:int', [1, 2]), (u'b', u'x')]))]) You can pass an alternate version of `expat` (such as `defusedexpat`) by using the `expat` parameter. E.g: >>> import defusedexpat >>> xmltodict.parse('<a>hello</a>', expat=defusedexpat.pyexpat) OrderedDict([(u'a', u'hello')]) """ handler = _DictSAXHandler(namespace_separator=namespace_separator, **kwargs) if isinstance(xml_input, _unicode): if not encoding: encoding = 'utf-8' xml_input = xml_input.encode(encoding) parser = expat.ParserCreate( encoding, namespace_separator if process_namespaces else None ) try: parser.ordered_attributes = True except AttributeError: # Jython's expat does not support ordered_attributes pass parser.StartElementHandler = handler.startElement parser.EndElementHandler = handler.endElement parser.CharacterDataHandler = handler.characters try: parser.ParseFile(xml_input) except (TypeError, __HOLE__): parser.Parse(xml_input, True) return handler.item
AttributeError
dataset/ETHPy150Open haukurk/flask-restapi-recipe/restapi/utils/conversion/xmltodict.py/parse
3,865
def unparse(input_dict, output=None, encoding='utf-8', **kwargs): """Emit an XML document for the given `input_dict` (reverse of `parse`). The resulting XML document is returned as a string, but if `output` (a file-like object) is specified, it is written there instead. Dictionary keys prefixed with `attr_prefix` (default=`'@'`) are interpreted as XML node attributes, whereas keys equal to `cdata_key` (default=`'#text'`) are treated as character data. The `pretty` parameter (default=`False`) enables pretty-printing. In this mode, lines are terminated with `'\n'` and indented with `'\t'`, but this can be customized with the `newl` and `indent` parameters. """ ((key, value),) = input_dict.items() must_return = False if output is None: output = StringIO() must_return = True content_handler = XMLGenerator(output, encoding) content_handler.startDocument() _emit(key, value, content_handler, **kwargs) content_handler.endDocument() if must_return: value = output.getvalue() try: # pragma no cover value = value.decode(encoding) except __HOLE__: # pragma no cover pass return value
AttributeError
dataset/ETHPy150Open haukurk/flask-restapi-recipe/restapi/utils/conversion/xmltodict.py/unparse
3,866
def resolve_selection ( self, selection_list ): """ Returns a list of (row, col) grid-cell coordinates that correspond to the objects in *selection_list*. For each coordinate, if the row is -1, it indicates that the entire column is selected. Likewise coordinates with a column of -1 indicate an entire row that is selected. For the TableModel, the objects in *selection_list* must be TraitGridSelection objects. """ items = self.__filtered_items() cells = [] for selection in selection_list: row = -1 if selection.obj is not None: try: row = items.index( selection.obj ) except __HOLE__: continue column = -1 if selection.name != '': column = self._get_column_index_by_trait( selection.name ) if column is None: continue cells.append( ( row, column ) ) return cells
ValueError
dataset/ETHPy150Open enthought/traitsui/traitsui/wx/table_model.py/TableModel.resolve_selection
3,867
def _generate_filename_to_mtime(self): filename_to_mtime = {} num_files = 0 for dirname, dirnames, filenames in os.walk(self._directory, followlinks=True): for filename in filenames + dirnames: if num_files == 10000: warnings.warn( 'There are too many files in your application for ' 'changes in all of them to be monitored. You may have to ' 'restart the development server to see some changes to your ' 'files.') return filename_to_mtime num_files += 1 path = os.path.join(dirname, filename) try: mtime = os.path.getmtime(path) except (IOError, __HOLE__): pass else: filename_to_mtime[path] = mtime return filename_to_mtime
OSError
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/tools/devappserver2/mtime_file_watcher.py/MtimeFileWatcher._generate_filename_to_mtime
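os.path.getmtime can raise the labeled OSError (or IOError, an alias of OSError on Python 3) when a file disappears between the directory walk and the stat call. A reduced sketch; the paths are illustrative:

import os

def mtimes(paths):
    result = {}
    for path in paths:
        try:
            result[path] = os.path.getmtime(path)
        except (IOError, OSError):
            pass  # file vanished mid-scan: skip it
    return result

print(mtimes(['/no/such/file', os.devnull]))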
3,868
def _json_payload(request): """ Return a parsed JSON payload for the request. :raises PayloadError: if the body has no valid JSON body """ try: return request.json_body except __HOLE__: raise PayloadError()
ValueError
dataset/ETHPy150Open hypothesis/h/h/api/views.py/_json_payload
3,869
def get_validation_errors(outfile, app=None): """ Validates all models that are part of the specified app. If no app name is provided, validates all models of all installed apps. Writes errors, if any, to outfile. Returns number of errors. """ from django.conf import settings from django.db import models, connection from django.db.models.loading import get_app_errors from django.db.models.fields.related import RelatedObject e = ModelErrorCollection(outfile) for (app_name, error) in get_app_errors().items(): e.add(app_name, error) for cls in models.get_models(app): opts = cls._meta # Do field-specific validation. for f in opts.local_fields: if f.name == 'id' and not f.primary_key and opts.pk.name == 'id': e.add(opts, '"%s": You can\'t use "id" as a field name, because each model automatically gets an "id" field if none of the fields have primary_key=True. You need to either remove/rename your "id" field or add primary_key=True to a field.' % f.name) if f.name.endswith('_'): e.add(opts, '"%s": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.' % f.name) if isinstance(f, models.CharField): try: max_length = int(f.max_length) if max_length <= 0: e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name) except (__HOLE__, TypeError): e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name) if isinstance(f, models.DecimalField): decimalp_ok, mdigits_ok = False, False decimalp_msg ='"%s": DecimalFields require a "decimal_places" attribute that is a non-negative integer.' try: decimal_places = int(f.decimal_places) if decimal_places < 0: e.add(opts, decimalp_msg % f.name) else: decimalp_ok = True except (ValueError, TypeError): e.add(opts, decimalp_msg % f.name) mdigits_msg = '"%s": DecimalFields require a "max_digits" attribute that is a positive integer.' try: max_digits = int(f.max_digits) if max_digits <= 0: e.add(opts, mdigits_msg % f.name) else: mdigits_ok = True except (ValueError, TypeError): e.add(opts, mdigits_msg % f.name) invalid_values_msg = '"%s": DecimalFields require a "max_digits" attribute value that is greater than the value of the "decimal_places" attribute.' if decimalp_ok and mdigits_ok: if decimal_places >= max_digits: e.add(opts, invalid_values_msg % f.name) if isinstance(f, models.FileField) and not f.upload_to: e.add(opts, '"%s": FileFields require an "upload_to" attribute.' % f.name) if isinstance(f, models.ImageField): # Try to import PIL in either of the two ways it can end up installed. try: from PIL import Image except ImportError: try: import Image except ImportError: e.add(opts, '"%s": To use ImageFields, you need to install the Python Imaging Library. Get it at http://www.pythonware.com/products/pil/ .' % f.name) if isinstance(f, models.BooleanField) and getattr(f, 'null', False): e.add(opts, '"%s": BooleanFields do not accept null values. Use a NullBooleanField instead.' % f.name) if f.choices: if isinstance(f.choices, basestring) or not is_iterable(f.choices): e.add(opts, '"%s": "choices" should be iterable (e.g., a tuple or list).' % f.name) else: for c in f.choices: if not isinstance(c, (list, tuple)) or len(c) != 2: e.add(opts, '"%s": "choices" should be a sequence of two-tuples.' % f.name) if f.db_index not in (None, True, False): e.add(opts, '"%s": "db_index" should be either None, True or False.' % f.name) # Perform any backend-specific field validation. 
connection.validation.validate_field(e, opts, f) # Check to see if the related field will clash with any existing # fields, m2m fields, m2m related objects or related objects if f.rel: if f.rel.to not in models.get_models(): e.add(opts, "'%s' has a relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to)) # it is a string and we could not find the model it refers to # so skip the next section if isinstance(f.rel.to, (str, unicode)): continue # Make sure the related field specified by a ForeignKey is unique if not f.rel.to._meta.get_field(f.rel.field_name).unique: e.add(opts, "Field '%s' under model '%s' must have a unique=True constraint." % (f.rel.field_name, f.rel.to.__name__)) rel_opts = f.rel.to._meta rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name() rel_query_name = f.related_query_name() if not f.rel.is_hidden(): for r in rel_opts.fields: if r.name == rel_name: e.add(opts, "Accessor for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name)) if r.name == rel_query_name: e.add(opts, "Reverse query name for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name)) for r in rel_opts.local_many_to_many: if r.name == rel_name: e.add(opts, "Accessor for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name)) if r.name == rel_query_name: e.add(opts, "Reverse query name for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name)) for r in rel_opts.get_all_related_many_to_many_objects(): if r.get_accessor_name() == rel_name: e.add(opts, "Accessor for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name)) if r.get_accessor_name() == rel_query_name: e.add(opts, "Reverse query name for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name)) for r in rel_opts.get_all_related_objects(): if r.field is not f: if r.get_accessor_name() == rel_name: e.add(opts, "Accessor for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name)) if r.get_accessor_name() == rel_query_name: e.add(opts, "Reverse query name for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name)) seen_intermediary_signatures = [] for i, f in enumerate(opts.local_many_to_many): # Check to see if the related m2m field will clash with any # existing fields, m2m fields, m2m related objects or related # objects if f.rel.to not in models.get_models(): e.add(opts, "'%s' has an m2m relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to)) # it is a string and we could not find the model it refers to # so skip the next section if isinstance(f.rel.to, (str, unicode)): continue # Check that the field is not set to unique. ManyToManyFields do not support unique. if f.unique: e.add(opts, "ManyToManyFields cannot be unique. 
Remove the unique argument on '%s'." % f.name) if f.rel.through is not None and not isinstance(f.rel.through, basestring): from_model, to_model = cls, f.rel.to if from_model == to_model and f.rel.symmetrical and not f.rel.through._meta.auto_created: e.add(opts, "Many-to-many fields with intermediate tables cannot be symmetrical.") seen_from, seen_to, seen_self = False, False, 0 for inter_field in f.rel.through._meta.fields: rel_to = getattr(inter_field.rel, 'to', None) if from_model == to_model: # relation to self if rel_to == from_model: seen_self += 1 if seen_self > 2: e.add(opts, "Intermediary model %s has more than " "two foreign keys to %s, which is ambiguous " "and is not permitted." % ( f.rel.through._meta.object_name, from_model._meta.object_name ) ) else: if rel_to == from_model: if seen_from: e.add(opts, "Intermediary model %s has more " "than one foreign key to %s, which is " "ambiguous and is not permitted." % ( f.rel.through._meta.object_name, from_model._meta.object_name ) ) else: seen_from = True elif rel_to == to_model: if seen_to: e.add(opts, "Intermediary model %s has more " "than one foreign key to %s, which is " "ambiguous and is not permitted." % ( f.rel.through._meta.object_name, rel_to._meta.object_name ) ) else: seen_to = True if f.rel.through not in models.get_models(include_auto_created=True): e.add(opts, "'%s' specifies an m2m relation through model " "%s, which has not been installed." % (f.name, f.rel.through) ) signature = (f.rel.to, cls, f.rel.through) if signature in seen_intermediary_signatures: e.add(opts, "The model %s has two manually-defined m2m " "relations through the model %s, which is not " "permitted. Please consider using an extra field on " "your intermediary model instead." % ( cls._meta.object_name, f.rel.through._meta.object_name ) ) else: seen_intermediary_signatures.append(signature) if not f.rel.through._meta.auto_created: seen_related_fk, seen_this_fk = False, False for field in f.rel.through._meta.fields: if field.rel: if not seen_related_fk and field.rel.to == f.rel.to: seen_related_fk = True elif field.rel.to == cls: seen_this_fk = True if not seen_related_fk or not seen_this_fk: e.add(opts, "'%s' is a manually-defined m2m relation " "through model %s, which does not have foreign keys " "to %s and %s" % (f.name, f.rel.through._meta.object_name, f.rel.to._meta.object_name, cls._meta.object_name) ) elif isinstance(f.rel.through, basestring): e.add(opts, "'%s' specifies an m2m relation through model %s, " "which has not been installed" % (f.name, f.rel.through) ) elif isinstance(f, GenericRelation): if not any([isinstance(vfield, GenericForeignKey) for vfield in f.rel.to._meta.virtual_fields]): e.add(opts, "Model '%s' must have a GenericForeignKey in " "order to create a GenericRelation that points to it." % f.rel.to.__name__ ) rel_opts = f.rel.to._meta rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name() rel_query_name = f.related_query_name() # If rel_name is none, there is no reverse accessor (this only # occurs for symmetrical m2m relations to self). If this is the # case, there are no clashes to check for this field, as there are # no reverse descriptors for this field. if rel_name is not None: for r in rel_opts.fields: if r.name == rel_name: e.add(opts, "Accessor for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name)) if r.name == rel_query_name: e.add(opts, "Reverse query name for m2m field '%s' clashes with field '%s.%s'. 
Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name)) for r in rel_opts.local_many_to_many: if r.name == rel_name: e.add(opts, "Accessor for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name)) if r.name == rel_query_name: e.add(opts, "Reverse query name for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name)) for r in rel_opts.get_all_related_many_to_many_objects(): if r.field is not f: if r.get_accessor_name() == rel_name: e.add(opts, "Accessor for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name)) if r.get_accessor_name() == rel_query_name: e.add(opts, "Reverse query name for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name)) for r in rel_opts.get_all_related_objects(): if r.get_accessor_name() == rel_name: e.add(opts, "Accessor for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name)) if r.get_accessor_name() == rel_query_name: e.add(opts, "Reverse query name for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name)) # Check ordering attribute. if opts.ordering: for field_name in opts.ordering: if field_name == '?': continue if field_name.startswith('-'): field_name = field_name[1:] if opts.order_with_respect_to and field_name == '_order': continue # Skip ordering in the format field1__field2 (FIXME: checking # this format would be nice, but it's a little fiddly). if '__' in field_name: continue try: opts.get_field(field_name, many_to_many=False) except models.FieldDoesNotExist: e.add(opts, '"ordering" refers to "%s", a field that doesn\'t exist.' % field_name) # Check unique_together. for ut in opts.unique_together: for field_name in ut: try: f = opts.get_field(field_name, many_to_many=True) except models.FieldDoesNotExist: e.add(opts, '"unique_together" refers to %s, a field that doesn\'t exist. Check your syntax.' % field_name) else: if isinstance(f.rel, models.ManyToManyRel): e.add(opts, '"unique_together" refers to %s. ManyToManyFields are not supported in unique_together.' % f.name) if f not in opts.local_fields: e.add(opts, '"unique_together" refers to %s. This is not in the same model as the unique_together statement.' % f.name) return len(e.errors)
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/management/validation.py/get_validation_errors
3,870
def missing_or_empty(obj, key): try: if not obj[key]: return True except __HOLE__: return True return False
KeyError
dataset/ETHPy150Open upconsulting/IsisCB/isiscb/zotero/admin.py/missing_or_empty
3,871
def is_valid(self): """ Enforce validation for ``value`` based on ``type_controlled``. """ val = super(AttributeForm, self).is_valid() if all(x in self.cleaned_data for x in ['value', 'type_controlled']): value = self.cleaned_data['value'] attr_type = self.cleaned_data['type_controlled'] if (value and not attr_type) or (attr_type and not value): self.add_error('value', 'Missing data') try: value_model = attr_type.value_content_type.model_class() except AttributeError as E: self.add_error('type_controlled', 'No type selected') value_model = None if value_model: try: value_model.is_valid(value) except __HOLE__ as E: self.add_error('value', E) return super(AttributeForm, self).is_valid()
ValidationError
dataset/ETHPy150Open upconsulting/IsisCB/isiscb/zotero/admin.py/AttributeForm.is_valid
3,872
def match(request, draftmodel, choicemodel): """ Load selected draft and production instances based on user selection. See :meth:`.DraftCitationAdmin.match` and :meth:`.DraftAuthorityAdmin.match`\. """ chosen = [] for field in request.POST.keys(): if not field.startswith('suggestions_for'): continue suggestion_choice_id = request.POST.get(field, None) # The "None" selection in the radio field has a value of "-1". if not suggestion_choice_id or suggestion_choice_id == '-1': continue # There's a chance that something went wrong with template # rendering that messed up field names. We'll swallow this, # for now... try: draftinstance_id = int(field.split('_')[-1]) except __HOLE__: continue draftinstance = draftmodel.objects.get(pk=draftinstance_id) suggestion_choice = choicemodel.objects.get(pk=suggestion_choice_id) chosen.append((draftinstance, suggestion_choice)) return chosen
ValueError
dataset/ETHPy150Open upconsulting/IsisCB/isiscb/zotero/admin.py/match
3,873
def create_authority(self, request, draftauthority_id): """ A staff user can create a new :class:`isisdata.Authority` record using data from a :class:`zotero.DraftAuthority` instance. """ context = dict(self.admin_site.each_context(request)) context.update({'title': 'Create new authority record'}) draftauthority = DraftAuthority.objects.get(pk=draftauthority_id) context.update({'draftauthority': draftauthority}) AttributeInlineFormSet = formset_factory(AttributeForm) LinkedDataInlineFormSet = formset_factory(LinkedDataForm) if request.method == 'GET': form = AuthorityForm(initial={ 'name': draftauthority.name, 'type_controlled': draftauthority.type_controlled, 'record_history': u'Created from Zotero accession {0}, performed at {1} by {2}. Subsequently validated and curated by {3}.'.format(draftauthority.part_of.id, draftauthority.part_of.imported_on, draftauthority.part_of.imported_by, request.user.username), }) attributeFormset = AttributeInlineFormSet() linkeddataFormset = LinkedDataInlineFormSet() elif request.method == 'POST': form = AuthorityForm(request.POST) attributeFormset = AttributeInlineFormSet(request.POST) linkeddataFormset = LinkedDataInlineFormSet(request.POST) if form.is_valid() and attributeFormset.is_valid() and linkeddataFormset.is_valid(): # Create the Authority entry. instance = form.save() # Create new Attributes. for attributeForm in attributeFormset: try: # ISISCB-396; some invalid forms are getting past. attributeType = attributeForm.cleaned_data['type_controlled'] except __HOLE__: continue valueModel = attributeType.value_content_type.model_class() value = attributeForm.cleaned_data['value'] attribute_instance = Attribute( source=instance, type_controlled=attributeType, ) attribute_instance.save() value_instance = valueModel( attribute=attribute_instance, value=value, ) value_instance.save() # Create new LinkedData entries. for linkeddataForm in linkeddataFormset: linkeddataType = linkeddataForm.cleaned_data['type_controlled'] urn = linkeddataForm.cleaned_data['universal_resource_name'] linkeddata_instance = LinkedData( subject=instance, universal_resource_name=urn, type_controlled=linkeddataType, ) linkeddata_instance.save() # Add a new InstanceResolutionEvent. irEvent = InstanceResolutionEvent( for_instance=draftauthority, to_instance=instance ) irEvent.save() # Update the DraftAuthority. draftauthority.processed = True draftauthority.save() # If successful, take the user to the Authority change view. return HttpResponseRedirect(reverse("admin:isisdata_authority_change", args=[instance.id])) context.update({ 'form': form, 'attribute_formset': attributeFormset, 'linkeddata_formset': linkeddataFormset, }) return TemplateResponse(request, "admin/authority_create.html", context)
KeyError
dataset/ETHPy150Open upconsulting/IsisCB/isiscb/zotero/admin.py/DraftAuthorityAdmin.create_authority
3,874
def get_txt(name): """Return a TXT record associated with a DNS name. @param name: The bytestring domain name to look up. """ # pydns needs Unicode, but DKIM's d= is ASCII (already punycoded). try: unicode_name = name.decode('ascii') except __HOLE__: return None txt = _get_txt(unicode_name) if txt: txt = txt.encode('utf-8') return txt
UnicodeDecodeError
dataset/ETHPy150Open Flolagale/mailin/python/dkim/dnsplug.py/get_txt
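bytes.decode('ascii') raises the labeled UnicodeDecodeError for any byte above 0x7f, which is how the record rejects domain names that are not already punycoded ASCII. A standalone sketch:

def ascii_or_none(name):
    try:
        return name.decode('ascii')
    except UnicodeDecodeError:  # non-ASCII byte in the name
        return None

print(ascii_or_none(b'example.com'))     # 'example.com'
print(ascii_or_none(b'\xc3\xa9xample'))  # None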
3,875
@conf def get_python_variables(self, variables, imports=None): """ Spawn a new python process to dump configuration variables :param variables: variables to print :type variables: list of string :param imports: one import by element :type imports: list of string :return: the variable values :rtype: list of string """ if not imports: try: imports = self.python_imports except AttributeError: imports = DISTUTILS_IMP program = list(imports) # copy program.append('') for v in variables: program.append("print(repr(%s))" % v) os_env = dict(os.environ) try: del os_env['MACOSX_DEPLOYMENT_TARGET'] # see comments in the OSX tool except __HOLE__: pass try: out = self.cmd_and_log(self.env.PYTHON + ['-c', '\n'.join(program)], env=os_env) except Errors.WafError: self.fatal('The distutils module is unusable: install "python-devel"?') return_values = [] for s in out.split('\n'): s = s.strip() if not s: continue if s == 'None': return_values.append(None) elif s[0] == "'" and s[-1] == "'": return_values.append(s[1:-1]) elif s[0].isdigit(): return_values.append(int(s)) else: break return return_values
KeyError
dataset/ETHPy150Open cournape/Bento/bento/backends/waf_tools/custom_python.py/get_python_variables
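del on a missing dict key raises the labeled KeyError; the record swallows it because the environment variable may simply be unset. dict.pop with a default is the handler-free equivalent — a short sketch on a copied environment:

import os

env = dict(os.environ)
try:
    del env['MACOSX_DEPLOYMENT_TARGET']    # KeyError if the name is unset
except KeyError:
    pass
env.pop('MACOSX_DEPLOYMENT_TARGET', None)  # same effect, no try/except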
3,876
@conf def check_python_headers(conf): """ Check for headers and libraries necessary to extend or embed python by using the module *distutils*. On success the environment variables xxx_PYEXT and xxx_PYEMBED are added: * PYEXT: for compiling python extensions * PYEMBED: for embedding a python interpreter """ # FIXME rewrite if not conf.env['CC_NAME'] and not conf.env['CXX_NAME']: conf.fatal('load a compiler first (gcc, g++, ..)') if not conf.env['PYTHON_VERSION']: conf.check_python_version() env = conf.env pybin = env.PYTHON if not pybin: conf.fatal('could not find the python executable') v = 'INCLUDEPY SO LDFLAGS MACOSX_DEPLOYMENT_TARGET LDSHARED CFLAGS'.split() try: lst = conf.get_python_variables(["get_config_var('%s') or ''" % x for x in v]) except __HOLE__: conf.fatal("Python development headers not found (-v for details).") vals = ['%s = %r' % (x, y) for (x, y) in zip(v, lst)] conf.to_log("Configuration returned from %r:\n%r\n" % (pybin, '\n'.join(vals))) dct = dict(zip(v, lst)) x = 'MACOSX_DEPLOYMENT_TARGET' if dct[x]: conf.env[x] = conf.environ[x] = dct[x] env['pyext_PATTERN'] = '%s' + dct['SO'] # not a mistake if Options.options.use_distutils_flags: all_flags = dct['LDFLAGS'] + ' ' + dct['LDSHARED'] + ' ' + dct['CFLAGS'] conf.parse_flags(all_flags, 'PYEXT') env['INCLUDES_PYEXT'] = [dct['INCLUDEPY']]
RuntimeError
dataset/ETHPy150Open cournape/Bento/bento/backends/waf_tools/custom_python.py/check_python_headers
3,877
@feature('pyext') @before_method('propagate_uselib_vars', 'apply_link') @after_method('apply_bundle') def init_pyext(self): """ Change the values of *cshlib_PATTERN* and *cxxshlib_PATTERN* to remove the *lib* prefix from library names. """ self.uselib = self.to_list(getattr(self, 'uselib', [])) if not 'PYEXT' in self.uselib: self.uselib.append('PYEXT') # override shlib_PATTERN set by the osx module self.env['cshlib_PATTERN'] = self.env['cxxshlib_PATTERN'] = self.env['macbundle_PATTERN'] = self.env['pyext_PATTERN'] try: if not self.install_path: return except __HOLE__: self.install_path = '${PYTHONARCHDIR}'
AttributeError
dataset/ETHPy150Open cournape/Bento/bento/backends/waf_tools/custom_python.py/init_pyext
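Reading an attribute that was never assigned raises the labeled AttributeError, and the record uses the handler to install a default. A minimal sketch with an illustrative stand-in for the waf task generator:

class TaskGen(object):  # stand-in: real waf task generators carry more state
    pass

tg = TaskGen()
try:
    if not tg.install_path:   # AttributeError: never set
        pass
except AttributeError:
    tg.install_path = '${PYTHONARCHDIR}'

print(tg.install_path)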
3,878
def _Cmd(self, command, mode=None, merge_stderr_first=False, send=None, require_low_chanid=False): response = '' retries_left = 1 while True: try: chan = self._ssh_client.get_transport().open_session() chan.settimeout(self.timeout_response) if require_low_chanid and chan.remote_chanid > _LOW_CHANID_THRESHOLD: # We should not be having multiple channels open. If we do, # close them before proceeding. logging.error( 'Remote ssh channel id %d exceeded %d when opening session to ' '%s(%s), reconnecting.', chan.remote_chanid, _LOW_CHANID_THRESHOLD, self.host, self.loopback_ipv4) self.Disconnect() self.Connect(self._username, self._password, self._ssh_keys, self._enable_password) chan = self._ssh_client.get_transport().open_session() chan.exec_command(command) stdin = chan.makefile('wb', -1) stdout = chan.makefile('rb', -1) stderr = chan.makefile_stderr('rb', -1) if send is not None: stdin.write(send) stdout_data = stdout.read() stderr_data = stderr.read() # Request channel close by remote peer. chan.close() break except paramiko.SSHException as e: msg = str(e) logging.error('%s(%s) Cmd(%r, mode=%r): %s', self.host, self.loopback_ipv4, command, mode, msg) raise exceptions.CmdError(msg) except __HOLE__: # This occurs when self._ssh_client becomes None after a Paramiko # failure. Pause momentarily, try to reconnect and loop to resend # the command. time.sleep(0.25) try: if retries_left: self._Connect(self._username, self._password, self._ssh_keys) retries_left -= 1 continue else: raise exceptions.CmdError('Failed to exec_command after retry.') except paramiko.SSHException as e: msg = str(e) logging.error('%s(%s) Cmd(%r, mode=%r): %s', self.host, self.loopback_ipv4, command, mode, msg) raise exceptions.ConnectError(msg) except Exception as e: # Paramiko may raise any exception, so catch and log it here. msg = '%s:%s(%s) Cmd(%r, mode=%r): %s: %s' % ( type(e), self.host, self.loopback_ipv4, command, mode, e.__class__.__name__, str(e)) logging.exception(msg) raise exceptions.CmdError('%s: %s' % (e.__class__.__name__, str(e))) # Remove stderr lines started with 'waiting for'. if stderr_data and not merge_stderr_first: out = [] for l in stderr_data.splitlines(): if not l.startswith('waiting for'): out.append(l) stderr_data = '\n'.join(out) # Marshal the response from the stdout/err channels and handle errors. if stderr_data and not merge_stderr_first: raise exceptions.CmdError(stderr_data) elif stdout_data: if merge_stderr_first and stderr_data: response = stderr_data response += stdout_data else: # Sometimes, a command (e.g., 'show system license keys') returns # nothing. This can mean that the channel went away on us, and we # got no data back (and no error). if self.connected: logging.warn('Both STDOUT and STDERR empty after %s on %s(%s)', repr(command), self.host, self.loopback_ipv4) else: raise exceptions.CmdError('Connection to %s(%s) was terminated.' % (self.host, self.loopback_ipv4)) return response
AttributeError
dataset/ETHPy150Open google/capirca/tools/ldpush/paramiko_device.py/ParamikoDevice._Cmd
3,879
@classmethod
def get_all_metadata(cls, config_providers=default_settings.PROVIDERS):
    ret = {}
    providers = cls.get_providers(config_providers)
    for provider in providers:
        provider_data = {}
        provider_data["provides_metrics"] = provider.provides_metrics
        provider_data["provides_aliases"] = provider.provides_aliases
        try:
            provider_data["url"] = provider.url
        except AttributeError:
            pass
        try:
            provider_data["descr"] = provider.descr
        except AttributeError:
            pass
        try:
            provider_data["metrics"] = provider.static_meta_dict
        except __HOLE__:
            pass
        provider_name = provider.__class__.__name__.lower()
        ret[provider_name] = provider_data
    return ret
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpact/providers/provider.py/ProviderFactory.get_all_metadata
3,880
def _get_error(self, status_code, response=None):
    try:
        headers = response.headers
    except __HOLE__:
        headers = {}
    try:
        text = response.text
    except (AttributeError, TypeError):
        text = ""
    if response:
        url = response.url
    else:
        url = None

    # analytics.track("CORE", "Received error response from Provider", {
    #     "provider": self.provider_name,
    #     "url": url,
    #     "text": text,
    #     "status_code": status_code
    # })

    if status_code >= 500:
        error = ProviderServerError(response)
        self.logger.info(u"%s ProviderServerError status code=%i, %s, %s"
                         % (self.provider_name, status_code, text, str(headers)))
    else:
        error = ProviderClientError(response)
        self.logger.info(u"%s ProviderClientError status code=%i, %s, %s"
                         % (self.provider_name, status_code, text, str(headers)))
    raise(error)
    return error
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpact/providers/provider.py/Provider._get_error
3,881
def _get_templated_url(self, template, id, method=None):
    try:
        id_unicode = unicode(id, "UTF-8")
    except __HOLE__:
        id_unicode = id
    id_utf8 = id_unicode.encode("UTF-8")
    substitute_id = id_utf8
    if template != "%s":
        substitute_id = urllib.quote(id_utf8)
    url = template % substitute_id
    return(url)
TypeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpact/providers/provider.py/Provider._get_templated_url
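A short illustration for the record above, assuming the Python 2 semantics of the original corpus: calling unicode() with an encoding argument on a value that is already unicode raises TypeError ("decoding Unicode is not supported"), so the handler keeps the id unchanged.

id = u'10.1234/example'  # hypothetical identifier, already a unicode object
try:
    id_unicode = unicode(id, "UTF-8")  # raises TypeError on unicode input
except TypeError:
    id_unicode = id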
3,882
def metric_names(self):
    try:
        metric_names = self.static_meta_dict.keys()
    except __HOLE__:
        metric_names = []
    return(metric_names)

# default method; providers can override
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpact/providers/provider.py/Provider.metric_names
3,883
def member_items(self, query_string, provider_url_template=None, cache_enabled=True):
    if not self.provides_members:
        raise NotImplementedError()

    self.logger.debug(u"%s getting member_items for %s" % (self.provider_name, query_string))

    if not provider_url_template:
        provider_url_template = self.member_items_url_template
    url = self._get_templated_url(provider_url_template, query_string, "members")
    if not url:
        return []

    # try to get a response from the data provider
    response = self.http_get(url, cache_enabled=cache_enabled)

    if response.status_code != 200:
        self.logger.info(u"%s status_code=%i" % (self.provider_name, response.status_code))
        if response.status_code == 404:
            raise ProviderItemNotFoundError
        elif response.status_code == 303:  #redirect
            pass
        else:
            self._get_error(response.status_code, response)

    page = response.text

    # extract the member ids
    try:
        members = self._extract_members(page, query_string)
    except (__HOLE__, TypeError):
        members = []

    return(members)

# default method; providers can override
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpact/providers/provider.py/Provider.member_items
3,884
def get_biblio_for_id(self, id, provider_url_template=None, cache_enabled=True):
    if not self.provides_biblio:
        return {}

    self.logger.debug(u"%s getting biblio for %s" % (self.provider_name, id))

    if not provider_url_template:
        provider_url_template = self.biblio_url_template
    url = self._get_templated_url(provider_url_template, id, "biblio")

    # try to get a response from the data provider
    response = self.http_get(url, cache_enabled=cache_enabled)

    if response.status_code != 200:
        self.logger.info(u"%s status_code=%i" % (self.provider_name, response.status_code))
        if response.status_code == 404:  #not found
            return {}
        elif response.status_code == 403:  #forbidden
            return {}
        elif ((response.status_code >= 300) and (response.status_code < 400)):  #redirect
            return {}
        else:
            self._get_error(response.status_code, response)

    # extract the aliases
    try:
        biblio_dict = self._extract_biblio(response.text, id)
    except (AttributeError, __HOLE__):
        biblio_dict = {}

    return biblio_dict

# default method; providers can override
TypeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpact/providers/provider.py/Provider.get_biblio_for_id
3,885
def _get_aliases_for_id(self, id, provider_url_template=None, cache_enabled=True):
    if not self.provides_aliases:
        return []

    self.logger.debug(u"%s getting aliases for %s" % (self.provider_name, id))

    if not provider_url_template:
        provider_url_template = self.aliases_url_template
    url = self._get_templated_url(provider_url_template, id, "aliases")

    # try to get a response from the data provider
    response = self.http_get(url, cache_enabled=cache_enabled)

    if response.status_code != 200:
        self.logger.info(u"%s status_code=%i" % (self.provider_name, response.status_code))
        if response.status_code == 404:
            return []
        elif response.status_code == 403:  #forbidden
            return []
        elif response.status_code == 303:  #redirect
            pass
        else:
            self._get_error(response.status_code, response)

    try:
        new_aliases = self._extract_aliases(response.text, id)
    except (__HOLE__, AttributeError):
        new_aliases = []

    return new_aliases

# default method; providers can override
TypeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpact/providers/provider.py/Provider._get_aliases_for_id
3,886
def get_metrics_for_id(self, id, provider_url_template=None, cache_enabled=True,
                       url_override=None, extract_metrics_method=None):
    if not self.provides_metrics:
        return {}
    if not extract_metrics_method:
        extract_metrics_method = self._extract_metrics

    # self.logger.debug(u"%s getting metrics for %s" % (self.provider_name, id))

    if url_override:
        url = url_override
    else:
        if not provider_url_template:
            provider_url_template = self.metrics_url_template
        url = self._get_templated_url(provider_url_template, id, "metrics")
    if not url:
        return {}

    # try to get a response from the data provider
    response = self.http_get(url, cache_enabled=cache_enabled, allow_redirects=True)

    #self.logger.debug(u"%s get_metrics_for_id response.status_code %i" % (self.provider_name, response.status_code))

    # extract the metrics
    try:
        metrics_dict = extract_metrics_method(response.text, response.status_code, id=id)
    except (requests.exceptions.Timeout, socket.timeout) as e:
        # can apparently be thrown here
        self.logger.info(u"%s Provider timed out *after* GET in socket" % (self.provider_name))
        raise ProviderTimeout("Provider timed out *after* GET in socket", e)
    except (AttributeError, __HOLE__):
        # throws type error if response.text is none
        metrics_dict = {}

    return metrics_dict

# ideally would aggregate all tweets from all urls.
# the problem is this requires multiple drill-down links, which is troubling for UI at the moment
# for now, look up all the alias urls and use metrics for url that is most tweeted
TypeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpact/providers/provider.py/Provider.get_metrics_for_id
3,887
def http_get(self, url, headers={}, timeout=20, cache_enabled=True, allow_redirects=False):
    """ Returns a requests.models.Response object or raises exception
        on failure. Will cache requests to the same URL. """

    headers["User-Agent"] = USER_AGENT

    if cache_enabled:
        cache = cache_module.Cache(self.max_cache_duration)
        cached_response = get_page_from_cache(url, headers, allow_redirects, cache)
        if cached_response:
            self.logger.debug(u"{provider_name} CACHE HIT on {url}".format(
                provider_name=self.provider_name, url=url))
            return cached_response

    try:
        # analytics.track("CORE", "Sent GET to Provider", {"provider": self.provider_name, "url": url},
        #     context={ "providers": { 'Mixpanel': False } })
        try:
            self.logger.info(u"{provider_name} LIVE GET on {url}".format(
                provider_name=self.provider_name, url=url))
        except __HOLE__:
            self.logger.info(u"{provider_name} LIVE GET on an url that throws UnicodeDecodeError".format(
                provider_name=self.provider_name))

        r = requests.get(url, headers=headers, timeout=timeout,
                         allow_redirects=allow_redirects, verify=False)

        if r and not r.encoding:
            r.encoding = "utf-8"

        if r and cache_enabled:
            store_page_in_cache(url, headers, allow_redirects, r, cache)

    except (requests.exceptions.Timeout, socket.timeout) as e:
        self.logger.info(u"{provider_name} provider timed out on GET on {url}".format(
            provider_name=self.provider_name, url=url))
        # analytics.track("CORE", "Received no response from Provider (timeout)",
        #     {"provider": self.provider_name, "url": url})
        raise ProviderTimeout("Provider timed out during GET on " + url, e)

    except requests.exceptions.RequestException as e:
        self.logger.exception(u"{provider_name} RequestException on GET on {url}".format(
            provider_name=self.provider_name, url=url))
        # analytics.track("CORE", "Received RequestException from Provider",
        #     {"provider": self.provider_name, "url": url})
        raise ProviderHttpError("RequestException during GET on: " + url, e)

    return r
UnicodeDecodeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpact/providers/provider.py/Provider.http_get
3,888
def _lookup_json(data, keylist):
    for mykey in keylist:
        try:
            data = data[mykey]
        except (KeyError, __HOLE__):
            return None
    return(data)
TypeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpact/providers/provider.py/_lookup_json
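Both branches of the except tuple in the record above are easy to trigger; a small sketch with a plain dict: a missing key raises KeyError, while indexing into a non-subscriptable leaf raises TypeError.

data = {"a": {"b": 1}}
_lookup_json(data, ["a", "b"])       # returns 1
_lookup_json(data, ["a", "x"])       # KeyError caught, returns None
_lookup_json(data, ["a", "b", "c"])  # TypeError caught (int not subscriptable), returns None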
3,889
def _get_doc_from_xml(page):
    try:
        try:
            doc = minidom.parseString(page.strip().encode('utf-8'))
        except __HOLE__:
            doc = minidom.parseString(page.strip())
        lookup_function = _lookup_xml_from_dom
    except ExpatError, e:
        doc = BeautifulSoup.BeautifulStoneSoup(page)
        lookup_function = _lookup_xml_from_soup
    if not doc:
        raise ProviderContentMalformedError
    return (doc, lookup_function)
UnicodeDecodeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpact/providers/provider.py/_get_doc_from_xml
3,890
def _find_all_in_xml(page, mykey):
    (doc, lookup_function) = _get_doc_from_xml(page)
    if not doc:
        return None
    try:
        doc_list = doc.getElementsByTagName(mykey)
    except (KeyError, IndexError, __HOLE__):
        return None
    return(doc_list)
TypeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpact/providers/provider.py/_find_all_in_xml
3,891
def _lookup_xml_from_dom(doc, keylist):
    response = None
    for mykey in keylist:
        if not doc:
            return None
        try:
            doc_list = doc.getElementsByTagName(mykey)
            # just takes the first one for now
            doc = doc_list[0]
        except (__HOLE__, IndexError):
            return None
    if doc:
        try:
            response = doc.firstChild.data
        except AttributeError:
            return None
    try:
        response = int(response)
    except ValueError:
        pass
    return(response)
KeyError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpact/providers/provider.py/_lookup_xml_from_dom
3,892
def _lookup_xml_from_soup(soup, keylist):
    smaller_bowl_of_soup = soup
    for mykey in keylist:
        if not smaller_bowl_of_soup:
            return None
        try:
            # BeautifulSoup forces all keys to lowercase
            smaller_bowl_of_soup = smaller_bowl_of_soup.find(mykey.lower())
        except __HOLE__:
            return None

    if smaller_bowl_of_soup:
        response = smaller_bowl_of_soup.text
    else:
        response = None

    try:
        response = int(response)
    except (ValueError, TypeError):
        pass

    return(response)
KeyError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpact/providers/provider.py/_lookup_xml_from_soup
3,893
def _extract_from_xml(page, dict_of_keylists):
    (doc, lookup_function) = _get_doc_from_xml(page)
    return_dict = {}
    if dict_of_keylists:
        for (metric, keylist) in dict_of_keylists.iteritems():
            value = lookup_function(doc, keylist)
            # only set metrics for non-zero and non-null metrics
            if value:
                try:
                    value = value.strip()  #strip spaces if any
                except __HOLE__:
                    pass
                return_dict[metric] = value
    return return_dict

# given a url that has a doi embedded in it, return the doi
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpact/providers/provider.py/_extract_from_xml
3,894
def doi_from_url_string(url):
    logger.info(u"%s parsing url %s" % ("doi_from_url_string", url))

    result = re.findall("(10\.\d+.[0-9a-wA-W_/\.\-%]+)", url, re.DOTALL)
    try:
        doi = urllib.unquote(result[0])
    except __HOLE__:
        doi = None

    return(doi)
IndexError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpact/providers/provider.py/doi_from_url_string
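For the record above: when the pattern matches nothing, re.findall returns an empty list and result[0] raises IndexError, so the function returns None. A quick sketch with a made-up URL:

import re
result = re.findall("(10\.\d+.[0-9a-wA-W_/\.\-%]+)", "http://example.com/no-doi-here", re.DOTALL)
# result == [], so result[0] raises IndexError and doi_from_url_string returns None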
3,895
def _get_shebang(self, encoding, post_interp=b'', options=None):
    enquote = True
    if self.executable:
        executable = self.executable
        enquote = False     # assume this will be taken care of
    elif not sysconfig.is_python_build():
        executable = get_executable()
    elif in_venv():
        executable = os.path.join(sysconfig.get_path('scripts'),
                                  'python%s' % sysconfig.get_config_var('EXE'))
    else:
        executable = os.path.join(
            sysconfig.get_config_var('BINDIR'),
            'python%s%s' % (sysconfig.get_config_var('VERSION'),
                            sysconfig.get_config_var('EXE')))
    if options:
        executable = self._get_alternate_executable(executable, options)

    # If the user didn't specify an executable, it may be necessary to
    # cater for executable paths with spaces (not uncommon on Windows)
    if enquote and ' ' in executable:
        executable = '"%s"' % executable
    # Issue #51: don't use fsencode, since we later try to
    # check that the shebang is decodable using utf-8.
    executable = executable.encode('utf-8')
    # in case of IronPython, play safe and enable frames support
    if (sys.platform == 'cli' and '-X:Frames' not in post_interp
            and '-X:FullFrames' not in post_interp):
        post_interp += b' -X:Frames'
    shebang = b'#!' + executable + post_interp + b'\n'
    # Python parser starts to read a script using UTF-8 until
    # it gets a #coding:xxx cookie. The shebang has to be the
    # first line of a file, the #coding:xxx cookie cannot be
    # written before. So the shebang has to be decodable from
    # UTF-8.
    try:
        shebang.decode('utf-8')
    except UnicodeDecodeError:
        raise ValueError(
            'The shebang (%r) is not decodable from utf-8' % shebang)
    # If the script is encoded to a custom encoding (use a
    # #coding:xxx cookie), the shebang has to be decodable from
    # the script encoding too.
    if encoding != 'utf-8':
        try:
            shebang.decode(encoding)
        except __HOLE__:
            raise ValueError(
                'The shebang (%r) is not decodable '
                'from the script encoding (%r)' % (shebang, encoding))
    return shebang
UnicodeDecodeError
dataset/ETHPy150Open francelabs/datafari/windows/python/Lib/site-packages/pip/_vendor/distlib/scripts.py/ScriptMaker._get_shebang
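The masked branch in the record above mirrors the utf-8 check just before it: bytes.decode(encoding) raises UnicodeDecodeError when the shebang bytes are not valid in the script's declared encoding. A minimal illustration with a hypothetical path:

shebang = b'#!/opt/caf\xc3\xa9/bin/python\n'  # hypothetical path; valid UTF-8, not ASCII
try:
    shebang.decode('ascii')  # a script declaring #coding:ascii would reach this check
except UnicodeDecodeError:
    raise ValueError('The shebang (%r) is not decodable '
                     'from the script encoding (%r)' % (shebang, 'ascii'))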
3,896
def _copy_script(self, script, filenames):
    adjust = False
    script = os.path.join(self.source_dir, convert_path(script))
    outname = os.path.join(self.target_dir, os.path.basename(script))
    if not self.force and not self._fileop.newer(script, outname):
        logger.debug('not copying %s (up-to-date)', script)
        return

    # Always open the file, but ignore failures in dry-run mode --
    # that way, we'll get accurate feedback if we can read the
    # script.
    try:
        f = open(script, 'rb')
    except __HOLE__:
        if not self.dry_run:
            raise
        f = None
    else:
        encoding, lines = detect_encoding(f.readline)
        f.seek(0)
        first_line = f.readline()
        if not first_line:
            logger.warning('%s: %s is an empty file (skipping)',
                           self.get_command_name(), script)
            return

        match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
        if match:
            adjust = True
            post_interp = match.group(1) or b''

    if not adjust:
        if f:
            f.close()
        self._fileop.copy_file(script, outname)
        if self.set_mode:
            self._fileop.set_executable_mode([outname])
        filenames.append(outname)
    else:
        logger.info('copying and adjusting %s -> %s', script, self.target_dir)
        if not self._fileop.dry_run:
            shebang = self._get_shebang(encoding, post_interp)
            if b'pythonw' in first_line:
                ext = 'pyw'
            else:
                ext = 'py'
            n = os.path.basename(outname)
            self._write_script([n], shebang, f.read(), filenames, ext)
        if f:
            f.close()
IOError
dataset/ETHPy150Open francelabs/datafari/windows/python/Lib/site-packages/pip/_vendor/distlib/scripts.py/ScriptMaker._copy_script
3,897
def show_samples(m, model_path):
    """
    Show samples given a DBM model.

    Parameters
    ----------
    m: int
        rows * cols
    model_path: str
        Path of the model.
    """
    model = load_model(model_path, m)

    print('Loading data (used for setting up visualization '
          'and seeding gibbs chain) ...')
    dataset_yaml_src = model.dataset_yaml_src
    dataset = yaml_parse.load(dataset_yaml_src)
    pv = init_viewer(dataset, rows, cols)

    if hasattr(model.visible_layer, 'beta'):
        beta = model.visible_layer.beta.get_value()
        print('beta: ', (beta.min(), beta.mean(), beta.max()))

    print('showing seed data...')
    vis_batch = dataset.get_batch_topo(m)
    update_viewer(dataset, pv, vis_batch, rows, cols)
    pv.show()

    print('How many Gibbs steps should I run with the seed data clamped?'
          '(negative = ignore seed data)')
    x = int(input())

    # Make shared variables representing the sampling state of the model
    layer_to_state = model.make_layer_to_state(m)

    # Seed the sampling with the data batch
    vis_sample = layer_to_state[model.visible_layer]

    validate_all_samples(model, layer_to_state)

    if x >= 0:
        if vis_sample.ndim == 4:
            vis_sample.set_value(vis_batch)
        else:
            design_matrix = dataset.get_design_matrix(vis_batch)
            vis_sample.set_value(design_matrix)

    validate_all_samples(model, layer_to_state)

    sample_func = get_sample_func(model, layer_to_state, x)

    while True:
        print('Displaying samples. '
              'How many steps to take next? (q to quit, ENTER=1)')
        while True:
            x = input()
            if x == 'q':
                quit()
            if x == '':
                x = 1
                break
            else:
                try:
                    x = int(x)
                    break
                except __HOLE__:
                    print('Invalid input, try again')

        for i in xrange(x):
            print(i)
            sample_func()

        validate_all_samples(model, layer_to_state)

        vis_batch = vis_sample.get_value()
        update_viewer(dataset, pv, vis_batch, rows, cols)
        pv.show()

        if 'Softmax' in str(type(model.hidden_layers[-1])):
            state = layer_to_state[model.hidden_layers[-1]]
            value = state.get_value()
            y = np.argmax(value, axis=1)
            assert y.ndim == 1
            for i in xrange(0, y.shape[0], cols):
                print(y[i:i+cols])
ValueError
dataset/ETHPy150Open lisa-lab/pylearn2/pylearn2/scripts/dbm/show_samples.py/show_samples
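The masked handler in the input loop above is the standard int() parse failure: int() on a non-numeric string raises ValueError, so the prompt repeats. For example:

try:
    x = int('abc')  # raises ValueError: invalid literal for int()
except ValueError:
    print('Invalid input, try again')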
3,898
def main(opts):
    try:
        # Load a context with initialization
        ctx = Context.load(opts.workspace, strict=True)

        # Initialize the workspace if necessary
        if ctx:
            print('Catkin workspace `%s` is already initialized. No action taken.' % (ctx.workspace))
        else:
            print('Initializing catkin workspace in `%s`.' % (opts.workspace or os.getcwd()))
            # initialize the workspace
            init_metadata_root(
                opts.workspace or os.getcwd(),
                opts.reset)

        ctx = Context.load(opts.workspace)
        print(ctx.summary())

    except __HOLE__ as exc:
        # Usually happens if workspace is already underneath another catkin_tools workspace
        print('error: could not initialize catkin workspace: %s' % exc.message)
        return 1

    return 0
IOError
dataset/ETHPy150Open catkin/catkin_tools/catkin_tools/verbs/catkin_init/cli.py/main
3,899
def _get_data_from_resource_manager(resource_manager, attrs_white_list_rules,
                                    additional_display_options):
    data = []

    display_options = {}
    display_options.update(additional_display_options)

    instances_list = resource_manager.list(**display_options)
    for inst in instances_list:
        inst_details = {}
        obj_dict = \
            inst.to_dict() if hasattr(inst, "to_dict") else inst.__dict__

        for rule in attrs_white_list_rules:
            try:
                inst_details[rule.map_to_name] = utils.get_attr_value(
                    rule.path, rule.transform_func, obj_dict
                )
            except __HOLE__:
                # in case retrieved attribute is highlevel key
                # and is not present in obj_dict KeyError occurs which
                # cannot be handled by get_attr_value function due to
                # its features so we must do it here in order
                # to prevent from situation when whole set data is not
                # collected for particular resource
                logger.info("{0} cannot be collected for the statistic "
                            "as attribute with path {1} is not present in the "
                            "resource manager's data".format(rule.map_to_name,
                                                             rule.path))

        data.append(inst_details)

    return data
KeyError
dataset/ETHPy150Open openstack/fuel-web/nailgun/nailgun/statistics/oswl/helpers.py/_get_data_from_resource_manager